Code example #1
def by_address(queryset, address='', project=False):
    """
  Filter queryset by address

  If project=True, we also apply a project exclusive filter
  """
    if address:
        address = json.loads(address)

        if u'address_components' in address:
            q_objs = []

            if len(address[u'address_components']):
                for component in address[u'address_components']:
                    q_obj = SQ()

                    for component_type in component[u'types']:
                        type_string = helpers.whoosh_raw(u"{}-{}".format(
                            component[u'long_name'], component_type).strip())
                        q_obj.add(SQ(address_components=type_string), SQ.OR)
                        q_obj.add(SQ(skip_address_filter=1), SQ.OR)

                    q_objs.append(q_obj)

                # Filter all address components
                for obj in q_objs:
                    queryset = queryset.filter(obj)
            else:  # remote projects
                if project:
                    queryset = queryset.filter(can_be_done_remotely=1)
    return queryset
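
Usage note (not part of the original project): by_address() expects a JSON-encoded address in the shape returned by Google's geocoder. A minimal usage sketch, assuming a haystack SearchQuerySet backed by the same index and that json, SQ and helpers are importable as in the snippet above:

import json
from haystack.query import SearchQuerySet

# Hypothetical payload; the keys mirror what the helper above reads.
address_json = json.dumps({
    'address_components': [
        {'long_name': 'Berlin', 'types': ['locality', 'political']},
        {'long_name': 'Germany', 'types': ['country', 'political']},
    ]
})
results = by_address(SearchQuerySet(), address=address_json, project=True)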
Code example #2
File: forms.py Project: kamni/nodonuts
    def search(self):
        # TODO: docs and tests
        if not self.is_valid():
            return self.no_query_found()
        
        self.q = self.cleaned_data.get('q')
        self.user = not self.cleaned_data.get('all') and self.user or None
        self.order = self.cleaned_data.get('order')
        self.tags = [tag.strip() for tag in self.cleaned_data.get('tags').split(' ') if tag.strip()]
        try:
            self.ss = int(self.cleaned_data.get('ss'))
        except (TypeError, ValueError):
            self.ss = None
        
        if self.q:
            query = self.searchqueryset.auto_query(self.q)
        else:
            query = self.searchqueryset.all()
        
        if self.user:
            query = query.filter(added_by=self.user)
        
        if self.ss is not None:
            query = query.filter(serving_size=self.ss)
            
        if self.tags:  # avoid filtering on an empty SQ when no tags were given
            sq = SQ()
            for tag in self.tags:
                sq.add(SQ(tags=tag), SQ.AND)
            query = query.filter(sq)
        
        if self.load_all:
            query = query.load_all()

        return self.order_by(query, self.order).models(Recipe)
Code example #3
File: forms.py Project: rheaplex/mozillians
    def search(self):
        """Search on the ES index the query sting provided by the user."""

        search_term = self.cleaned_data['q']

        sqs = super(PhonebookSearchForm, self).search()

        if sqs:
            profile = self.request.user.userprofile
            all_indexed_fields = UserProfileIndex.fields.keys()
            privacy_indexed_fields = [
                field for field in all_indexed_fields
                if field.startswith('privacy_')
            ]

            # Every profile object in mozillians.org has privacy settings.
            # Let's take advantage of this and compare the indexed fields
            # with the ones listed in a profile in order to build the query to ES.
            query = SQ()
            for p_field in privacy_indexed_fields:
                # this is the field that we are going to query
                q_field = p_field.split('_', 1)[1]
                if hasattr(profile, q_field):
                    # The user's privacy level must be less than or equal to the queried
                    # field's privacy setting (a lower number means a greater permission level)
                    q_args = {
                        q_field: search_term,
                        '{0}__gte'.format(p_field): profile.privacy_level
                    }
                    query.add(SQ(**q_args), SQ.OR)

            sqs = sqs.filter(query)
        return sqs
Code example #4
File: forms.py Project: codeadict/ecomarket
 def filter_ships_to(self, sqs):
     val = self.cleaned_data['ships_to']
     if val:
         sq = SQ()
         sq.add(SQ(ships_to__contains=val), SQ.OR)
         sq.add(SQ(ships_to='worldwide'), SQ.OR)
         sqs = sqs.filter(sq)
     return sqs
Code example #5
def by_causes(queryset, cause_string=None):
    """ Filter queryset by a comma delimeted cause list """
    if cause_string:
        operator, items = get_operator_and_items(cause_string)
        q_obj = SQ()
        for c in items:
            if len(c) > 0:
                q_obj.add(SQ(causes=c), operator)
        queryset = queryset.filter(q_obj)
    return queryset
Code example #6
def by_skills(queryset, skill_string=None):
    """ Filter queryset by a comma delimeted skill list """
    if skill_string:
        operator, items = get_operator_and_items(skill_string)
        q_obj = SQ()
        for s in items:
            if len(s) > 0:
                q_obj.add(SQ(skills=s), operator)
        queryset = queryset.filter(q_obj)
    return queryset
Code example #7
def by_date(queryset, date_string=None):
    """ Filter queryset by a comma delimeted date """
    if date_string:
        operator, items = get_operator_and_items(date_string)
        q_obj = SQ()
        date = datetime.strptime(items[0] + ' 00:00:00', '%Y-%m-%d %H:%M:%S')
        q_obj.add(SQ(start_date=date) | SQ(end_date=date), operator)

        queryset = queryset.filter(q_obj)

    return queryset
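
Note: examples #5-#7 above rely on a get_operator_and_items helper that is not shown in this listing. A minimal sketch of what such a helper might look like, assuming an optional leading "AND:"/"OR:" prefix selects the operator and the remainder is a comma-delimited list (an illustration only, not the projects' actual implementation):

from haystack.query import SQ

def get_operator_and_items(raw_string):
    """Hypothetical helper: split 'AND:a,b,c' / 'OR:a,b,c' into (operator, items)."""
    operator = SQ.OR  # assumed default when no prefix is given
    if ':' in raw_string:
        prefix, raw_string = raw_string.split(':', 1)
        if prefix.strip().upper() == 'AND':
            operator = SQ.AND
    items = [item.strip() for item in raw_string.split(',')]
    return operator, items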
Code example #8
File: views.py Project: JvGinkel/openode
    def prepare_search_queryset(self):
        sq = SearchQuerySet()

        filter_types = self.form.get_filter_types()
        _sq = SQ()
        for or_filter in filter_types:
            _sq.add(SQ(**or_filter), SQ.OR)

        if not filter_types:
            return sq
        return sq.filter(_sq)
Code example #9
File: views.py Project: OnTheWayJohn/openode
    def prepare_search_queryset(self):
        sq = SearchQuerySet()

        filter_types = self.form.get_filter_types()
        _sq = SQ()
        for or_filter in filter_types:
            _sq.add(SQ(**or_filter), SQ.OR)

        if not filter_types:
            return sq
        return sq.filter(_sq)
Code example #10
    def search(self):
        sqs = super(ArticleFacetedSearchForm, self).search()

        if len(self.compendium_types) > 0 or len(self.research_fields) > 0:
            sq = SQ()
            for compendium_type in self.compendium_types:
                sq.add(SQ(compendium_type=compendium_type), SQ.OR)
            for research_field in self.research_fields:
                sq.add(SQ(primary_research_field=research_field), SQ.OR)
            return sqs.filter(sq)
        # otherwise just pass through
        return sqs
Code example #11
    def get_queryset(self):
        """ return the approved lawyers
        if we have a query string then use that to filter """
        logger.info('Using ElasticSearch')
        sq = SQ()
        for value in [value for key, value in self.request.GET.items() if key in ['q', 'location']]:
            if value:
                term = Clean(urlparse.unquote(value))
                sq.add(SQ(content=term), SQ.AND)
                sq.add(SQ(practice_locations=term), SQ.OR)

        return SearchQuerySet().filter(sq).order_by('-fee_packages')
Code example #12
def by_address(queryset, address='', project=False):
    """
  Filter queryset by publish status.

  If project=True, we also apply a project exclusive filter
  """
    if address:
        address = json.loads(address)
        if u'address_components' in address:
            q_objs = []
            """
      Caribbean filter
      """
            if len(address[u'address_components']):
                if address[u'address_components'][0][
                        'long_name'] == 'Caribbean':
                    queryset = queryset.filter(
                        SQ(address_components=helpers.whoosh_raw(
                            u"{}-{}".format('Jamaica', 'country').strip()))
                        | SQ(address_components=helpers.whoosh_raw(
                            u"{}-{}".format('Haiti', 'country').strip()))
                        | SQ(address_components=helpers.whoosh_raw(
                            u"{}-{}".format('Saint Lucia', 'country').strip()))
                        | SQ(address_components=helpers.whoosh_raw(
                            u"{}-{}".format('Suriname', 'country').strip()))
                        | SQ(address_components=helpers.whoosh_raw(
                            u"{}-{}".format('Trinidad & Tobago',
                                            'country').strip())))

                    return queryset

                for component in address[u'address_components']:
                    q_obj = SQ()

                    for component_type in component[u'types']:
                        type_string = helpers.whoosh_raw(u"{}-{}".format(
                            component[u'long_name'], component_type).strip())
                        q_obj.add(SQ(address_components=type_string), SQ.OR)

                    q_objs.append(q_obj)

                # Filter all address components
                for obj in q_objs:
                    queryset = queryset.filter(obj)
            else:  # remote projects
                if project:
                    queryset = queryset.filter(can_be_done_remotely=1)
    return queryset
Code example #13
File: views.py Project: PierreHao/manny_search
    def get_queryset(self, **kwargs):
        sq = SQ()
        if not self.request.GET.items():
            query_set = SearchQuerySet().all()

        else:

            term = Clean(self.request.GET.get('q'))
            if term:
                sq.add(SQ(content=term), SQ.OR)
                sq.add(SQ(brand_name=term), SQ.OR)

            query_set = SearchQuerySet().filter(sq)

        return ProductSerializer([o.object for o in query_set], many=True).data
Code example #14
File: forms.py Project: MiltosD/CEFELRC
    def search(self):
        """
        A blend of its super methods with only a different base
        `SearchQuerySet` in case of empty/invalid queries.
        """
        sqs = self.searchqueryset
        if self.is_valid() and self.cleaned_data.get('q'):
            # extract special queries
            special_queries, query = \
              _extract_special_queries(self.cleaned_data.get('q'))
            if query:
                sqs = sqs.auto_query(query)
            if (special_queries):
                # for each special query, get the Django internal resource ids
                # matching the query and filter the SearchQuerySet accordingly
                for _sq in special_queries:
                    _res_ids = _process_special_query(_sq)
                    if _res_ids:
                        _sq = SQ()
                        for _id in _res_ids:
                            _sq.add(SQ(django_id=_id), SQ.OR)
                        sqs = sqs.filter(_sq)
                    else:
                        # force empty search result if no ids are returned
                        # for a special query
                        sqs = sqs.none()
                        break
        if self.load_all:
            sqs = sqs.load_all()
        # we need to process each facet to ensure that the field name and the
        # value are quoted correctly and separately:
        for facet in [f for f in self.selected_facets if ":" in f]:
            field, value = facet.split(":", 1)
            # only add facets which are also in the search index
            # pylint: disable-msg=E1101
            if not field in resourceInfoType_modelIndex.fields:
                LOGGER.info('Ignoring unknown facet field "%s".', field)
                continue
            if value:
                sqs = sqs.narrow(u'%s:"%s"' % (field, sqs.query.clean(value)))
        if not is_member(self.request.user, 'ecmembers') and not self.request.user.is_superuser:
            sqs = sqs.filter_and(publicationStatusFilter__exact='published')

        return sqs
Code example #15
File: forms.py Project: sudhanshuchopra/mozillians
    def search(self):
        """Search on the ES index the query sting provided by the user."""

        search_term = self.cleaned_data['q']

        # Calling super will handle form validation and will also
        # search in fields that are not explicitly queried through `text`
        sqs = super(PhonebookSearchForm, self).search()

        if not sqs:
            return self.no_query_found()

        # Profiles Search
        profile = self.request.user.userprofile
        all_indexed_fields = UserProfileIndex.fields.keys()
        privacy_indexed_fields = [
            field for field in all_indexed_fields
            if field.startswith('privacy_')
        ]
        query = SQ()
        q_args = {}
        # Every profile object in mozillians.org has privacy settings.
        # Let's take advantage of this and compare the indexed fields
        # with the ones listed in a profile in order to build the query to ES.
        for p_field in privacy_indexed_fields:
            # this is the field that we are going to query
            q_field = p_field.split('_', 1)[1]
            if hasattr(profile, q_field):
                # The user's privacy level must be less than or equal to the queried
                # field's privacy setting (a lower number means a greater permission level)
                q_args = {
                    q_field: search_term,
                    '{0}__gte'.format(p_field): profile.privacy_level
                }
                query.add(SQ(**q_args), SQ.OR)

        # Group Search
        # We need to exclude non-visible groups.
        query.add(SQ(**{'visible': True}), SQ.OR)

        sqs = sqs.filter(query)
        return sqs
Code example #16
 def search(self):
     """
     A blend of its super methods with only a different base
     `SearchQuerySet` in case of empty/invalid queries.
     """
     sqs = self.searchqueryset
     if self.is_valid() and self.cleaned_data.get('q'):
         # extract special queries
         special_queries, query = \
           _extract_special_queries(self.cleaned_data.get('q'))
         if query:
             sqs = sqs.auto_query(query)
         if (special_queries):
             # for each special query, get the Django internal resource ids
             # matching the query and filter the SearchQuerySet accordingly
             for _sq in special_queries:
                 _res_ids = _process_special_query(_sq)
                 if _res_ids:
                     _sq = SQ()
                     for _id in _res_ids:
                         _sq.add(SQ(django_id=_id), SQ.OR)
                     sqs = sqs.filter(_sq)
                 else:
                     # force empty search result if no ids are returned
                     # for a special query
                     sqs = sqs.none()
                     break
     if self.load_all:
         sqs = sqs.load_all()
     # we need to process each facet to ensure that the field name and the
     # value are quoted correctly and separately:
     for facet in [f for f in self.selected_facets if ":" in f]:
         field, value = facet.split(":", 1)
         # only add facets which are also in the search index
         # pylint: disable-msg=E1101
         if not field in resourceInfoType_modelIndex.fields:
             LOGGER.info('Ignoring unknown facet field "%s".', field)
             continue
         if value:
             sqs = sqs.narrow(u'%s:"%s"' % (field, sqs.query.clean(value)))
     return sqs
Code example #17
File: forms.py Project: yogesh-kamble/mozillians
    def search(self):
        """Search on the ES index the query sting provided by the user."""

        search_term = self.cleaned_data['q']

        # Calling super will handle form validation and will also
        # search in fields that are not explicitly queried through `text`
        sqs = super(PhonebookSearchForm, self).search()

        if not sqs:
            return self.no_query_found()

        # Profiles Search
        profile = self.request.user.userprofile
        all_indexed_fields = UserProfileIndex.fields.keys()
        privacy_indexed_fields = [field for field in all_indexed_fields
                                  if field.startswith('privacy_')]
        query = SQ()
        q_args = {}
        # Every profile object in mozillians.org has privacy settings.
        # Let's take advantage of this and compare the indexed fields
        # with the ones listed in a profile in order to build the query to ES.
        for p_field in privacy_indexed_fields:
            # this is the field that we are going to query
            q_field = p_field.split('_', 1)[1]
            if hasattr(profile, q_field):
                # The user's privacy level must be less than or equal to the queried
                # field's privacy setting (a lower number means a greater permission level)
                q_args = {
                    q_field: search_term,
                    '{0}__gte'.format(p_field): profile.privacy_level
                }
                query.add(SQ(**q_args), SQ.OR)

        # Group Search
        # We need to exclude non-visible groups.
        query.add(SQ(**{'visible': True}), SQ.OR)

        sqs = sqs.filter(query)
        return sqs
Code example #18
File: search.py Project: Dpetters/Umeqo
def search(sqs, query):
    or_terms = map(lambda x: x.strip(), query.split("OR"))
    final_query = SQ()
    for query in or_terms:
        if query:
            current_query = SQ()
            # Pull out anything wrapped in quotes and do an exact match on it.
            open_quote_position = None
            non_exact_query = query
            for offset, char in enumerate(query):
                if char == '"':
                    if open_quote_position is not None:
                        current_match = non_exact_query[open_quote_position +
                                                        1:offset]
                        if current_match:
                            current_query.add(
                                SQ(text__exact=sqs.query.clean(current_match)),
                                SQ.AND)
                        non_exact_query = non_exact_query.replace(
                            '"%s"' % current_match, '', 1)
                        open_quote_position = None
                    else:
                        open_quote_position = offset

            # Pseudo-tokenize the rest of the query.
            keywords = non_exact_query.split()

            # Loop through keywords and add filters to the query.
            for keyword in keywords:
                exclude = False
                if keyword.startswith('-') and len(keyword) > 1:
                    keyword = keyword[1:]
                    exclude = True
                cleaned_keyword = sqs.query.clean(keyword)
                if exclude:
                    current_query.add(~SQ(text=cleaned_keyword), SQ.AND)
                else:
                    current_query.add(SQ(text=cleaned_keyword), SQ.AND)
            final_query.add(current_query, SQ.OR)
    return sqs.filter(final_query)
Code example #19
def by_disponibility(queryset, disponibility_string=None):
    """ Filter queryset by a comma delimeted disponibility list """
    if disponibility_string:
        operator, items = get_operator_and_items(disponibility_string)
        q_obj = SQ()
        for d in items:
            if len(d) > 0 and d == 'job':
                q_obj.add(SQ(job=True), operator)
            elif len(d) > 0 and d == 'work':
                q_obj.add(SQ(work=True), operator)
            elif len(d) > 0 and d == 'remotely':
                q_obj.add(SQ(can_be_done_remotely=True), operator)

        queryset = queryset.filter(q_obj)
    return queryset
Code example #20
File: search.py Project: Dpetters/Umeqo
def search(sqs, query):
    or_terms = map(lambda x: x.strip(), query.split("OR"))
    final_query = SQ()
    for query in or_terms:
        if query:
            current_query = SQ()
            # Pull out anything wrapped in quotes and do an exact match on it.
            open_quote_position = None
            non_exact_query = query
            for offset, char in enumerate(query):
                if char == '"':
                    if open_quote_position is not None:
                        current_match = non_exact_query[open_quote_position + 1:offset]
                        if current_match:
                            current_query.add(SQ(text__exact=sqs.query.clean(current_match)), SQ.AND)
                        non_exact_query = non_exact_query.replace('"%s"' % current_match, '', 1)
                        open_quote_position = None
                    else:
                        open_quote_position = offset
            
            # Pseudo-tokenize the rest of the query.
            keywords = non_exact_query.split()
            
            # Loop through keywords and add filters to the query.
            for keyword in keywords:
                exclude = False
                if keyword.startswith('-') and len(keyword) > 1:
                    keyword = keyword[1:]
                    exclude = True
                cleaned_keyword = sqs.query.clean(keyword)
                if exclude:
                    current_query.add(~SQ(text=cleaned_keyword), SQ.AND)
                else:
                    current_query.add(SQ(text=cleaned_keyword), SQ.AND)
            final_query.add(current_query, SQ.OR)
    return sqs.filter(final_query)
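
To illustrate the query grammar handled by the two Umeqo search() functions above (quoted phrases become exact matches, a leading '-' excludes a keyword, and "OR" splits alternative terms), a small usage sketch, assuming an index with a text field:

from haystack.query import SearchQuerySet

# Hypothetical call: match documents containing the exact phrase "machine learning"
# and the word python but not java, OR documents containing golang.
results = search(SearchQuerySet(), 'python "machine learning" -java OR golang')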
Code example #21
File: collection.py Project: NyakudyaA/django-bims
    def apply_filter(query_value, filters, ignore_bbox=False, only_site=False):
        """
        Apply filter and do the search to biological collection
        record and location site

        :param query_value: str
        :param filters: dict
        :param ignore_bbox: bool
        :returns:
        - collection_results : results from bio collection record
        - site_results : results from location site
        - fuzzy_search : if results from search is fuzzy search
        """

        fuzzy_search = False
        user_boundaries = None
        filter_mode = False

        sqs = SearchQuerySet()
        settings.ELASTIC_MIN_SCORE = 0

        # All filters
        taxon = filters.get('taxon', None)
        bbox = filters.get('bbox', None)
        query_collector = filters.get('collector', None)
        boundary = filters.get('boundary', None)
        user_boundary = filters.get('userBoundary', None)
        query_category = filters.get('category', None)
        reference_category = filters.get('referenceCategory', None)
        reference = filters.get('reference', None)
        year_from = filters.get('yearFrom', None)
        year_to = filters.get('yearTo', None)
        months = filters.get('months', None)
        site_id = filters.get('siteId', None)
        endemic = filters.get('endemic', None)
        conservation_status = filters.get('conservationStatus', None)
        river_catchments = filters.get('riverCatchment', None)

        if (
                taxon or
                query_collector or
                boundary or user_boundary or
                query_category or reference_category or
                year_from or year_to or
                months or reference or
                conservation_status or
                river_catchments or
                site_id or endemic):
            filter_mode = True

        if query_value:
            clean_query = sqs.query.clean(query_value)
            results = sqs.filter(
                SQ(original_species_name_exact__contains=clean_query) |
                SQ(taxon_scientific_name_exact__contains=clean_query) |
                SQ(vernacular_names__contains=clean_query),
                validated=True
            ).models(BiologicalCollectionRecord)

            if len(results) > 0:
                fuzzy_search = False
            else:
                fuzzy_search = True
                # Set min score bigger for fuzzy search
                settings.ELASTIC_MIN_SCORE = 2
                results = sqs.filter(
                    SQ(original_species_name=clean_query),
                    validated=True
                ).models(BiologicalCollectionRecord)
                settings.ELASTIC_MIN_SCORE = 0
        else:
            if filter_mode:
                results = sqs.all().models(
                    BiologicalCollectionRecord)
                results = results.filter(validated=True)
            else:
                results = []

        if taxon:
            results = sqs.filter(
                taxonomy=taxon
            ).models(BiologicalCollectionRecord)

        # get by bbox
        if not ignore_bbox:
            if bbox:
                bbox_array = bbox.split(',')
                downtown_bottom_left = Point(
                    float(bbox_array[1]),
                    float(bbox_array[0]))

                downtown_top_right = Point(
                    float(bbox_array[3]),
                    float(bbox_array[2]))

                results = results.within(
                    'location_center',
                    downtown_bottom_left,
                    downtown_top_right)

        # additional filters
        # query by collectors
        if query_collector:
            qs_collector = SQ()
            qs = json.loads(query_collector)
            for query in qs:
                qs_collector.add(SQ(collector=query), SQ.OR)
            results = results.filter(qs_collector)

        if boundary:
            qs_collector = SQ()
            qs = json.loads(boundary)
            for query in qs:
                query = '_' + query + '_'
                qs_collector.add(SQ(boundary__contains=query), SQ.OR)
            results = results.filter(qs_collector)

        if user_boundary:
            qs = json.loads(user_boundary)
            user_boundaries = UserBoundary.objects.filter(
                pk__in=qs
            )
            for user_boundary in user_boundaries:
                for geom in user_boundary.geometry:
                    results = results.polygon(
                        'location_center',
                        geom
                    )

        # query by category
        if query_category:
            qs_category = SQ()
            qs = json.loads(query_category)
            for query in qs:
                qs_category.add(SQ(category=query), SQ.OR)
            results = results.filter(qs_category)

        # query by endemic
        if endemic:
            qs_endemism = SQ()
            qs = json.loads(endemic)
            for query in qs:
                qs_endemism.add(SQ(endemism=query), SQ.OR)
            results = results.filter(qs_endemism)

        # query by conservation status
        if conservation_status:
            qs_conservation_status = SQ()
            qs = json.loads(conservation_status)
            for query in qs:
                qs_conservation_status.add(SQ(iucn_status=query), SQ.OR)
            results = results.filter(qs_conservation_status)

        # query by river catchment
        if river_catchments:
            qs_river_catchment = SQ()
            qs = json.loads(river_catchments)
            for query in qs:
                query = '_' + query.replace(' ', '_') + '_'
                qs_river_catchment.add(SQ(river_catchments__contains=query),
                                       SQ.OR)
            results = results.filter(qs_river_catchment)

        # query by reference category
        if reference_category:
            qs_reference_category = SQ()
            qs = json.loads(reference_category)
            for query in qs:
                qs_reference_category.add(SQ(reference_category=query), SQ.OR)
            results = results.filter(qs_reference_category)

        # query by reference category
        if reference:
            qs_reference = SQ()
            qs = json.loads(reference)
            for query in qs:
                qs_reference.add(SQ(reference__exact=query),
                                 SQ.OR)
            results = results.filter(qs_reference)

        # query by year from
        if year_from:
            clean_query_year_from = sqs.query.clean(year_from)
            results = results.filter(
                collection_date_year__gte=clean_query_year_from)

        # query by year to
        if year_to:
            clean_query_year_to = sqs.query.clean(year_to)
            results = results.filter(
                collection_date_year__lte=clean_query_year_to)

        # query by months
        if months:
            qs = months.split(',')
            qs_month = SQ()
            for month in qs:
                clean_query_month = sqs.query.clean(month)
                qs_month.add(
                    SQ(collection_date_month=clean_query_month), SQ.OR)
            results = results.filter(qs_month)

        # Search by site id
        if site_id:
            site_ids = site_id.split(',')
            qs_site_id = SQ()
            for site in site_ids:
                qs_site_id.add(
                    SQ(site_id_indexed=site), SQ.OR
                )
            results = results.filter(
                qs_site_id
            ).models(BiologicalCollectionRecord)

        collection_results = results

        # Search location site by name
        location_site_search = EmptySearchQuerySet()
        if query_value:
            location_site_search = SearchQuerySet().filter(
                site_name__contains=query_value
            ).models(LocationSite)

        location_site_results = location_site_search
        location_site_user_boundary = EmptySearchQuerySet()

        if boundary:
            qs_collector = SQ()
            qs = json.loads(boundary)
            for query in qs:
                query = '_' + query + '_'
                qs_collector.add(SQ(boundary__contains=query), SQ.OR)
            if isinstance(location_site_results, SearchQuerySet):
                location_site_results = location_site_results.filter(
                    qs_collector)

        if user_boundaries and isinstance(location_site_search,
                                          SearchQuerySet):
            location_site_user_boundary = location_site_search
            for user_boundary in user_boundaries:
                for geom in user_boundary.geometry:
                    location_site_user_boundary = \
                        location_site_user_boundary.polygon(
                            'location_site_point',
                            geom)

        site_results = GetCollectionAbstract.combine_search_query_results(
            location_site_results,
            location_site_user_boundary
        )

        if len(site_results) > 0 or isinstance(
                location_site_user_boundary, SearchQuerySet):
            # If there are fuzzy results from collection search but we
            # got non fuzzy results from location site, then remove
            # all the fuzzy results from collection
            if fuzzy_search and \
                    len(collection_results) > 0:
                collection_results = []
            fuzzy_search = False

        return collection_results, site_results, fuzzy_search
Code example #22
    def search(self):
        """Search on the ES index the query sting provided by the user."""

        search_term = self.cleaned_data.get('q')
        profile = None
        location_query = {}

        if self.country:
            location_query['country'] = self.country
            location_query['privacy_country__gte'] = None
        if self.region:
            location_query['region'] = self.region
            location_query['privacy_region__gte'] = None
        if self.city:
            location_query['city'] = self.city
            location_query['privacy_city__gte'] = None

        try:
            profile = self.request.user.userprofile
        except AttributeError:
            # This is an AnonymousUser
            privacy_level = PUBLIC
        else:
            privacy_level = profile.privacy_level

        if profile and profile.is_vouched:
            # If this is empty, it will default to all models.
            search_models = self.get_models()
        else:
            # Anonymous and un-vouched users cannot search groups
            search_models = [UserProfile, IdpProfile]

        if location_query:
            for k in location_query.keys():
                if k.startswith('privacy_'):
                    location_query[k] = privacy_level
            return SearchQuerySet().filter(
                **location_query).load_all() or self.no_query_found()

        # Calling super will handle form validation and will also
        # search in fields that are not explicitly queried through `text`
        sqs = super(PhonebookSearchForm, self).search().models(*search_models)

        if not sqs:
            return self.no_query_found()

        query = SQ()
        q_args = {}
        # Profiles Search
        all_indexed_fields = (UserProfileIndex.fields.keys() +
                              IdpProfileIndex.fields.keys())
        privacy_indexed_fields = [
            field for field in all_indexed_fields
            if field.startswith('privacy_')
        ]
        # Every profile object in mozillians.org has privacy settings.
        # Let's take advantage of this and compare the indexed fields
        # with the ones listed in a profile in order to build the query to ES.
        for p_field in privacy_indexed_fields:
            # this is the field that we are going to query
            q_field = p_field.split('_', 1)[1]
            # The user's privacy level must be less than or equal to the queried
            # field's privacy setting (a lower number means a greater permission level)
            q_args = {
                q_field: search_term,
                '{0}__gte'.format(p_field): privacy_level
            }
            query.add(SQ(**q_args), SQ.OR)

        # Username is always public
        query.add(SQ(**{'username': search_term}), SQ.OR)

        # Group Search
        if not search_models or Group in search_models:
            # Filter only visible groups.
            query.add(SQ(**{'visible': True}), SQ.OR)

        return sqs.filter(query).load_all()
Code example #23
File: views.py Project: ddsc/ddsc-site
def filter_annotations(request, sqs):
    # The current situation makes no sense: authenticated users can see fewer
    # annotations than anonymous users. Let's repair this by returning an
    # empty queryset if the current user has not logged in.
    if not request.user.is_authenticated():
        return sqs.none()
    # category
    category = request.GET.get('category')
    if category:
        sqs = sqs.filter(category__exact=category)
    # location
    bbox = request.GET.get('bbox')
    bottom_left = request.GET.get('bottom_left')
    top_right = request.GET.get('top_right')
    north = request.GET.get('north')
    east = request.GET.get('east')
    south = request.GET.get('south')
    west = request.GET.get('west')
    if bbox:
        if bbox == 'test':
            bottom_left = '48.0', '4.0'
            top_right = '52.0', '10.0'
        else:
            # lon_min, lat_min, lon_max, lat_max
            # west, south, east, north
            x_min, y_min, x_max, y_max = bbox.split(',')
            bottom_left = y_min, x_min
            top_right = y_max, x_max
    elif bottom_left and top_right:
        bottom_left = bottom_left.split(',')
        top_right = top_right.split(',')
    elif north and east and south and west:
        bottom_left = south, west
        top_right = north, east
    else:
        bottom_left = None
        top_right = None
    if bottom_left and top_right:
        bottom_left = Point(float(bottom_left[0]), float(bottom_left[1]))
        top_right = Point(float(top_right[0]), float(top_right[1]))
        sqs = sqs.within('location', bottom_left, top_right)
    # As decided during the UAT on 2014-09-09: no more private annotations,
    # all annotations will be public. Hence, we don't need filtering.
    # Well, not entirely: public for authenticated users only.
    ### user
    ##username = request.user.username
    ### allow username overriding in DEBUG mode
    ### this is a possible security leak
    ##username_override = request.GET.get('username_override')
    ##if settings.DEBUG and username_override:
    ##    username = username_override
    ##sqs = sqs.filter(
    ##    # either private and linked to the current user
    ##    SQ(username__exact=username, visibility=Visibility.PRIVATE) |
    ##    # or public
    ##    SQ(visibility=Visibility.PUBLIC)
    ##)
    # relation to model instances
    the_model_name = request.GET.get('model_name')
    the_model_pk = request.GET.get('model_pk')
    if the_model_name and the_model_pk:
        sqs = sqs.filter(
            the_model_name__exact=the_model_name,
            the_model_pk__exact=the_model_pk)
    else:
        # allow multiple models and pks
        model_names_pks = request.GET.get('model_names_pks')
        if model_names_pks:
            model_names_pks = model_names_pks.split(';')
            sq = SQ()
            for model_name_pk in model_names_pks:
                model_name, model_pk = model_name_pk.split(',')
                sq.add(
                    SQ(the_model_name__exact=model_name,
                       the_model_pk__exact=model_pk),
                    SQ.OR)
            sqs = sqs.filter(sq)
    # date range
    datetime_from = request.GET.get('datetime_from')
    if datetime_from:
        datetime_from = dateutil.parser.parse(datetime_from)
        sqs = sqs.filter(datetime_from__gte=datetime_from)
    datetime_until = request.GET.get('datetime_until')
    if datetime_until:
        datetime_until = dateutil.parser.parse(datetime_until)
        sqs = sqs.filter(datetime_until__lte=datetime_until)
    # full text
    text = request.GET.get('text')
    if text:
        sqs = sqs.filter(text__contains=text)
    tags = request.GET.get('tags')
    if tags:
        sqs = sqs.filter(tags__contains=tags)

    return sqs
Code example #24
File: api.py Project: Virtual-Lab/sound-colour-space
    def get_search(self, request, **kwargs):

        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)

        #####################################################################
        # get the query for a key that may appear multiple times, like:
        # ?q=author::peter&q=fulltext::blabla&tags=foo,bar,etc
        #####################################################################

        results = SearchQuerySet().models(Entry).all()

        query = None
        for x in request.GET.lists():
            if x[0] == 'q':
                query = x[1]

        order_by = request.GET.get('order_by', 'date')
        date_range = request.GET.get('date__range', None)
        tags = request.GET.get('tags', [])
        cat = request.GET.get('category', None)

        if cat:
            if cat == 'tone_systems':
                # for some reason we have to exclude 'CO' here instead of filtering on the 'TO' category
                results = SearchQuerySet().models(Entry).exclude(category='CO')
            elif cat == 'colour_systems':
                category = 'CO'
                results = SearchQuerySet().models(Entry).filter(category=category)
            else:
                results = EmptySearchQuerySet()


        match = request.GET.get('match', 'OR')
        operator = SQ.OR if (match == 'OR') else SQ.AND

        search_items = []

        if query:
            '''
            search_items = [
                {
                    'scope': 'author',
                    'term': 'Zarlino'
                },
                {
                    'scope': 'fulltext',
                    'term': 'blabla'
                },
            ]
            '''

            for item in query:
                search_item = item.split('::')
                search_items.append({'scope': search_item[0], 'term': search_item[1]})

            print (search_items)

            # filter search masks
            sq = SQ()

            for item in search_items:
                kwargs = {
                    # ie: author=AutoQuery
                    item['scope']: get_query_class_for_item(item),
                }

                sq.add(SQ(**kwargs), operator)

            results = results.filter(sq)

            if not results:
                results = EmptySearchQuerySet()

        selected_tags = []
        if tags:
            selected_tags = [t.strip() for t in tags.split(',')]
            for tag in selected_tags:
                results = results.filter(SQ(tags=tag))

        # if we filter tags OR have a search query, get the possible tags, otherwise return all tags
        if tags or query:
            possible_tags = []
            for r in results.all():
                possible_tags += [t.pk for t in r.object.tags.all()]
            possible_tags = set(possible_tags)  # convert to set to remove duplicates
            new_tags = Keyword.objects.filter(pk__in=possible_tags).order_by('name')
        else:
            new_tags = Keyword.objects.all()

        tag_objects = []  # what we will return
        for t in new_tags:
            tag_objects.append(
                {"name": t.name, "slug": t.slug, "selected": t.slug in selected_tags})

        # make sure we return at least the selected tag, if no results were found
        if len(tag_objects) == 0:
            user_tags = Keyword.objects.filter(slug__in=selected_tags).order_by('name')
            for t in user_tags:
                tag_objects.append({"name": t.name, "slug": t.slug, "selected": True})

        if date_range:
            start = date_range.split(',')[0]
            end = date_range.split(',')[1]
            results = results.filter(date__range=(start, end))

        # apply ordering
        results = results.order_by(order_by)

        # paginate
        paginator = Paginator(request.GET, results, resource_uri='/api/' + settings.API_VERSION + '/entry/search/')

        bundles = []
        for result in paginator.page()['objects']:
            bundle = self.build_bundle(obj=result.object, request=request)
            bundles.append(self.full_dehydrate(bundle))

        object_list = {
            'meta': paginator.page()['meta'],
            'objects': bundles
        }

        # object_list['meta']['search_scope'] = SEARCH_SCOPES
        object_list['meta']['search_query'] = search_items
        object_list['meta']['tags'] = tag_objects
        object_list['meta']['order_by'] = order_by
        object_list['meta']['match'] = match

        self.log_throttled_access(request)

        return self.create_response(request, object_list)
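
Example #24 above calls a get_query_class_for_item helper that is not shown here. A hypothetical sketch consistent with the inline comment ("ie: author=AutoQuery"), using haystack's standard input classes; the actual project may differ:

from haystack.inputs import AutoQuery, Exact

def get_query_class_for_item(item):
    """Hypothetical helper: wrap the term in a haystack input class based on its scope."""
    # Free-text scopes get AutoQuery (supports quoting/negation); others an exact match.
    if item['scope'] == 'fulltext':
        return AutoQuery(item['term'])
    return Exact(item['term'])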
Code example #25
File: forms.py Project: Phyks/dissemin
    def search(self):
        self.queryset = self.searchqueryset.models(Paper)

        q = remove_diacritics(self.cleaned_data['q'])
        if q:
            self.queryset = self.queryset.auto_query(q)

        visible = self.cleaned_data['visible']
        if visible == '':
            self.filter(visible=True)
        elif visible == 'invisible':
            self.filter(visible=False)

        self.form_filter('availability', 'availability')
        self.form_filter('oa_status__in', 'oa_status')
        self.form_filter('pubdate__gte', 'pub_after')
        self.form_filter('pubdate__lte', 'pub_before')
        self.form_filter('doctype__in', 'doctypes')

        # Filter by authors.
        # authors field: a comma separated list of full/last names.
        # Items with no whitespace or prefixed with 'last:' are considered
        # last names; others are full names.
        for name in self.cleaned_data['authors'].split(','):
            name = name.strip()

            # If part of this author name matches ORCID identifiers, consider
            # these as orcid ids and do the filtering
            orcid_ids = [x for x in name.split(' ') if validate_orcid(x)]
            for orcid_id in orcid_ids:
                try:
                    researcher = Researcher.objects.get(orcid=orcid_id)
                    self.filter(researchers=researcher.id)
                except Researcher.DoesNotExist:
                    pass
                continue
            # Rebuild a full name excluding the ORCID id terms
            name = ' '.join([x for x in name.split(' ') if x not in orcid_ids])

            name = remove_diacritics(name.strip())

            if name.startswith('last:'):
                is_lastname = True
                name = name[5:].strip()
            else:
                is_lastname = ' ' not in name

            if not name:
                continue

            if is_lastname:
                self.filter(authors_last=name)
            else:
                reversed_name = ' '.join(reversed(name.split(' ')))
                sq = SQ()
                sq.add(SQ(authors_full=Sloppy(name, slop=1)), SQ.OR)
                sq.add(SQ(authors_full=Sloppy(reversed_name, slop=1)), SQ.OR)
                self.queryset = self.queryset.filter(sq)

        self.queryset = aggregate_combined_status(self.queryset)

        status = self.cleaned_data['status']
        if status:
            self.queryset = self.queryset.post_filter(
                combined_status__in=status)

        # Default ordering by decreasing publication date
        order = self.cleaned_data['sort_by'] or '-pubdate'
        self.queryset = self.queryset.order_by(order).load_all()

        return self.queryset
Code example #26
File: forms.py Project: rgrunbla/dissemin
    def search(self):
        self.queryset = self.searchqueryset.models(Paper)

        q = remove_diacritics(self.cleaned_data['q'])
        if q:
            self.queryset = self.queryset.auto_query(q)

        visible = self.cleaned_data['visible']
        if visible == '':
            self.filter(visible=True)
        elif visible == 'invisible':
            self.filter(visible=False)

        self.form_filter('availability', 'availability')
        self.form_filter('oa_status__in', 'oa_status')
        self.form_filter('pubdate__gte', 'pub_after')
        self.form_filter('pubdate__lte', 'pub_before')
        self.form_filter('doctype__in', 'doctypes')

        # Filter by authors.
        # authors field: a comma separated list of full/last names.
        # Items with no whitespace or prefixed with 'last:' are considered
        # last names; others are full names.
        for name in self.cleaned_data['authors'].split(','):
            name = name.strip()

            # If part of this author name matches ORCID identifiers, consider
            # these as orcid ids and do the filtering
            orcid_ids = [x for x in name.split(' ') if validate_orcid(x)]
            for orcid_id in orcid_ids:
                self.filter(orcids=orcid_id)

            # Rebuild a full name excluding the ORCID id terms
            name = ' '.join([x for x in name.split(' ') if x not in orcid_ids])

            name = remove_diacritics(name.strip())

            if name.startswith('last:'):
                is_lastname = True
                name = name[5:].strip()
            else:
                is_lastname = ' ' not in name

            if not name:
                continue

            if is_lastname:
                self.filter(authors_last=name)
            else:
                reversed_name = ' '.join(reversed(name.split(' ')))
                sq = SQ()
                sq.add(SQ(authors_full=Sloppy(name, slop=1)), SQ.OR)
                sq.add(SQ(authors_full=Sloppy(reversed_name, slop=1)), SQ.OR)
                self.queryset = self.queryset.filter(sq)

        self.queryset = aggregate_combined_status(self.queryset)

        status = self.cleaned_data['status']
        if status:
            self.queryset = self.queryset.post_filter(
                combined_status__in=status)

        # Default ordering by decreasing publication date
        order = self.cleaned_data['sort_by'] or '-pubdate'
        self.queryset = self.queryset.order_by(order).load_all()

        return self.queryset
Code example #27
File: forms.py Project: johngian/mozillians
    def search(self):
        """Search on the ES index the query sting provided by the user."""

        search_term = self.cleaned_data.get('q')
        profile = None
        location_query = {}

        if self.country:
            location_query['country'] = self.country
            location_query['privacy_country__gte'] = None
        if self.region:
            location_query['region'] = self.region
            location_query['privacy_region__gte'] = None
        if self.city:
            location_query['city'] = self.city
            location_query['privacy_city__gte'] = None

        try:
            profile = self.request.user.userprofile
        except AttributeError:
            # This is an AnonymousUser
            privacy_level = PUBLIC
        else:
            privacy_level = profile.privacy_level

        if profile and profile.is_vouched:
            # If this is empty, it will default to all models.
            search_models = self.get_models()
        else:
            # Anonymous and un-vouched users cannot search groups
            search_models = [UserProfile, IdpProfile]

        if location_query:
            for k in location_query.keys():
                if k.startswith('privacy_'):
                    location_query[k] = privacy_level
            return SearchQuerySet().filter(**location_query).load_all() or self.no_query_found()

        # Calling super will handle form validation and will also
        # search in fields that are not explicitly queried through `text`
        sqs = super(PhonebookSearchForm, self).search().models(*search_models)

        if not sqs:
            return self.no_query_found()

        query = SQ()
        q_args = {}
        # Profiles Search
        all_indexed_fields = UserProfileIndex.fields.keys() + IdpProfileIndex.fields.keys()
        privacy_indexed_fields = [field for field in all_indexed_fields
                                  if field.startswith('privacy_')]
        # Every profile object in mozillians.org has privacy settings.
        # Let's take advantage of this and compare the indexed fields
        # with the ones listed in a profile in order to build the query to ES.
        for p_field in privacy_indexed_fields:
            # this is the field that we are going to query
            q_field = p_field.split('_', 1)[1]
            # The user's privacy level must be less than or equal to the queried
            # field's privacy setting (a lower number means a greater permission level)
            q_args = {
                q_field: search_term,
                '{0}__gte'.format(p_field): privacy_level
            }
            query.add(SQ(**q_args), SQ.OR)

        # Username is always public
        query.add(SQ(**{'username': search_term}), SQ.OR)

        # Group Search
        if not search_models or Group in search_models:
            # Filter only visible groups.
            query.add(SQ(**{'visible': True}), SQ.OR)

        return sqs.filter(query).load_all()
Code example #28
    def search(self):
        if not self.cleaned_data.get('q'):
            sqs = self.searchqueryset.filter(discoverable=True).filter(
                is_replaced_by=False)
        else:
            sqs = super(FacetedSearchForm, self).search().filter(
                discoverable=True).filter(is_replaced_by=False)

        geo_sq = SQ()
        if self.cleaned_data['NElng'] and self.cleaned_data['SWlng']:
            if float(self.cleaned_data['NElng']) > float(
                    self.cleaned_data['SWlng']):
                geo_sq.add(
                    SQ(coverage_east__lte=float(self.cleaned_data['NElng'])),
                    SQ.AND)
                geo_sq.add(
                    SQ(coverage_east__gte=float(self.cleaned_data['SWlng'])),
                    SQ.AND)
            else:
                geo_sq.add(
                    SQ(coverage_east__gte=float(self.cleaned_data['SWlng'])),
                    SQ.AND)
                geo_sq.add(SQ(coverage_east__lte=float(180)), SQ.OR)
                geo_sq.add(
                    SQ(coverage_east__lte=float(self.cleaned_data['NElng'])),
                    SQ.AND)
                geo_sq.add(SQ(coverage_east__gte=float(-180)), SQ.AND)

        if self.cleaned_data['NElat'] and self.cleaned_data['SWlat']:
            geo_sq.add(
                SQ(coverage_north__lte=float(self.cleaned_data['NElat'])),
                SQ.AND)
            geo_sq.add(
                SQ(coverage_north__gte=float(self.cleaned_data['SWlat'])),
                SQ.AND)

        if geo_sq:
            sqs = sqs.filter(geo_sq)

        # Check to see if a start_date was chosen.
        if self.cleaned_data['start_date']:
            sqs = sqs.filter(
                coverage_start_date__gte=self.cleaned_data['start_date'])

        # Check to see if an end_date was chosen.
        if self.cleaned_data['end_date']:
            sqs = sqs.filter(
                coverage_end_date__lte=self.cleaned_data['end_date'])

        author_sq = SQ()
        subjects_sq = SQ()
        resource_sq = SQ()
        public_sq = SQ()
        owner_sq = SQ()
        discoverable_sq = SQ()
        published_sq = SQ()
        variable_sq = SQ()
        sample_medium_sq = SQ()
        units_name_sq = SQ()
        # We need to process each facet to ensure that the field name and the
        # value are quoted correctly and separately:

        for facet in self.selected_facets:
            if ":" not in facet:
                continue

            field, value = facet.split(":", 1)

            if value:
                if "creators" in field:
                    author_sq.add(SQ(creators=sqs.query.clean(value)), SQ.OR)

                elif "subjects" in field:
                    subjects_sq.add(SQ(subjects=sqs.query.clean(value)), SQ.OR)

                elif "resource_type" in field:
                    resource_sq.add(SQ(resource_type=sqs.query.clean(value)),
                                    SQ.OR)

                elif "public" in field:
                    public_sq.add(SQ(public=sqs.query.clean(value)), SQ.OR)

                elif "owners_names" in field:
                    owner_sq.add(SQ(owners_names=sqs.query.clean(value)),
                                 SQ.OR)

                elif "discoverable" in field:
                    discoverable_sq.add(
                        SQ(discoverable=sqs.query.clean(value)), SQ.OR)

                elif "published" in field:
                    published_sq.add(SQ(published=sqs.query.clean(value)),
                                     SQ.OR)

                elif 'variable_names' in field:
                    variable_sq.add(SQ(variable_names=sqs.query.clean(value)),
                                    SQ.OR)

                elif 'sample_mediums' in field:
                    sample_medium_sq.add(
                        SQ(sample_mediums=sqs.query.clean(value)), SQ.OR)

                elif 'units_names' in field:
                    units_name_sq.add(SQ(units_names=sqs.query.clean(value)),
                                      SQ.OR)

                else:
                    continue

        if author_sq:
            sqs = sqs.filter(author_sq)
        if subjects_sq:
            sqs = sqs.filter(subjects_sq)
        if resource_sq:
            sqs = sqs.filter(resource_sq)
        if public_sq:
            sqs = sqs.filter(public_sq)
        if owner_sq:
            sqs = sqs.filter(owner_sq)
        if discoverable_sq:
            sqs = sqs.filter(discoverable_sq)
        if published_sq:
            sqs = sqs.filter(published_sq)
        if variable_sq:
            sqs = sqs.filter(variable_sq)
        if sample_medium_sq:
            sqs = sqs.filter(sample_medium_sq)
        if units_name_sq:
            sqs = sqs.filter(units_name_sq)

        return sqs
Code example #29
File: views.py Project: SpeeDly/partytask
def search(request):
    query = request.GET.get("q", None)
    lat = request.GET.get("lat", None)
    lng = request.GET.get("lng", None)
    budget = request.GET.get("budget", None)
    gender = request.GET.get("gender", None)
    rating = request.GET.get("rating", None)
    date = request.GET.get("date", None)
    hour = request.GET.get("hour", None)
    tags = request.GET.get("tags", None)
    sorted_by = request.GET.get("sorted_by", None)
    start_price = 0
    end_price = 15000
    point = Point(23.31326937672202, 42.68336526966131)
    type_of_order = ['listing_id', '-listing_id', '-likes', '-comments']
    if query:
        query = query.split(" ")
        if tags:
            tags = tags.split(',')
            query = query + tags
        sq = SQ()
        for q in query:
            sq.add(SQ(tags__contains=q), SQ.OR)
            sq.add(SQ(title__contains=q), SQ.OR)
            sq.add(SQ(description__contains=q), SQ.OR)
    else:
        sq = SQ()

    if budget:
        start_price = int(budget.split('-')[0])
        end_price = int(budget.split('-')[1])

    if gender:
        gender = int(gender)
        if gender == 0:
            gender = [0]
        elif gender == 2:
            gender = [1]
        else:
            gender = [0,1,2]
    else:
        gender = [0,1,2]


    # if lat and lng:
    #     lat = float(lat)
    #     lng = float(lng)
        # point = Point(lat, lng)
        
    if date and date.isdigit():
        date = int(date)
    else:
        date = None

    if not (hour is None or hour == "-1"):
        hour = int(hour)
    else:
        hour = -1

    if rating:
        rating = int(rating)
    else:
        rating = 0

    if sorted_by:
        sorted_by = type_of_order[int(sorted_by)]
    else:
        sorted_by = 'listing_id'


    partial_query = SearchQuerySet().models(Listing).filter(sq)
    partial_query = [l.price for l in partial_query]

    if len(partial_query) >= 2:
        price_list = [min(partial_query), max(partial_query)]
    else:
        price_list = [0, 500]
    print(price_list)

    price_list = [min(price_list), max(price_list)]
    _listings = SearchQuerySet().models(Listing).filter(sq).filter(
                                                    gender__in=gender, 
                                                    price__gte=start_price, 
                                                    price__lte=end_price, 
                                                    status=1, 
                                                    rating__gte=rating)
    print("numbers", len(_listings))
    # .dwithin('location', point, D(km=1500000))

    if date and not (hour is not None and hour == -1):
        ''' hour_in_seconds is the offset, in seconds, from the given date (a UTC timestamp) to the requested start time '''
        work_days = [("mon_start", "mon_end"), ("tues_start", "tues_end"), ("wed_start", "wed_end"), ("thurs_start", "thurs_end"), ("fri_start", "fri_end"), ("sat_start", "sat_end"), ("sun_start", "sun_end")]
        
        # get the IDs of the already-filtered listings and format them as (1, 1, 3, 3, 5)
        listings_ids = [l.listing_id for l in _listings]
        listings_ids = str(listings_ids)[1:-1] if listings_ids else 'NULL'  # guard against an empty IN () clause

        # get the index of the day from the week
        week_days = datetime.datetime.fromtimestamp(date).strftime('%w')
        week_days = work_days[int(week_days)]

        # hour in seconds
        hour_in_seconds = 28800 + hour*1800

        # start_range is the requested booking start time, in seconds since the epoch
        start_range = date + hour_in_seconds

        query = "SELECT DISTINCT listing.id, listing.title, listing.likes, listing.price, listing.artist_id, listing.comments, listing.picture_cover, artist.lat, artist.lng, artist.style, artist.avatar"
        query += " FROM listings_listing AS listing"
        query += " JOIN artists_artist AS artist ON artist.id = listing.artist_id"
        query += " JOIN artists_worktime AS worktime ON worktime.artist_id = artist.id"
        query += " LEFT JOIN booking_booking AS booking ON booking.artist_id = artist.id"
        query += " LEFT JOIN artists_busy AS busy ON busy.artist_id = artist.id"
        query += " WHERE listing.id IN ({0})".format(listings_ids)
        query += " AND worktime.{0} <= {1} AND worktime.{2} >= ({1} + listing.duration/1800)".format(week_days[0], hour, week_days[1])
        query += " AND (booking.start_time >= listing.duration + {0}".format(start_range)
        query += " OR booking.end_time <= {0} OR booking.start_time IS NULL)".format(start_range)
        query += " AND (busy.start_time >= listing.duration + {0}".format(start_range)
        query += " OR busy.end_time <= {0} OR busy.start_time IS NULL)".format(start_range)


        cursor = connection.cursor()
        cursor.execute(query)
        _listings = dictfetchall(cursor)

        listings = []
        listing = {}
        for l in _listings:
            listing = {}
            listing["lat"] = l["lat"]
            listing["lng"] = l["lng"]
            listing["id"] = l["id"]
            listing["picture"] = l["picture_cover"]
            listing["style"] = STYLE_INDEXES[int(l["style"])-1][1]
            listing["title"] = l["title"]
            listing["likes"] = l["likes"]
            listing["price"] = int(l["price"])
            listing["comments"] = l["comments"]
            listing["artist_id"] = l["artist_id"]
            listing["avatar"] = MEDIA_ROOT + l["avatar"][7:]
            listings.append(listing)
        print("1")
    elif date:
        work_days = [("mon_start", "mon_end"), ("tues_start", "tues_end"), ("wed_start", "wed_end"), ("thurs_start", "thurs_end"), ("fri_start", "fri_end"), ("sat_start", "sat_end"), ("sun_start", "sun_end")]
        
        listings_ids = [l.listing_id for l in _listings]
        listings_ids = str(listings_ids)[1:-1] if listings_ids else 'NULL'  # guard against an empty IN () clause

        week_days = datetime.datetime.fromtimestamp(date).strftime('%w')
        week_days = work_days[int(week_days)]

        query = "SELECT DISTINCT listing.id, listing.title, listing.likes, listing.price, listing.artist_id, listing.comments, listing.picture_cover, artist.lat, artist.lng, artist.style, artist.avatar"
        query += " FROM listings_listing AS listing"
        query += " JOIN artists_artist AS artist ON artist.id = listing.artist_id"
        query += " JOIN artists_worktime AS worktime ON worktime.artist_id = artist.id"
        query += " LEFT JOIN booking_booking AS booking ON booking.artist_id = artist.id"
        query += " LEFT JOIN artists_busy AS busy ON busy.artist_id = artist.id"
        query += " WHERE listing.id IN ({0})".format(listings_ids)
        query += " AND NOT worktime.{0} = -1".format(week_days[0])

        cursor = connection.cursor()
        cursor.execute(query)
        _listings = dictfetchall(cursor)

        listings = []
        listing = {}
        for l in _listings:
            listing = {}
            listing["lat"] = l["lat"]
            listing["lng"] = l["lng"]
            listing["id"] = l["id"]
            listing["picture"] = l["picture_cover"]
            listing["style"] = STYLE_INDEXES[int(l["style"])-1][1]
            listing["title"] = l["title"]
            listing["likes"] = l["likes"]
            listing["price"] = int(l["price"])
            listing["comments"] = l["comments"]
            listing["artist_id"] = l["artist_id"]
            listing["avatar"] = MEDIA_ROOT + l["avatar"][7:]
            listings.append(listing)
        print("2")

    else:
        listings = []
        listing = {}
        for l in _listings:
            listing = {}
            listing["lat"] = l.location.x
            listing["lng"] = l.location.y
            listing["id"] = l.listing_id
            listing["picture"] = l.get_picture
            listing["style"] = l.style
            listing["title"] = l.title
            listing["likes"] = l.likes
            listing["price"] = int(l.price)
            listing["comments"] = l.comments
            listing["artist_id"] = l.artist_id
            listing["avatar"] = l.artist_avatar
            listings.append(listing)
        print('3')
    return render(request, 'service/service.html', {"listings": listings, "price_list": price_list})
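The two raw-SQL branches above rely on a dictfetchall helper that is not defined in this snippet; a common implementation (essentially the cursor-to-dict recipe from the Django documentation) looks like this:

def dictfetchall(cursor):
    """Return all rows from a cursor as a list of dicts keyed by column name."""
    columns = [col[0] for col in cursor.description]
    return [dict(zip(columns, row)) for row in cursor.fetchall()]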
コード例 #30
0
ファイル: views.py プロジェクト: kahihia/glamfame
def search(request):
    styles = {
        "hairstyle": "hair",
        "nails-design": "nails",
        "make-up": "make up",
    }
    query = styles.get(request.GET.get("q", None), request.GET.get("q", None))
    lat = request.GET.get("lat", None)
    lng = request.GET.get("lng", None)
    budget = request.GET.get("budget", None)
    gender = request.GET.get("gender", None)
    rating = request.GET.get("rating", None)
    date = request.GET.get("date", None)
    hour = request.GET.get("hour", None)
    tags = request.GET.get("tags", None)
    sorted_by = request.GET.get("sorted_by", None)
    point = None
    start_price = 0
    end_price = 15000
    # point = Point(23.31326937672202, 42.68336526966131)
    type_of_order = ['listing_id', '-listing_id', '-likes', '-comments']
    currencies = dict(CURRENCY)
    if query:
        query = query.split(" ")
        if tags:
            tags = tags.split(',')
            query = query + tags
        sq = SQ()
        for q in query:
            sq.add(SQ(tags__contains=q), SQ.OR)
            sq.add(SQ(title__contains=q), SQ.OR)
            sq.add(SQ(description__contains=q), SQ.OR)
    else:
        sq = SQ()

    if budget:
        start_price = int(budget.split('-')[0])
        end_price = int(budget.split('-')[1])

    if gender:
        gender = int(gender)
        if gender == 0:
            gender = [0]
        elif gender == 2:
            gender = [2]
        else:
            gender = [0, 1, 2]
    else:
        gender = [0, 1, 2]

    if lat and lng and lat != 'undefined' and lng != 'undefined':
        print(lat, lng)
        lat = float(lat)
        lng = float(lng)
        point = Point(lat, lng)

    if date and date.isdigit():
        date = int(date)
    else:
        date = None

    if not (hour is None or hour == "-1"):
        hour = int(hour)
    else:
        hour = -1

    if rating:
        rating = int(rating)
    else:
        rating = 0

    if sorted_by:
        sorted_by = type_of_order[int(sorted_by)]
    else:
        sorted_by = 'listing_id'

    partial_query = SearchQuerySet().models(Listing).filter(sq)
    partial_query = [l.price for l in partial_query]

    if len(partial_query) >= 2:
        price_list = [min(partial_query), max(partial_query)]
    else:
        price_list = [0, 500]

    price_list = [min(price_list), max(price_list)]
    if point:
        _listings = SearchQuerySet().models(Listing).filter(sq).filter(
            gender__in=gender,
            price__gte=start_price,
            price__lte=end_price,
            status=1,
            rating__gte=rating).dwithin('location', point, D(km=1500000))
    else:
        _listings = SearchQuerySet().models(Listing).filter(sq).filter(
            gender__in=gender,
            price__gte=start_price,
            price__lte=end_price,
            status=1,
            rating__gte=rating)

    if date and not (hour is not None and hour == -1):
        ''' hour_in_seconds is the offset, in seconds, from the given date (a UTC timestamp) to the requested start time '''
        work_days = [("mon_start", "mon_end"), ("tues_start", "tues_end"),
                     ("wed_start", "wed_end"), ("thurs_start", "thurs_end"),
                     ("fri_start", "fri_end"), ("sat_start", "sat_end"),
                     ("sun_start", "sun_end")]

        # get the IDs of the already-filtered listings and format them as (1, 1, 3, 3, 5)
        listings_ids = [l.listing_id for l in _listings]
        listings_ids = str(listings_ids)[1:-1] if listings_ids else 'NULL'

        # get the index of the day from the week
        week_days = datetime.datetime.fromtimestamp(date).strftime('%w')
        week_days = work_days[int(week_days)]

        # hour in seconds
        hour_in_seconds = 28800 + hour * 1800

        # start_range is the requested booking start time, in seconds since the epoch
        start_range = date + hour_in_seconds

        query = '''SELECT DISTINCT listing.id, listing.title, listing.likes, listing.price, listing.artist_id, listing.comments, listing.currency, listing.picture_cover, artist.lat, artist.lng, artist.style, artist.avatar, artist_user.first_name as artist_name, salon.id as salon_id, salon.avatar as salon_avatar, salon_user.first_name as salon_name
        FROM listings_listing AS listing
        JOIN artists_artist AS artist ON artist.id = listing.artist_id
        JOIN artists_worktime AS worktime ON worktime.artist_id = artist.id
        LEFT JOIN booking_booking AS booking ON booking.artist_id = artist.id
        LEFT JOIN artists_busy AS busy ON busy.artist_id = artist.id
        LEFT JOIN salons_salon AS salon ON artist.salon_id = salon.id
        LEFT JOIN auth_user AS salon_user ON salon.user_id = salon_user.id
        LEFT JOIN auth_user AS artist_user ON artist.user_id = artist_user.id
        WHERE listing.id IN ({listings_ids})
        AND worktime.{first_week_day} <= {hour} AND worktime.{second_week_day} >= ({hour} + listing.duration/1800)
        AND (booking.start_time >= listing.duration + {start_range}
        OR booking.end_time <= {start_range} OR booking.start_time IS NULL)
        AND (busy.start_time >= listing.duration + {start_range}
        OR busy.end_time <= {start_range} OR busy.start_time IS NULL)'''.format(
            listings_ids=listings_ids,
            first_week_day=week_days[0],
            hour=hour,
            second_week_day=week_days[1],
            start_range=start_range)

        cursor = connection.cursor()
        cursor.execute(query)
        _listings = dictfetchall(cursor)

        listings = []
        listing = {}
        for l in _listings:
            listing = {
                "lat": l["lat"],
                "lng": l["lng"],
                "id": l["id"],
                "picture": l["picture_cover"],
                "style": STYLE_INDEXES[l["style"]][1],
                "title": l["title"],
                "likes": l["likes"],
                "price": l["price"],
                "currency": currencies[l["currency"]],
                "comments": l["comments"],
                "artist_id": l["artist_id"],
                "artist_name": l["artist_name"],
                "avatar": MEDIA_ROOT + l["avatar"][7:] if l["avatar"] else '',
                "salon_id": l["salon_id"],
                "salon_name": l["salon_name"],
                "salon_avatar": MEDIA_ROOT + l["salon_avatar"][7:] if l["salon_avatar"] else ''
            }
            listings.append(listing)
    elif date:
        work_days = [("mon_start", "mon_end"), ("tues_start", "tues_end"),
                     ("wed_start", "wed_end"), ("thurs_start", "thurs_end"),
                     ("fri_start", "fri_end"), ("sat_start", "sat_end"),
                     ("sun_start", "sun_end")]

        listings_ids = [l.listing_id for l in _listings]
        listings_ids = str(listings_ids)[1:-1] if listings_ids else 'NULL'

        week_days = datetime.datetime.fromtimestamp(date).strftime('%w')
        week_days = work_days[int(week_days)]

        query = '''SELECT DISTINCT listing.id, listing.title, listing.likes, listing.price, listing.artist_id, listing.comments, listing.currency, listing.picture_cover, artist.lat, artist.lng, artist.style, artist.avatar, artist_user.first_name as artist_name, salon.id as salon_id, salon.avatar as salon_avatar, salon_user.first_name as salon_name
            FROM listings_listing AS listing
            JOIN artists_artist AS artist ON artist.id = listing.artist_id
            JOIN artists_worktime AS worktime ON worktime.artist_id = artist.id
            LEFT JOIN booking_booking AS booking ON booking.artist_id = artist.id
            LEFT JOIN artists_busy AS busy ON busy.artist_id = artist.id
            LEFT JOIN salons_salon AS salon ON artist.salon_id = salon.id
            LEFT JOIN auth_user AS salon_user ON salon.user_id = salon_user.id
            LEFT JOIN auth_user AS artist_user ON artist.user_id = artist_user.id
            WHERE listing.id IN ({listings_ids})
            AND NOT worktime.{week_day} = -1'''.format(
            listings_ids=listings_ids, week_day=week_days[0])

        cursor = connection.cursor()
        cursor.execute(query)
        _listings = dictfetchall(cursor)

        listings = []
        listing = {}
        for l in _listings:
            listing = {
                "lat": l["lat"],
                "lng": l["lng"],
                "id": l["id"],
                "picture": l["picture_cover"],
                "style": STYLE_INDEXES[l["style"]][1],
                "title": l["title"],
                "likes": l["likes"],
                "price": l["price"],
                "currency": currencies[l["currency"]],
                "comments": l["comments"],
                "artist_id": l["artist_id"],
                "artist_name": l["artist_name"],
                "avatar": MEDIA_ROOT + l["avatar"][7:] if l["avatar"] else '',
                "salon_id": l["salon_id"],
                "salon_name": l["salon_name"],
                "salon_avatar": MEDIA_ROOT + l["salon_avatar"][7:] if l["salon_avatar"] else ''
            }
            listings.append(listing)

    else:
        listings = []
        listing = {}
        for l in _listings:
            listing = {
                "lat": l.location.x,
                "lng": l.location.y,
                "id": l.listing_id,
                "picture": l.get_picture,
                "style": l.style,
                "title": l.title,
                "likes": l.likes,
                "price": l.price,
                "currency": currencies[l.currency],
                "comments": l.comments,
                "artist_id": l.artist_id,
                "artist_name": l.artist_name,
                "avatar": l.artist_avatar,
                "salon_id": l.salon_id,
                "salon_name": l.salon_name,
                "salon_avatar": l.salon_avatar,
            }
            listings.append(listing)

    artists_ids = [l["artist_id"] for l in listings]
    artists_ratings = Review.objects.filter(
        artist_id__in=artists_ids).values("artist_id").annotate(
            average_rating=Avg('rating'))
    final_map = {}
    for e in artists_ratings:
        final_map[e["artist_id"]] = e["average_rating"]

    for l in listings:
        rating = final_map.get(l['artist_id'], None)
        l["artist_rating"] = rating

    return render(request, 'service/service.html', {
        "listings": listings,
        "price_list": price_list
    })
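The raw query above splices user-derived numbers into the SQL with str.format(). Below is a sketch (not from the original project) of the same step with the value parts passed through cursor.execute()'s params argument instead; listing_id_list is assumed to be the list of integer IDs collected from _listings, and the day-column names still have to come from the whitelisted work_days tuples because placeholders only cover values, not identifiers:

from django.db import connection

if listing_id_list:  # avoid an empty IN () clause
    placeholders = ", ".join(["%s"] * len(listing_id_list))
    sql = (
        "SELECT DISTINCT listing.id"
        " FROM listings_listing AS listing"
        " JOIN artists_worktime AS worktime ON worktime.artist_id = listing.artist_id"
        " WHERE listing.id IN ({ids})"
        " AND worktime.{day_start} <= %s"
        " AND worktime.{day_end} >= (%s + listing.duration/1800)"
    ).format(ids=placeholders, day_start=week_days[0], day_end=week_days[1])
    cursor = connection.cursor()
    cursor.execute(sql, listing_id_list + [hour, hour])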
コード例 #31
0
ファイル: views.py プロジェクト: SpeeDly/glamfame
def search(request):
    styles = {
        "hairstyle": "hair",
        "nails-design": "nails",
        "make-up": "make up",
    }
    query = styles.get(request.GET.get("q", None), request.GET.get("q", None))
    lat = request.GET.get("lat", None)
    lng = request.GET.get("lng", None)
    budget = request.GET.get("budget", None)
    gender = request.GET.get("gender", None)
    rating = request.GET.get("rating", None)
    date = request.GET.get("date", None)
    hour = request.GET.get("hour", None)
    tags = request.GET.get("tags", None)
    sorted_by = request.GET.get("sorted_by", None)
    point = None
    start_price = 0
    end_price = 15000
    # point = Point(23.31326937672202, 42.68336526966131)
    type_of_order = ['listing_id', '-listing_id', '-likes', '-comments']
    currencies = dict(CURRENCY)
    if query:
        query = query.split(" ")
        if tags:
            tags = tags.split(',')
            query = query + tags
        sq = SQ()
        for q in query:
            sq.add(SQ(tags__contains=q), SQ.OR)
            sq.add(SQ(title__contains=q), SQ.OR)
            sq.add(SQ(description__contains=q), SQ.OR)
    else:
        sq = SQ()

    if budget:
        start_price = int(budget.split('-')[0])
        end_price = int(budget.split('-')[1])

    if gender:
        gender = int(gender)
        if gender == 0:
            gender = [0]
        elif gender == 2:
            gender = [2]
        else:
            gender = [0, 1, 2]
    else:
        gender = [0, 1, 2]

    if lat and lng and lat != 'undefined' and lng != 'undefined':
        print(lat, lng)
        lat = float(lat)
        lng = float(lng)
        point = Point(lat, lng)

    if date and date.isdigit():
        date = int(date)
    else:
        date = None

    if not (hour is None or hour == "-1"):
        hour = int(hour)
    else:
        hour = -1

    if rating:
        rating = int(rating)
    else:
        rating = 0

    if sorted_by:
        sorted_by = type_of_order[int(sorted_by)]
    else:
        sorted_by = 'listing_id'

    partial_query = SearchQuerySet().models(Listing).filter(sq)
    partial_query = [l.price for l in partial_query]

    if len(partial_query) >= 2:
        price_list = [min(partial_query), max(partial_query)]
    else:
        price_list = [0, 500]

    price_list = [min(price_list), max(price_list)]
    if point:
        _listings = SearchQuerySet().models(Listing).filter(sq).filter(
            gender__in=gender,
            price__gte=start_price,
            price__lte=end_price,
            status=1,
            rating__gte=rating).dwithin('location', point, D(km=1500000))
    else:
        _listings = SearchQuerySet().models(Listing).filter(sq).filter(
            gender__in=gender,
            price__gte=start_price,
            price__lte=end_price,
            status=1,
            rating__gte=rating)

    if date and not (hour is not None and hour == -1):
        ''' hour_in_seconds is the offset, in seconds, from the given date (a UTC timestamp) to the requested start time '''
        work_days = [("mon_start", "mon_end"), ("tues_start", "tues_end"), ("wed_start", "wed_end"), ("thurs_start", "thurs_end"), ("fri_start", "fri_end"), ("sat_start", "sat_end"), ("sun_start", "sun_end")]

        # get the IDs of the already-filtered listings and format them as (1, 1, 3, 3, 5)
        listings_ids = [l.listing_id for l in _listings]
        listings_ids = str(listings_ids)[1:-1] if listings_ids else 'NULL'

        # get the index of the day from the week
        week_days = datetime.datetime.fromtimestamp(date).strftime('%w')
        week_days = work_days[int(week_days)]

        # hour in seconds
        hour_in_seconds = 28800 + hour*1800

        # start_range is the requested booking start time, in seconds since the epoch
        start_range = date + hour_in_seconds

        query = '''SELECT DISTINCT listing.id, listing.title, listing.likes, listing.price, listing.artist_id, listing.comments, listing.currency, listing.picture_cover, artist.lat, artist.lng, artist.style, artist.avatar, artist_user.first_name as artist_name, salon.id as salon_id, salon.avatar as salon_avatar, salon_user.first_name as salon_name
        FROM listings_listing AS listing
        JOIN artists_artist AS artist ON artist.id = listing.artist_id
        JOIN artists_worktime AS worktime ON worktime.artist_id = artist.id
        LEFT JOIN booking_booking AS booking ON booking.artist_id = artist.id
        LEFT JOIN artists_busy AS busy ON busy.artist_id = artist.id
        LEFT JOIN salons_salon AS salon ON artist.salon_id = salon.id
        LEFT JOIN auth_user AS salon_user ON salon.user_id = salon_user.id
        LEFT JOIN auth_user AS artist_user ON artist.user_id = artist_user.id
        WHERE listing.id IN ({listings_ids})
        AND worktime.{first_week_day} <= {hour} AND worktime.{second_week_day} >= ({hour} + listing.duration/1800)
        AND (booking.start_time >= listing.duration + {start_range}
        OR booking.end_time <= {start_range} OR booking.start_time IS NULL)
        AND (busy.start_time >= listing.duration + {start_range}
        OR busy.end_time <= {start_range} OR busy.start_time IS NULL)'''.format(listings_ids=listings_ids, first_week_day=week_days[0], hour=hour, second_week_day=week_days[1], start_range=start_range)

        cursor = connection.cursor()
        cursor.execute(query)
        _listings = dictfetchall(cursor)

        listings = []
        listing = {}
        for l in _listings:
            listing = {
                "lat": l["lat"],
                "lng": l["lng"],
                "id": l["id"],
                "picture": l["picture_cover"],
                "style": STYLE_INDEXES[l["style"]][1],
                "title": l["title"],
                "likes": l["likes"],
                "price": l["price"],
                "currency": currencies[l["currency"]],
                "comments": l["comments"],
                "artist_id": l["artist_id"],
                "artist_name": l["artist_name"],
                "avatar": MEDIA_ROOT + l["avatar"][7:] if l["avatar"] else '',
                "salon_id": l["salon_id"],
                "salon_name": l["salon_name"],
                "salon_avatar": MEDIA_ROOT + l["salon_avatar"][7:] if l["salon_avatar"] else ''
            }
            listings.append(listing)
    elif date:
        work_days = [("mon_start", "mon_end"), ("tues_start", "tues_end"), ("wed_start", "wed_end"), ("thurs_start", "thurs_end"), ("fri_start", "fri_end"), ("sat_start", "sat_end"), ("sun_start", "sun_end")]

        listings_ids = [l.listing_id for l in _listings]
        listings_ids = str(listings_ids)[1:-1] if listings_ids else 'NULL'

        week_days = datetime.datetime.fromtimestamp(date).strftime('%w')
        week_days = work_days[int(week_days)]

        query = '''SELECT DISTINCT listing.id, listing.title, listing.likes, listing.price, listing.artist_id, listing.comments, listing.currency, listing.picture_cover, artist.lat, artist.lng, artist.style, artist.avatar, artist_user.first_name as artist_name, salon.id as salon_id, salon.avatar as salon_avatar, salon_user.first_name as salon_name
            FROM listings_listing AS listing
            JOIN artists_artist AS artist ON artist.id = listing.artist_id
            JOIN artists_worktime AS worktime ON worktime.artist_id = artist.id
            LEFT JOIN booking_booking AS booking ON booking.artist_id = artist.id
            LEFT JOIN artists_busy AS busy ON busy.artist_id = artist.id
            LEFT JOIN salons_salon AS salon ON artist.salon_id = salon.id
            LEFT JOIN auth_user AS salon_user ON salon.user_id = salon_user.id
            LEFT JOIN auth_user AS artist_user ON artist.user_id = artist_user.id
            WHERE listing.id IN ({listings_ids})
            AND NOT worktime.{week_day} = -1'''.format(listings_ids=listings_ids, week_day=week_days[0])

        cursor = connection.cursor()
        cursor.execute(query)
        _listings = dictfetchall(cursor)

        listings = []
        listing = {}
        for l in _listings:
            listing = {
                "lat": l["lat"],
                "lng": l["lng"],
                "id": l["id"],
                "picture": l["picture_cover"],
                "style": STYLE_INDEXES[l["style"]][1],
                "title": l["title"],
                "likes": l["likes"],
                "price": l["price"],
                "currency": currencies[l["currency"]],
                "comments": l["comments"],
                "artist_id": l["artist_id"],
                "artist_name": l["artist_name"],
                "avatar": MEDIA_ROOT + l["avatar"][7:] if l["avatar"] else '',
                "salon_id": l["salon_id"],
                "salon_name": l["salon_name"],
                "salon_avatar": MEDIA_ROOT + l["salon_avatar"][7:] if l["salon_avatar"] else ''
            }
            listings.append(listing)

    else:
        listings = []
        listing = {}
        for l in _listings:
            listing = {
                "lat": l.location.x,
                "lng": l.location.y,
                "id": l.listing_id,
                "picture": l.get_picture,
                "style": l.style,
                "title": l.title,
                "likes": l.likes,
                "price": l.price,
                "currency": currencies[l.currency],
                "comments": l.comments,
                "artist_id": l.artist_id,
                "artist_name": l.artist_name,
                "avatar": l.artist_avatar,
                "salon_id": l.salon_id,
                "salon_name": l.salon_name,
                "salon_avatar": l.salon_avatar,
            }
            listings.append(listing)

    artists_ids = [l["artist_id"] for l in listings]
    artists_ratings = Review.objects.filter(artist_id__in=artists_ids).values("artist_id").annotate(average_rating=Avg('rating'))
    final_map = {}
    for e in artists_ratings:
        final_map[e["artist_id"]] = e["average_rating"]

    for l in listings:
        rating = final_map.get(l['artist_id'], None)
        l["artist_rating"] = rating

    return render(request, 'service/service.html', {"listings": listings, "price_list": price_list})
コード例 #32
0
def filter_annotations(request, sqs):
    # The current situation makes no sense: authenticated users can see fewer
    # annotations than anonymous users. Let's repair this by returning an
    # empty queryset if the current user has not logged in.
    if not request.user.is_authenticated():
        return sqs.none()
    # category
    category = request.GET.get('category')
    if category:
        sqs = sqs.filter(category__exact=category)
    # location
    bbox = request.GET.get('bbox')
    bottom_left = request.GET.get('bottom_left')
    top_right = request.GET.get('top_right')
    north = request.GET.get('north')
    east = request.GET.get('east')
    south = request.GET.get('south')
    west = request.GET.get('west')
    if bbox:
        if bbox == 'test':
            bottom_left = '48.0', '4.0'
            top_right = '52.0', '10.0'
        else:
            # lon_min, lat_min, lon_max, lat_max
            # west, south, east, north
            x_min, y_min, x_max, y_max = bbox.split(',')
            bottom_left = y_min, x_min
            top_right = y_max, x_max
    elif bottom_left and top_right:
        bottom_left = bottom_left.split(',')
        top_right = top_right.split(',')
    elif north and east and south and west:
        bottom_left = south, west
        top_right = north, east
    else:
        bottom_left = None
        top_right = None
    if bottom_left and top_right:
        bottom_left = Point(float(bottom_left[0]), float(bottom_left[1]))
        top_right = Point(float(top_right[0]), float(top_right[1]))
        sqs = sqs.within('location', bottom_left, top_right)
    # As decided during the UAT on 2014-09-09: no more private annotations,
    # all annotations will be public. Hence, we don't need filtering.
    # Well, not entirely: public for authenticated users only.
    ### user
    ##username = request.user.username
    ### allow username overriding in DEBUG mode
    ### this is a possible security leak
    ##username_override = request.GET.get('username_override')
    ##if settings.DEBUG and username_override:
    ##    username = username_override
    ##sqs = sqs.filter(
    ##    # either private and linked to the current user
    ##    SQ(username__exact=username, visibility=Visibility.PRIVATE) |
    ##    # or public
    ##    SQ(visibility=Visibility.PUBLIC)
    ##)
    # relation to model instances
    the_model_name = request.GET.get('model_name')
    the_model_pk = request.GET.get('model_pk')
    if the_model_name and the_model_pk:
        sqs = sqs.filter(the_model_name__exact=the_model_name,
                         the_model_pk__exact=the_model_pk)
    else:
        # allow multiple models and pks
        model_names_pks = request.GET.get('model_names_pks')
        if model_names_pks:
            model_names_pks = model_names_pks.split(';')
            sq = SQ()
            for model_name_pk in model_names_pks:
                model_name, model_pk = model_name_pk.split(',')
                sq.add(
                    SQ(the_model_name__exact=model_name,
                       the_model_pk__exact=model_pk), SQ.OR)
            sqs = sqs.filter(sq)
    # date range
    datetime_from = request.GET.get('datetime_from')
    if datetime_from:
        datetime_from = dateutil.parser.parse(datetime_from)
        sqs = sqs.filter(datetime_from__gte=datetime_from)
    datetime_until = request.GET.get('datetime_until')
    if datetime_until:
        datetime_until = dateutil.parser.parse(datetime_until)
        sqs = sqs.filter(datetime_until__lte=datetime_until)
    # full text
    text = request.GET.get('text')
    if text:
        sqs = sqs.filter(text__contains=text)
    tags = request.GET.get('tags')
    if tags:
        sqs = sqs.filter(tags__contains=tags)

    return sqs
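A usage sketch for filter_annotations with hypothetical parameter values; RequestFactory is Django's standard test request builder, and the attached user must be authenticated or the function returns an empty queryset:

from django.test import RequestFactory
from haystack.query import SearchQuerySet

factory = RequestFactory()
request = factory.get('/annotations/', {
    'category': 'measurement',                  # hypothetical category value
    'bbox': '4.0,48.0,10.0,52.0',               # lon_min, lat_min, lon_max, lat_max
    'model_names_pks': 'ship,12;harbour,7',     # hypothetical model/pk pairs
    'datetime_from': '2014-01-01T00:00:00',
})
request.user = some_authenticated_user          # placeholder for any logged-in User instance
filtered = filter_annotations(request, SearchQuerySet())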
コード例 #33
0
ファイル: views.py プロジェクト: SpeeDly/partytask
def search(request):
    query = request.GET.get("q", None)
    lat = request.GET.get("lat", None)
    lng = request.GET.get("lng", None)
    budget = request.GET.get("budget", None)
    gender = request.GET.get("gender", None)
    rating = request.GET.get("rating", None)
    date = request.GET.get("date", None)
    hour = request.GET.get("hour", None)
    tags = request.GET.get("tags", None)
    sorted_by = request.GET.get("sorted_by", None)
    start_price = 0
    end_price = 15000
    point = Point(23.31326937672202, 42.68336526966131)
    type_of_order = ['listing_id', '-listing_id', '-likes', '-comments']
    if query:
        query = query.split(" ")
        if tags:
            tags = tags.split(',')
            query = query + tags
        sq = SQ()
        for q in query:
            sq.add(SQ(tags__contains=q), SQ.OR)
            sq.add(SQ(title__contains=q), SQ.OR)
            sq.add(SQ(description__contains=q), SQ.OR)
    else:
        sq = SQ()

    if budget:
        start_price = int(budget.split('-')[0])
        end_price = int(budget.split('-')[1])

    if gender:
        gender = int(gender)
        if gender == 0:
            gender = [0]
        elif gender == 2:
            gender = [1]
        else:
            gender = [0, 1, 2]
    else:
        gender = [0, 1, 2]

    # if lat and lng:
    #     lat = float(lat)
    #     lng = float(lng)
    # point = Point(lat, lng)

    if date and date.isdigit():
        date = int(date)
    else:
        date = None

    if not (hour is None or hour == "-1"):
        hour = int(hour)
    else:
        hour = -1

    if rating:
        rating = int(rating)
    else:
        rating = 0

    if sorted_by:
        sorted_by = type_of_order[int(sorted_by)]
    else:
        sorted_by = 'listing_id'

    partial_query = SearchQuerySet().models(Listing).filter(sq)
    partial_query = [l.price for l in partial_query]

    if len(partial_query) >= 2:
        price_list = [min(partial_query), max(partial_query)]
    else:
        price_list = [0, 500]
    print(price_list)

    price_list = [min(price_list), max(price_list)]
    _listings = SearchQuerySet().models(Listing).filter(sq).filter(
        gender__in=gender,
        price__gte=start_price,
        price__lte=end_price,
        status=1,
        rating__gte=rating)
    print("numbers", len(_listings))
    # .dwithin('location', point, D(km=1500000))

    if date and not (hour is not None and hour == -1):
        ''' hour_in_seconds is the offset, in seconds, from the given date (a UTC timestamp) to the requested start time '''
        work_days = [("mon_start", "mon_end"), ("tues_start", "tues_end"),
                     ("wed_start", "wed_end"), ("thurs_start", "thurs_end"),
                     ("fri_start", "fri_end"), ("sat_start", "sat_end"),
                     ("sun_start", "sun_end")]

        # get the IDs of the already-filtered listings and format them as (1, 1, 3, 3, 5)
        listings_ids = [l.listing_id for l in _listings]
        listings_ids = str(listings_ids)[1:-1] if listings_ids else 'NULL'  # guard against an empty IN () clause

        # get the index of the day from the week
        week_days = datetime.datetime.fromtimestamp(date).strftime('%w')
        week_days = work_days[int(week_days)]

        # hour in seconds
        hour_in_seconds = 28800 + hour * 1800

        # start_range is the requested booking start time, in seconds since the epoch
        start_range = date + hour_in_seconds

        query = "SELECT DISTINCT listing.id, listing.title, listing.likes, listing.price, listing.artist_id, listing.comments, listing.picture_cover, artist.lat, artist.lng, artist.style, artist.avatar"
        query += " FROM listings_listing AS listing"
        query += " JOIN artists_artist AS artist ON artist.id = listing.artist_id"
        query += " JOIN artists_worktime AS worktime ON worktime.artist_id = artist.id"
        query += " LEFT JOIN booking_booking AS booking ON booking.artist_id = artist.id"
        query += " LEFT JOIN artists_busy AS busy ON busy.artist_id = artist.id"
        query += " WHERE listing.id IN ({0})".format(listings_ids)
        query += " AND worktime.{0} <= {1} AND worktime.{2} >= ({1} + listing.duration/1800)".format(
            week_days[0], hour, week_days[1])
        query += " AND (booking.start_time >= listing.duration + {0}".format(
            start_range)
        query += " OR booking.end_time <= {0} OR booking.start_time IS NULL)".format(
            start_range)
        query += " AND (busy.start_time >= listing.duration + {0}".format(
            start_range)
        query += " OR busy.end_time <= {0} OR busy.start_time IS NULL)".format(
            start_range)

        cursor = connection.cursor()
        cursor.execute(query)
        _listings = dictfetchall(cursor)

        listings = []
        listing = {}
        for l in _listings:
            listing = {}
            listing["lat"] = l["lat"]
            listing["lng"] = l["lng"]
            listing["id"] = l["id"]
            listing["picture"] = l["picture_cover"]
            listing["style"] = STYLE_INDEXES[int(l["style"]) - 1][1]
            listing["title"] = l["title"]
            listing["likes"] = l["likes"]
            listing["price"] = int(l["price"])
            listing["comments"] = l["comments"]
            listing["artist_id"] = l["artist_id"]
            listing["avatar"] = MEDIA_ROOT + l["avatar"][7:]
            listings.append(listing)
        print("1")
    elif date:
        work_days = [("mon_start", "mon_end"), ("tues_start", "tues_end"),
                     ("wed_start", "wed_end"), ("thurs_start", "thurs_end"),
                     ("fri_start", "fri_end"), ("sat_start", "sat_end"),
                     ("sun_start", "sun_end")]

        listings_ids = [l.listing_id for l in _listings]
        listings_ids = str(listings_ids)[1:-1] if listings_ids else 'NULL'  # guard against an empty IN () clause

        week_days = datetime.datetime.fromtimestamp(date).strftime('%w')
        week_days = work_days[int(week_days)]

        query = "SELECT DISTINCT listing.id, listing.title, listing.likes, listing.price, listing.artist_id, listing.comments, listing.picture_cover, artist.lat, artist.lng, artist.style, artist.avatar"
        query += " FROM listings_listing AS listing"
        query += " JOIN artists_artist AS artist ON artist.id = listing.artist_id"
        query += " JOIN artists_worktime AS worktime ON worktime.artist_id = artist.id"
        query += " LEFT JOIN booking_booking AS booking ON booking.artist_id = artist.id"
        query += " LEFT JOIN artists_busy AS busy ON busy.artist_id = artist.id"
        query += " WHERE listing.id IN ({0})".format(listings_ids)
        query += " AND NOT worktime.{0} = -1".format(week_days[0])

        cursor = connection.cursor()
        cursor.execute(query)
        _listings = dictfetchall(cursor)

        listings = []
        listing = {}
        for l in _listings:
            listing = {}
            listing["lat"] = l["lat"]
            listing["lng"] = l["lng"]
            listing["id"] = l["id"]
            listing["picture"] = l["picture_cover"]
            listing["style"] = STYLE_INDEXES[int(l["style"]) - 1][1]
            listing["title"] = l["title"]
            listing["likes"] = l["likes"]
            listing["price"] = int(l["price"])
            listing["comments"] = l["comments"]
            listing["artist_id"] = l["artist_id"]
            listing["avatar"] = MEDIA_ROOT + l["avatar"][7:]
            listings.append(listing)
        print("2")

    else:
        listings = []
        listing = {}
        for l in _listings:
            listing = {}
            listing["lat"] = l.location.x
            listing["lng"] = l.location.y
            listing["id"] = l.listing_id
            listing["picture"] = l.get_picture
            listing["style"] = l.style
            listing["title"] = l.title
            listing["likes"] = l.likes
            listing["price"] = int(l.price)
            listing["comments"] = l.comments
            listing["artist_id"] = l.artist_id
            listing["avatar"] = l.artist_avatar
            listings.append(listing)
        print('3')
    return render(request, 'service/service.html', {
        "listings": listings,
        "price_list": price_list
    })
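A small helper (a sketch, not from the original project) that makes the slot encoding used above explicit: hour_in_seconds = 28800 + hour * 1800, so slot 0 starts at 08:00 and each step adds 30 minutes.

import datetime

def slot_to_time(hour_slot):
    """Convert a booking slot index into a time of day."""
    seconds = 28800 + hour_slot * 1800
    return (datetime.datetime.min + datetime.timedelta(seconds=seconds)).time()

# slot_to_time(0) -> datetime.time(8, 0)
# slot_to_time(3) -> datetime.time(9, 30)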
コード例 #34
0
    def search(self):
        self.parse_error = None  # error return from parser
        sqs = self.searchqueryset.all().filter(replaced=False)
        if self.cleaned_data.get('q'):
            # The prior code corrected for a failed match of complete words, as documented
            # in issue #2308. This version instead uses an advanced query syntax in which
            # "word" indicates an exact match and the bare word indicates a stemmed match.
            cdata = self.cleaned_data.get('q')
            try:
                parser = ParseSQ()
                parsed = parser.parse(cdata)
                sqs = sqs.filter(parsed)
            except ValueError as e:
                sqs = self.searchqueryset.none()
                self.parse_error = "Value error: {}. No matches. Please try again.".format(str(e))
                return sqs
            except MatchingBracketsNotFoundError as e:
                sqs = self.searchqueryset.none()
                self.parse_error = "{} No matches. Please try again.".format(e.value)
                return sqs
            except MalformedDateError as e:
                sqs = self.searchqueryset.none()
                self.parse_error = "{} No matches. Please try again.".format(e.value)
                return sqs
            except FieldNotRecognizedError as e:
                sqs = self.searchqueryset.none()
                self.parse_error = \
                    ("{} Field delimiters include title, contributor, subject, etc. " +
                     "Please try again.")\
                    .format(e.value)
                return sqs
            except InequalityNotAllowedError as e:
                sqs = self.searchqueryset.none()
                self.parse_error = "{} No matches. Please try again.".format(e.value)
                return sqs

        geo_sq = None
        if self.cleaned_data['NElng'] and self.cleaned_data['SWlng']:
            if float(self.cleaned_data['NElng']) > float(self.cleaned_data['SWlng']):
                geo_sq = SQ(east__lte=float(self.cleaned_data['NElng']))
                geo_sq.add(SQ(east__gte=float(self.cleaned_data['SWlng'])), SQ.AND)
            else:
                geo_sq = SQ(east__gte=float(self.cleaned_data['SWlng']))
                geo_sq.add(SQ(east__lte=float(180)), SQ.OR)
                geo_sq.add(SQ(east__lte=float(self.cleaned_data['NElng'])), SQ.AND)
                geo_sq.add(SQ(east__gte=float(-180)), SQ.AND)

        if self.cleaned_data['NElat'] and self.cleaned_data['SWlat']:
            # latitude might be specified without longitude
            if geo_sq is None:
                geo_sq = SQ(north__lte=float(self.cleaned_data['NElat']))
            else:
                geo_sq.add(SQ(north__lte=float(self.cleaned_data['NElat'])), SQ.AND)
            geo_sq.add(SQ(north__gte=float(self.cleaned_data['SWlat'])), SQ.AND)

        if geo_sq is not None:
            sqs = sqs.filter(geo_sq)

        # Check to see if a start_date was chosen.
        start_date = self.cleaned_data['start_date']
        end_date = self.cleaned_data['end_date']

        # allow overlapping ranges: a resource matches when its coverage period
        # [cs, ce] overlaps the requested window [s, e], i.e.
        # coverage end >= requested start (ce >= s)
        # AND coverage start <= requested end (cs <= e)
        if start_date and end_date:
            sqs = sqs.filter(SQ(end_date__gte=start_date) &
                             SQ(start_date__lte=end_date))
        elif start_date:
            sqs = sqs.filter(SQ(end_date__gte=start_date))

        elif end_date:
            sqs = sqs.filter(SQ(start_date__lte=end_date))

        if self.cleaned_data['coverage_type']:
            sqs = sqs.filter(coverage_types__in=[self.cleaned_data['coverage_type']])

        creator_sq = None
        contributor_sq = None
        owner_sq = None
        subject_sq = None
        resource_type_sq = None
        availability_sq = None

        # We need to process each facet to ensure that the field name and the
        # value are quoted correctly and separately:

        for facet in self.selected_facets:
            if ":" not in facet:
                continue

            field, value = facet.split(":", 1)
            value = sqs.query.clean(value)

            if value:
                if "creator" in field:
                    if creator_sq is None:
                        creator_sq = SQ(creator__exact=value)
                    else:
                        creator_sq.add(SQ(creator__exact=value), SQ.OR)

                elif "contributor" in field:
                    if contributor_sq is None:
                        contributor_sq = SQ(contributor__exact=value)
                    else:
                        contributor_sq.add(SQ(contributor__exact=value), SQ.OR)

                elif "owner" in field:
                    if owner_sq is None:
                        owner_sq = SQ(owner__exact=value)
                    else:
                        owner_sq.add(SQ(owner__exact=value), SQ.OR)

                elif "subject" in field:
                    if subject_sq is None:
                        subject_sq = SQ(subject__exact=value)
                    else:
                        subject_sq.add(SQ(subject__exact=value), SQ.OR)

                elif "resource_type" in field:
                    if resource_type_sq is None:
                        resource_type_sq = SQ(resource_type__exact=value)
                    else:
                        resource_type_sq.add(SQ(resource_type__exact=value), SQ.OR)

                elif "availability" in field:
                    if availability_sq is None:
                        availability_sq = SQ(availability__exact=value)
                    else:
                        availability_sq.add(SQ(availability__exact=value), SQ.OR)

                else:
                    continue

        if creator_sq is not None:
            sqs = sqs.filter(creator_sq)
        if contributor_sq is not None:
            sqs = sqs.filter(contributor_sq)
        if owner_sq is not None:
            sqs = sqs.filter(owner_sq)
        if subject_sq is not None:
            sqs = sqs.filter(subject_sq)
        if resource_type_sq is not None:
            sqs = sqs.filter(resource_type_sq)
        if availability_sq is not None:
            sqs = sqs.filter(availability_sq)

        return sqs
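A usage sketch of the advanced query syntax handled above. The exact grammar belongs to this project's ParseSQ parser and is only inferred here from the comments and error messages (an assumption): a bare word is matched after stemming, a double-quoted word must match exactly, and a recognized field delimiter such as subject: restricts a term to that field.

from haystack.query import SearchQuerySet
# ParseSQ comes from the project's own query-parser module (not shown in this snippet)

parser = ParseSQ()
parsed = parser.parse('streamflow "Industrial" subject:hydrology')   # hypothetical query
sqs = SearchQuerySet().all().filter(replaced=False).filter(parsed)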
コード例 #35
0
    def search(self):
        if not self.cleaned_data.get('q'):
            sqs = self.searchqueryset.all().filter(is_replaced_by=False)
        else:
            # This corrects for a failed match of complete words, as documented in issue #2308.
            # The text__startswith=cdata matches stemmed words in documents with an unstemmed cdata.
            # The text=cdata matches stemmed words after stemming cdata as well.
            # The stem of "Industrial", according to the aggressive default stemmer, is "industri".
            # Thus a query for "Industrial" does not match the stemmed token "industri" in the
            # document via startswith, but does match via text=cdata.
            cdata = self.cleaned_data.get('q')
            sqs = self.searchqueryset.all()\
                .filter(SQ(text__startswith=cdata) | SQ(text=cdata))\
                .filter(is_replaced_by=False)

        geo_sq = None
        if self.cleaned_data['NElng'] and self.cleaned_data['SWlng']:
            if float(self.cleaned_data['NElng']) > float(self.cleaned_data['SWlng']):
                geo_sq = SQ(coverage_east__lte=float(self.cleaned_data['NElng']))
                geo_sq.add(SQ(coverage_east__gte=float(self.cleaned_data['SWlng'])), SQ.AND)
            else:
                geo_sq = SQ(coverage_east__gte=float(self.cleaned_data['SWlng']))
                geo_sq.add(SQ(coverage_east__lte=float(180)), SQ.OR)
                geo_sq.add(SQ(coverage_east__lte=float(self.cleaned_data['NElng'])), SQ.AND)
                geo_sq.add(SQ(coverage_east__gte=float(-180)), SQ.AND)

        if self.cleaned_data['NElat'] and self.cleaned_data['SWlat']:
            # latitude might be specified without longitude
            if geo_sq is None:
                geo_sq = SQ(coverage_north__lte=float(self.cleaned_data['NElat']))
            else:
                geo_sq.add(SQ(coverage_north__lte=float(self.cleaned_data['NElat'])), SQ.AND)
            geo_sq.add(SQ(coverage_north__gte=float(self.cleaned_data['SWlat'])), SQ.AND)

        if geo_sq is not None:
            sqs = sqs.filter(geo_sq)

        # Check to see if a start_date was chosen.
        if self.cleaned_data['start_date']:
            sqs = sqs.filter(coverage_start_date__gte=self.cleaned_data['start_date'])

        # Check to see if an end_date was chosen.
        if self.cleaned_data['end_date']:
            sqs = sqs.filter(coverage_end_date__lte=self.cleaned_data['end_date'])

        if self.cleaned_data['coverage_type']:
            sqs = sqs.filter(coverage_types__in=[self.cleaned_data['coverage_type']])

        authors_sq = None
        subjects_sq = None
        resource_type_sq = None
        public_sq = None
        owners_names_sq = None
        discoverable_sq = None
        published_sq = None
        variable_names_sq = None
        sample_mediums_sq = None
        units_names_sq = None

        # We need to process each facet to ensure that the field name and the
        # value are quoted correctly and separately:

        for facet in self.selected_facets:
            if ":" not in facet:
                continue

            field, value = facet.split(":", 1)
            value = sqs.query.clean(value)

            if value:
                if "creators" in field:
                    if authors_sq is None:
                        authors_sq = SQ(creators=value)
                    else:
                        authors_sq.add(SQ(creators=value), SQ.OR)

                elif "subjects" in field:
                    if subjects_sq is None:
                        subjects_sq = SQ(subjects=value)
                    else:
                        subjects_sq.add(SQ(subjects=value), SQ.OR)

                elif "resource_type" in field:
                    if resource_type_sq is None:
                        resource_type_sq = SQ(resource_type=value)
                    else:
                        resource_type_sq.add(SQ(resource_type=value), SQ.OR)

                elif "public" in field:
                    if public_sq is None:
                        public_sq = SQ(public=value)
                    else:
                        public_sq.add(SQ(public=value), SQ.OR)

                elif "owners_names" in field:
                    if owners_names_sq is None:
                        owners_names_sq = SQ(owners_names=value)
                    else:
                        owners_names_sq.add(SQ(owners_names=value), SQ.OR)

                elif "discoverable" in field:
                    if discoverable_sq is None:
                        discoverable_sq = SQ(discoverable=value)
                    else:
                        discoverable_sq.add(SQ(discoverable=value), SQ.OR)

                elif "published" in field:
                    if published_sq is None:
                        published_sq = SQ(published=value)
                    else:
                        published_sq.add(SQ(published=value), SQ.OR)

                elif 'variable_names' in field:
                    if variable_names_sq is None:
                        variable_names_sq = SQ(variable_names=value)
                    else:
                        variable_names_sq.add(SQ(variable_names=value), SQ.OR)

                elif 'sample_mediums' in field:
                    if sample_mediums_sq is None:
                        sample_mediums_sq = SQ(sample_mediums=value)
                    else:
                        sample_mediums_sq.add(SQ(sample_mediums=value), SQ.OR)

                elif 'units_names' in field:
                    if units_names_sq is None:
                        units_names_sq = SQ(units_names=value)
                    else:
                        units_names_sq.add(SQ(units_names=value), SQ.OR)

                else:
                    continue

        if authors_sq is not None:
            sqs = sqs.filter(authors_sq)
        if subjects_sq is not None:
            sqs = sqs.filter(subjects_sq)
        if resource_type_sq is not None:
            sqs = sqs.filter(resource_type_sq)
        if public_sq is not None:
            sqs = sqs.filter(public_sq)
        if owners_names_sq is not None:
            sqs = sqs.filter(owners_names_sq)
        if discoverable_sq is not None:
            sqs = sqs.filter(discoverable_sq)
        if published_sq is not None:
            sqs = sqs.filter(published_sq)
        if variable_names_sq is not None:
            sqs = sqs.filter(variable_names_sq)
        if sample_mediums_sq is not None:
            sqs = sqs.filter(sample_mediums_sq)
        if units_names_sq is not None:
            sqs = sqs.filter(units_names_sq)

        return sqs
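A usage sketch (hypothetical values) of the selected_facets format consumed by the loop above: Haystack's faceting passes each chosen facet as a single "field:value" string, typically with an "_exact" suffix on the field name, which is why the code checks with substring tests such as "subjects" in field rather than equality.

selected_facets = [
    "subjects_exact:hydrology",
    "resource_type_exact:CompositeResource",
]
for facet in selected_facets:
    field, value = facet.split(":", 1)
    # field -> "subjects_exact", value -> "hydrology"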
コード例 #36
0
    def apply_filter(request, ignore_bbox=False):
        # get records with same taxon
        try:
            request_data = request.GET
        except AttributeError:
            request_data = request

        sqs = SearchQuerySet()

        query_value = request_data.get('search')
        if query_value:
            clean_query = sqs.query.clean(query_value)
            settings.ELASTIC_MIN_SCORE = 1.5
            results = sqs.filter(original_species_name=clean_query).models(
                BiologicalCollectionRecord, Taxon).order_by('-_score')
        else:
            settings.ELASTIC_MIN_SCORE = 0
            results = sqs.all().models(BiologicalCollectionRecord)

        taxon = request_data.get('taxon', None)
        if taxon:
            results = sqs.filter(
                taxon_gbif=taxon).models(BiologicalCollectionRecord)

        results = results.filter(validated=True)
        # get by bbox
        if not ignore_bbox:
            bbox = request_data.get('bbox', None)
            if bbox:
                bbox_array = bbox.split(',')
                downtown_bottom_left = Point(float(bbox_array[1]),
                                             float(bbox_array[0]))

                downtown_top_right = Point(float(bbox_array[3]),
                                           float(bbox_array[2]))

                results = results.within('location_center',
                                         downtown_bottom_left,
                                         downtown_top_right)

        # additional filters
        # query by collectors
        query_collector = request_data.get('collector')
        if query_collector:
            qs_collector = SQ()
            qs = json.loads(query_collector)
            for query in qs:
                qs_collector.add(SQ(collector=query), SQ.OR)
            results = results.filter(qs_collector)

        # query by category
        query_category = request_data.get('category')
        if query_category:
            qs_category = SQ()
            qs = json.loads(query_category)
            for query in qs:
                qs_category.add(SQ(category=query), SQ.OR)
            results = results.filter(qs_category)

        # query by year from
        year_from = request_data.get('yearFrom')
        if year_from:
            clean_query_year_from = sqs.query.clean(year_from)
            results = results.filter(
                collection_date_year__gte=clean_query_year_from)

        # query by year to
        year_to = request_data.get('yearTo')
        if year_to:
            clean_query_year_to = sqs.query.clean(year_to)
            results = results.filter(
                collection_date_year__lte=clean_query_year_to)

        # query by months
        months = request_data.get('months')
        if months:
            qs = months.split(',')
            qs_month = SQ()
            for month in qs:
                clean_query_month = sqs.query.clean(month)
                qs_month.add(SQ(collection_date_month=clean_query_month),
                             SQ.OR)
            results = results.filter(qs_month)
        return results
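A usage sketch for apply_filter with hypothetical values. Because of the try/except at the top, a plain dict of parameters can stand in for a request object; collector and category are JSON-encoded lists, months is comma-separated, and bbox is ordered lat_min, lon_min, lat_max, lon_max as parsed above. apply_filter appears to be a static helper on a class not shown here, so it is called as a plain function for illustration.

params = {
    'search': 'Oreochromis',               # hypothetical species name
    'bbox': '-34.2,18.3,-33.8,19.0',
    'collector': '["J. Smith"]',
    'category': '["alien"]',
    'yearFrom': '2000',
    'yearTo': '2018',
    'months': '1,2,3',
}
results = apply_filter(params)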
コード例 #37
0
ファイル: engines.py プロジェクト: 0lmer/searchengine
    def _get_se_objects(self, word_list, operator=SQ.AND):
        sq = SQ()
        for word in word_list:
            sq.add(SQ(content=word), operator)
        sqs = SearchQuerySet().filter(sq)
        return sqs
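A usage sketch (engine is a hypothetical instance of the class that owns _get_se_objects): the default operator ANDs all words together, while passing SQ.OR matches documents containing any of them.

sqs_all = engine._get_se_objects(["django", "haystack"])            # content:django AND content:haystack
sqs_any = engine._get_se_objects(["django", "haystack"], SQ.OR)     # content:django OR content:haystack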