Example #1
def _search_by_name(word, cursor_str, limit):
    """Returns TaskResultSummary in -created_ts order containing the word."""
    cursor = search.Cursor(web_safe_string=cursor_str, per_result=True)
    index = search.Index(name='requests')

    def item_to_id(item):
        for field in item.fields:
            if field.name == 'id':
                return field.value

    # The code is structured to handle incomplete entities but still return
    # 'limit' items. This is done by fetching a few more entities than necessary,
    # then keeping track of the cursor per item so the right cursor can be
    # returned.
    opts = search.QueryOptions(limit=limit + 5, cursor=cursor)
    results = index.search(search.Query('name:%s' % word, options=opts))
    result_summary_keys = []
    cursors = []
    for item in results.results:
        value = item_to_id(item)
        if value:
            result_summary_keys.append(
                task_pack.unpack_result_summary_key(value))
            cursors.append(item.cursor)

    # Handle None result value. See make_request() for details about how this can
    # happen.
    tasks = []
    cursor = None
    for task, c in zip(ndb.get_multi(result_summary_keys), cursors):
        if task:
            cursor = c
            tasks.append(task)
            if len(tasks) == limit:
                # Drop the rest.
                break
    else:
        if len(cursors) == limit + 5:
            while len(tasks) < limit:
                # Go into the slow path, seems like we got a lot of corrupted items.
                opts = search.QueryOptions(limit=limit - len(tasks) + 5,
                                           cursor=cursor)
                results = index.search(
                    search.Query('name:%s' % word, options=opts))
                if not results.results:
                    # Nothing else.
                    cursor = None
                    break
                for item in results.results:
                    value = item_to_id(item)
                    if value:
                        cursor = item.cursor
                        task = task_pack.unpack_result_summary_key(value).get()
                        if task:
                            tasks.append(task)
                            if len(tasks) == limit:
                                break

    cursor_str = cursor.web_safe_string if cursor else None
    return tasks, cursor_str
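
A caller can page through the full result set by feeding the returned cursor string back into _search_by_name(); a minimal sketch, in which the search word, the page size of 50, and the process() helper are placeholders rather than part of the example above:

cursor_str = None
while True:
    # 'build-linux' and process() are hypothetical; only the cursor handling
    # mirrors the function above.
    tasks, cursor_str = _search_by_name('build-linux', cursor_str, 50)
    for task in tasks:
        process(task)
    if not cursor_str:
        break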
Example #2
    def get(self):
        limit = int(self.request.get('limit', 100))
        building_name = self.request.get('building_name', None)
        if building_name is not None:
            #GET /maps/:building_name(:level_name)(:floor_number)
            #build the query
            level_name = self.request.get('level_name', None)
            floor_number = self.request.get('floor_number', None)
            query_string = "building_name: " + building_name
            if level_name is not None:
                query_string += " AND level_name: " + level_name
            if floor_number is not None:
                query_string += " AND floor_number = " + floor_number
            #execute it
            query_options = search.QueryOptions(limit=limit)
            self.execute_query_and_send_results(query_string, query_options)
            return

        lat = self.request.get('lat', None)
        lon = self.request.get('lon', None)
        if lat is not None and lon is not None:
            #GET /maps/:lat:lon(:radius)
            center = search.GeoPoint(float(lat), float(lon))
            radius = int(self.request.get('radius', 500))
            query_string = "distance(location, geopoint(%f,%f)) < %d" % (
                center.latitude, center.longitude, radius)
            query_options = search.QueryOptions(limit=limit)
            self.execute_query_and_send_results(query_string, query_options)
            return
Example #3
def query_per_document_cursor(index, query_string):
    cursor = search.Cursor(per_result=True)

    # Build the query using the cursor.
    options = search.QueryOptions(cursor=cursor)
    query = search.Query(query_string=query_string, options=options)

    # Get the results.
    results = index.search(query)

    document_cursor = None
    for document in results:
        # discover some document of interest and grab its cursor, for this
        # sample we'll just use the first document.
        document_cursor = document.cursor
        break

    # Start the next search from the document of interest.
    if document_cursor is None:
        return

    options = search.QueryOptions(cursor=document_cursor)
    query = search.Query(query_string=query_string, options=options)
    results = index.search(query)

    for document in results:
        print(document)
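
The per-result cursor grabbed above can also be serialized via its web-safe string and rebuilt on a later request; a minimal sketch of that round trip (the token plumbing around it is assumed):

# Minimal sketch: persisting the per-document cursor between requests.
token = document_cursor.web_safe_string          # hand this to the client
restored = search.Cursor(web_safe_string=token)  # rebuild it on the next request
options = search.QueryOptions(cursor=restored)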
Example #4
def ttsearch(indexes,
             query_dict,
             limit=20,
             cache_cursor=True,
             use_cursor=True,
             user=None,
             ids_only=False):
    query = []
    if 'all' in query_dict:
        query = query_dict['all']
    else:
        if isinstance(query_dict, basestring):
            query = query_dict
        else:
            for key in query_dict.keys():
                query.append('%s: %s' % (key, query_dict.get(key)))
            query = ' '.join(query)
    try:
        results = []
        for index in indexes:
            if use_cursor:
                if not user:
                    raise Exception('must supply user for cursor')
                cursor = get_cached_index_cursor(index, user)
                if not cursor and use_cursor:
                    cursor = search.Cursor()
                elif cursor == 'done':
                    return None
                query_options = search.QueryOptions(limit=limit, cursor=cursor)
            else:
                query_options = search.QueryOptions(limit=limit)
            # Don't shadow the query string: build a separate Query object so the
            # next index in the loop still sees the original string.
            search_query = search.Query(query_string=query, options=query_options)
            results_obj = index.search(search_query)
            if cache_cursor:
                if not user:
                    raise Exception('must supply user for cursor')
                if results_obj.cursor:
                    cache_index_cursor(index, user, results_obj.cursor)
                else:
                    cache_index_cursor(index, user, 'done')
            results += results_obj.results
        result_ids = []
        if ids_only:
            for result in results:
                result_ids.append(result.doc_id)
            return result_ids
        else:
            for result in results:
                result_ids.append({
                    'fields': get_result_field_dict(result),
                    'id': result.doc_id
                })
            return result_ids
    except search.Error as e:
        raise e
Example #5
    def search(cls, user_from_email, request):
        organization = str(user_from_email.organization.id())
        index = search.Index(name="GlobalIndex")
        # Show only objects where you have permissions
        query_string = SEARCH_QUERY_MODEL % {
            "type": "Case",
            "query": request.q,
            "organization": organization,
            "owner": user_from_email.google_user_id,
            "collaborators": user_from_email.google_user_id,
        }
        search_results = []
        if request.limit:
            limit = int(request.limit)
        else:
            limit = 10
        next_cursor = None
        if request.pageToken:
            cursor = search.Cursor(web_safe_string=request.pageToken)
        else:
            cursor = search.Cursor(per_result=True)
        if limit:
            # Ask for one extra result so we can tell whether another page exists.
            options = search.QueryOptions(limit=limit + 1, cursor=cursor)
        else:
            options = search.QueryOptions(cursor=cursor)
        query = search.Query(query_string=query_string, options=options)
        try:
            if query:
                result = index.search(query)
                if len(result.results) == limit + 1:
                    next_cursor = result.results[-1].cursor.web_safe_string
                else:
                    next_cursor = None
                results = result.results[:limit]
                for scored_document in results:
                    kwargs = {
                        'id': scored_document.doc_id
                    }
                    for e in scored_document.fields:
                        if e.name in [
                            "title",
                            "contact_name",
                            "account_name",
                            "status"
                        ]:
                            kwargs[e.name] = e.value
                    search_results.append(CaseSearchResult(**kwargs))

        except search.Error:
            logging.exception('Search failed')
        return CaseSearchResults(
            items=search_results,
            nextPageToken=next_cursor
        )
Example #6
    def _buildQuery(self, query, sortq, sort_dict, doc_limit, offsetval):
        """Build and return a search query object."""

        # computed and returned fields examples.  Their use is not required
        # for the application to function correctly.
        computed_expr = search.FieldExpression(name='adjusted_price',
                                               expression='price * 1.08')
        returned_fields = [
            docs.Product.PID, docs.Product.DESCRIPTION, docs.Product.CATEGORY,
            docs.Product.AVG_RATING, docs.Product.PRICE,
            docs.Product.PRODUCT_NAME
        ]

        if sortq == 'relevance':
            # If sorting on 'relevance', use the Match scorer.
            sortopts = search.SortOptions(match_scorer=search.MatchScorer())
            search_query = search.Query(
                query_string=query.strip(),
                options=search.QueryOptions(
                    limit=doc_limit,
                    offset=offsetval,
                    sort_options=sortopts,
                    snippeted_fields=[docs.Product.DESCRIPTION],
                    returned_expressions=[computed_expr],
                    returned_fields=returned_fields))
        else:
            # Otherwise (not sorting on relevance), use the selected field as the
            # first dimension of the sort expression, and the average rating as the
            # second dimension, unless we're sorting on rating, in which case price
            # is the second sort dimension.
            # We get the sort direction and default from the 'sort_dict' var.
            if sortq == docs.Product.AVG_RATING:
                expr_list = [
                    sort_dict.get(sortq),
                    sort_dict.get(docs.Product.PRICE)
                ]
            else:
                expr_list = [
                    sort_dict.get(sortq),
                    sort_dict.get(docs.Product.AVG_RATING)
                ]
            sortopts = search.SortOptions(expressions=expr_list)
            # logging.info("sortopts: %s", sortopts)
            search_query = search.Query(
                query_string=query.strip(),
                options=search.QueryOptions(
                    limit=doc_limit,
                    offset=offsetval,
                    sort_options=sortopts,
                    snippeted_fields=[docs.Product.DESCRIPTION],
                    returned_expressions=[computed_expr],
                    returned_fields=returned_fields))
        return search_query
Example #7
def get_query_options(query_dict, limit=10):
    if 'lat' in query_dict and 'lon' in query_dict:
        print 'lat ' + str(query_dict['lat'])
        print 'lon ' + str(query_dict['lon'])
        # geopoint() expects latitude first, then longitude.
        loc_expr = "distance(place, geopoint(%f, %f))" % (float(query_dict['lat']),
                                                          float(query_dict['lon']))
        sortexpr = search.SortExpression(
            expression=loc_expr,
            direction=search.SortExpression.ASCENDING,
            default_value=45001)
        return search.QueryOptions(
            limit=limit,
            sort_options=search.SortOptions(expressions=[sortexpr]))
    else:
        return search.QueryOptions(limit=limit)
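
One plausible way to consume the options built above; the 'places' index name and the empty match-all query string are assumptions, not taken from the snippet:

from google.appengine.api import search

def find_nearby(query_dict, limit=10):
    # Build distance-sorted options and run a match-all query against a
    # hypothetical 'places' index; an empty query string matches all documents.
    options = get_query_options(query_dict, limit=limit)
    index = search.Index(name='places')
    return index.search(search.Query(query_string='', options=options))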
Example #8
    def get(self):
        query_type = self.request.get('query_type')

        index = search.Index(name="crowdsourced_mumbai_monsoon")
        if query_type == "NEAR_BY":
            latitude = self.request.get('lat')
            longitude = self.request.get('lon')

            geostring = "geopoint(%s,%s)" % (latitude,longitude)
            query_string = "distance(wlocation, %s) < 1000" % geostring
            query = search.Query(query_string = query_string)
            query_result(self,index, query)
            #self.response.write('NEAR_BY = %s' % geostring)
        elif query_type == "CAR_DATA":
         
            query_options = search.QueryOptions(    
                    returned_fields=['wlocation', 'water_level', 'wiper_speed', 'speed', 'car_break_down','supplier','car_parked'],
                    )
            self.response.write('WATER_LEVEL')
            self.response.write('Query Handler')
            query_string = "water_level > 1 AND supplier = car"
            query = search.Query(query_string = query_string, options = query_options)
        elif query_type == "HIGH_FLOODING":
        
            query_options = search.QueryOptions(    
                    returned_fields=['wlocation', 'water_level', 'wiper_speed', 'speed', 'car_break_down','supplier','car_parked'],
                    )
            self.response.write('WATER_LEVEL')
            self.response.write('Query Handler')
            for point_of_interest in high_flooding_areas:
                latitude = point_of_interest[0]
                longitude = point_of_interest[1]
                geostring = "geopoint(%s,%s)" % (latitude,longitude)
                query_string = "distance(wlocation, %s) < 5000"  % geostring
                query = search.Query(query_string = query_string, options = query_options)
        elif query_type == "DIVERSION":
             
            query_options = search.QueryOptions(    
                    returned_fields=['wlocation', 'water_level', 'wiper_speed', 'speed', 'car_break_down','supplier','car_parked'],
                    )
            self.response.write('WATER_LEVEL')
            self.response.write('Query Handler')
            for point_of_interest in diversion_areas:
                latitude = point_of_interest[0]
                longitude = point_of_interest[1]
                geostring = "geopoint(%s,%s)" % (latitude,longitude)
                query_string = "distance(wlocation, %s) < 5000"  % geostring
                query = search.Query(query_string = query_string, options = query_options)
Example #9
def find_documents(words, limit, cursor):
    try:
        # subject_desc = search.SortExpression(
        #    expression='EventId',
        #   direction=search.SortExpression.DESCENDING,
        #  default_value='')

        # Sort up to 1000 matching results by subject in descending order
        #sort = search.SortOptions(expressions=[subject_desc], limit=1000)

        # Set query options
        options = search.QueryOptions(
            limit=limit,  # the number of results to return
            cursor=cursor,
            #sort_options=sort,
            returned_fields=['EventId', 'content'],
            snippeted_fields=['content'])
        query_string = words
        query = search.Query(query_string=query_string, options=options)

        index = search.Index(name=_INDEX_NAME)

        # Execute the query
        return index.search(query)
    except search.Error:
        logging.exception('Search failed')
        return None
Example #10
  def get(self, terms):
    self.response.headers['Access-Control-Allow-Origin'] = '*'
    scoring = self.request.get('noscore', None) is None
    include_results = self.request.get('noresults', None) is None
    include_count = self.request.get('count', None) is not None
    request_cursor = self.request.get('cursor', None)

    if not include_results:
      scoring = False
      include_count = True
    try:
      limit = min(20, int(self.request.get('limit', 20)))
    except ValueError:
      self.response.set_status(400)
      return
    index = search.Index('repo')
    cursor = search.Cursor(web_safe_string=request_cursor)
    try:
      # number_found will be accurate up to this many results.
      accuracy = 2000 if include_count else None
      sort_options = search.SortOptions(match_scorer=search.MatchScorer()) if scoring else None
      query_options = search.QueryOptions(limit=limit, number_found_accuracy=accuracy, sort_options=sort_options, cursor=cursor)
      search_results = index.search(search.Query(query_string=terms, options=query_options))
      cursor = search_results.cursor
    except search.QueryError:
      self.response.set_status(400)
      self.response.write('bad query')
      return

    count = search_results.number_found
    if include_results:
      result_futures = []
      for result in search_results.results:
        (owner, repo) = result.doc_id.split('/')
        version = None
        for field in result.fields:
          if field.name == 'version':
            version = field.value
            break
        library_key = ndb.Key(Library, Library.id(owner, repo))
        result_futures.append(LibraryMetadata.brief_async(library_key, version, assume_latest=True))
      results = []
      for future in result_futures:
        result = yield future
        if result is None:
          # Fixup count when we skip over incomplete entries.
          count = count - 1
        if result is not None:
          results.append(result)

    result = {
        'cursor': cursor.web_safe_string if cursor and include_results else None,
    }
    if include_count:
      result['count'] = count
    if include_results:
      result['results'] = results

    self.response.headers['Content-Type'] = 'application/json'
    self.response.write(json.dumps(result))
Example #11
def search_index(Model, paginate_limit, query_string, cursor, index=None, transformer=None):
    """
    Searches using the provided index (or an automatically determined one).

    Expects the search query in the ``query_string`` argument.

    Also takes care of setting pagination information if the :class:`pagination component <ferris.components.pagination.Pagination>` is present.
    """

    limit = paginate_limit
    response = dict()

    try:
        if cursor:
            cursor = search.Cursor(web_safe_string=cursor)
        else:
            cursor = search.Cursor()

        options = search.QueryOptions(
            limit=limit,
            ids_only=True,
            cursor=cursor)
        query = search.Query(query_string=query_string, options=options)

        if not index:
            if hasattr(Model, 'get_search_index'):
                index = Model.get_search_index()
            elif hasattr(Model, 'search_index_name'):
                index = Model.search_index_name
            else:
                index = 'auto_ix_%s' % Model._get_kind()
        index = search.Index(name=index)

        logging.debug(
            "Searching %s with \"%s\" and cursor %s" % (index.name, query.query_string, cursor.web_safe_string))
        index_results = index.search(query)

        if issubclass(Model, ndb.Model):
            results = ndb.get_multi([ndb.Key(urlsafe=x.doc_id) for x in index_results])
            if transformer is not None:
                results = [transformer(x) for x in results if x]
            else:
                results = [x for x in results if x]
        else:
            results = Model.get([x.doc_id for x in index_results])
            Model.prefetch_references(results)

        if index_results.cursor:
            logging.info(index_results)
            response['limit'] = limit
            response['cursor'] = cursor.web_safe_string
            response['query_string'] = query_string
            response['next_cursor'] = index_results.cursor.web_safe_string

    except (search.Error, search.query_parser.QueryException) as e:
        results = []
        logging.info("error occurred %s " % e)

    response['results'] = results
    return response
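
A possible call site for search_index() above, assuming a hypothetical ndb model class Product; only the 'results' and 'next_cursor' keys come from the function itself:

# Hypothetical usage; Product and the surrounding request handling are assumptions.
response = search_index(Product, paginate_limit=20,
                        query_string='name:piano', cursor=None)
for entity in response['results']:
    logging.info(entity)
next_cursor = response.get('next_cursor')  # feed back in to fetch the next page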
Example #12
def get_search_suggestions(searchstring):
    logging.info("search string was: " + searchstring)
    # get the search string
    query = searchstring
    # reg_ex = searchstring + ".*"
    reg_ex = re.compile(searchstring + ".*")
    # create the query object
    sort_expression = [search.SortExpression(expression='rank',
                                             direction=search.SortExpression.DESCENDING)]
    sort_opt = search.SortOptions(expressions=sort_expression)
    query_options = search.QueryOptions(limit=20, sort_options=sort_opt)
    query_obj = search.Query(query_string=query, options=query_options)
    results = search.Index(name=_INDEX_NAME).search(query=query_obj)

    logging.info(results)
    # we need to limit the suggestions to at most 20 options,
    # sorted alphabetically
    possibilities = []
    temp_tags = []
    for result in results:
        for field in result.fields:
            if field.name == "stream_name":
                possibilities.append(field.value)
            if field.name == "tags" and field.value is not None:
                temp_tags = field.value.split(" ")
                possibilities.extend(temp_tags)
    

    possibilities = [x for x in possibilities if x.startswith(searchstring)]
    sorted_possibilities = sorted(possibilities)
    
    logging.info(sorted_possibilities)
    return sorted_possibilities[:20]
Example #13
    def get(self):
        template = JINJA_ENVIRONMENT.get_template(
            'templates/members/member_list.html')
        countries = Country.all().order('order').fetch(LIMIT)
        statuses = Status.all().order('order').fetch(LIMIT)
        types = MemberType.all().order('order').fetch(LIMIT)

        # TODO: Make sensible
        query_string = ''
        current_search = self.request.get('search')
        query_string += current_search

        current_status = self.request.get('status')
        if current_status != '':
            if query_string != '':
                query_string += ' AND '
            query_string += 'status:' + current_status

        current_type = self.request.get('type')
        if current_type != '':
            if query_string != '':
                query_string += ' AND '
            query_string += 'type:' + current_type

        current_country = self.request.get('country')
        if current_country and current_country != '':
            if query_string != '':
                query_string += ' AND '
            query_string += 'country:' + current_country

        index = search.Index(name='members')
        result = index.search(query=search.Query(
            query_string, options=search.QueryOptions(limit=LIMIT)))

        members = list()
        for document in result.results:
            members.append(Member.search_member_from_document(document))

        members = sorted(members, key=lambda x: x.number)

        current_status_name = current_status
        current_type_name = current_type

        total = memcache.get('member_count')
        if not total:
            total = 0

        data = {
            'countries': countries,
            'statuses': statuses,
            'types': types,
            'members': members,
            'current_status': current_status,
            'current_type': current_type,
            'current_search': current_search,
            'found': result.number_found,
            'shown': len(members),
            'total': total
        }
        self.response.write(template.render(data))
Example #14
def employees_matching_prefix(prefix):
    """Returns a list of (full name, username) tuples for users that match the given prefix."""
    if not prefix:
        return []

    user_tuples = set()

    search_query = search.Query(query_string=prefix,
                                options=search.QueryOptions(limit=15))
    results = search.Index(name=INDEX_NAME).search(search_query)
    for r in results:
        username, full_name = None, None
        for f in r.fields:
            if f.name == 'full_name':
                full_name = f.value
            elif f.name == 'username':
                username = f.value
            else:
                continue
        if username is not None and full_name is not None:
            photo_url = Employee.query(
                Employee.username == username).get().get_photo_url()
            user_tuples.add((full_name, username, photo_url))

    user_tuples = list(user_tuples)
    user_tuples.sort()
    return user_tuples
Example #15
def search_artist():
    page_size = int(request.args.get('page_size') or 30)
    offset = int(request.args.get('offset') or 0)

    index_artist = search.Index('Artist')

    search_query = search.Query(query_string=generate_search_query(
        request.args).strip(),
                                options=search.QueryOptions(limit=page_size,
                                                            offset=offset))

    artist_results = index_artist.search(search_query)

    keys = []
    for doc in artist_results:
        keys.append(next(f.value for f in doc.fields if f.name == 'key'))

    artists = []
    for key in keys:
        a = ndb.Key(urlsafe=key).get()
        if a is not None:
            albums = Album.query(Album.artist_id == a.key).filter(
                Album.is_active == True).fetch()
            if albums:
                artists.append(a)

    return jsonify(artists=artists)
Example #16
  def search(
      cls, query_string='', query_limit=20, offset=0, sort_options=None,
      returned_fields=None):
    """Searches for documents that match a given query string.

    Args:
      query_string: str, the query to match against documents in the index
      query_limit: int, the limit on number of documents to return in results.
      offset: int, the number of matched documents to skip before beginning to
          return results.
      sort_options: search.SortOptions, an object specifying a multi-dimensional
          sort over search results.
      returned_fields: List[str], an iterable of names of fields to return in
          search results.

    Returns:
      A SearchResults object containing a list of documents matched by the
          query.
    """
    index = cls.get_index()

    try:
      query = search.Query(
          query_string=cls.format_query(query_string),
          options=search.QueryOptions(
              offset=offset, limit=query_limit, sort_options=sort_options,
              returned_fields=returned_fields),
      )
    except search.QueryError:
      return search.SearchResults(number_found=0)

    return index.search(query)
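
A minimal usage sketch for the classmethod above, assuming a concrete subclass (called ProductDocument here) that indexes 'created', 'status' and 'title' fields; all of those names are illustrative:

from google.appengine.api import search

# Hypothetical caller; ProductDocument and its field names are assumptions.
sort_options = search.SortOptions(expressions=[
    search.SortExpression(expression='created',
                          direction=search.SortExpression.DESCENDING,
                          default_value=0)])
results = ProductDocument.search(query_string='status:open', query_limit=10,
                                 sort_options=sort_options,
                                 returned_fields=['title'])
for scored_document in results:
    print(scored_document.doc_id)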
Example #17
    def get(self):
        confirm = self.request.get('confirm')
        if confirm != "true":
            return
        epub_key = self.request.get('key')
        epub = db.get(epub_key)
        account = get_current_session().get("account")
        entry = model.LibraryEntry.all().filter("epub = ", epub).filter(
            "user ="******"private", "public"]:
                    index = search.Index(indexName)
                    opts = search.QueryOptions(limit=1000, ids_only=True)
                    query = search.Query(query_string="book:%s" % epub_key,
                                         options=opts)
                    docs = index.search(query)
                    for doc in docs:
                        index.remove(doc.doc_id)
                blobstore.delete(epub.blob.key())
                db.delete(epub)

            self.redirect('/list')
        else:
            self.response.out.write("Not permitted")
Example #18
    def _run_query(self):
        offset = self._offset
        limit = self._limit
        sort_expressions = self._sorts

        if self._raw_query is not None:
            query_string = self._raw_query
        else:
            query_string = str(self.query)

        kwargs = {"expressions": sort_expressions}
        if self._match_scorer:
            kwargs["match_scorer"] = self._match_scorer

        snippet_words = self.get_snippet_words()
        field_expressions = self.get_snippet_expressions(snippet_words)

        sort_options = search_api.SortOptions(**kwargs)
        search_options = search_api.QueryOptions(
            offset=offset,
            limit=limit,
            sort_options=sort_options,
            ids_only=self.ids_only,
            number_found_accuracy=100,
            returned_expressions=field_expressions,
        )
        search_query = search_api.Query(query_string=query_string,
                                        options=search_options)

        self._results_response = self.index.search(search_query)
        self._number_found = self._results_response.number_found
Example #19
    def search_query(self, query_string, page=0):

        # Create sort options to sort on the timestamp, newest first.
        sort_ts = search.SortExpression(
            expression='ts',
            direction=search.SortExpression.DESCENDING,
            default_value=0)
        sort_options = search.SortOptions(expressions=[sort_ts])

        # Create query options using the sort options and expressions created
        # above.
        query_options = search.QueryOptions(limit=self.MSG_PER_PAGE_NUM,
                                            offset=page *
                                            self.MSG_PER_PAGE_NUM,
                                            returned_fields=['msg_key'],
                                            sort_options=sort_options)

        # Build the Query and run the search
        try:
            query = search.Query(query_string=query_string,
                                 options=query_options)
        except search.QueryError:
            return []
        results = self.index.search(query)

        return results
Example #20
def search_albums():
    page_size = int(request.args.get('page_size') or 30)
    offset = int(request.args.get('offset') or 0)

    index_albums = search.Index('Album')

    search_query = search.Query(query_string=generate_search_query(
        request.args).strip(),
                                options=search.QueryOptions(limit=page_size,
                                                            offset=offset))

    album_results = index_albums.search(search_query)

    keys = []
    for doc in album_results:
        keys.append(next(f.value for f in doc.fields if f.name == 'key'))

    # TODO remove Try and except on the final production server
    albums = []
    for key in keys:
        try:
            a = ndb.Key(urlsafe=key).get()
        except BaseException:
            continue

        if a is not None:
            albums.append(a)

    return jsonify(albums=albums)
Example #21
def query_options():
    index = search.Index('products')
    query_string = "product: piano AND price < 5000"

    # Create sort options to sort on price and brand.
    sort_price = search.SortExpression(
        expression='price',
        direction=search.SortExpression.DESCENDING,
        default_value=0)
    sort_brand = search.SortExpression(
        expression='brand',
        direction=search.SortExpression.DESCENDING,
        default_value="")
    sort_options = search.SortOptions(expressions=[sort_price, sort_brand])

    # Create field expressions to add new fields to the scored documents.
    price_per_note_expression = search.FieldExpression(name='price_per_note',
                                                       expression='price/88')
    ivory_expression = search.FieldExpression(
        name='ivory', expression='snippet("ivory", summary, 120)')

    # Create query options using the sort options and expressions created
    # above.
    query_options = search.QueryOptions(
        limit=25,
        returned_fields=['model', 'price', 'description'],
        returned_expressions=[price_per_note_expression, ivory_expression],
        sort_options=sort_options)

    # Build the Query and run the search
    query = search.Query(query_string=query_string, options=query_options)
    results = index.search(query)
    for scored_document in results:
        print(scored_document)
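
The computed fields requested through returned_expressions come back on each ScoredDocument via its expressions property; a small follow-on sketch for the loop above:

    # Read the computed 'price_per_note' expression off each scored document.
    for scored_document in results:
        for expr in scored_document.expressions:
            if expr.name == 'price_per_note':
                print('price per note: %s' % expr.value)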
Example #22
def search_index(query_string):

    group_url_prefix = '/group/'
    group_index = search.Index(name='groups')

    query_options = search.QueryOptions(
        limit=12, returned_fields=['name', 'description', 'group_image'])

    query_string = process_query_string(query_string)
    query = search.Query(query_string=query_string, options=query_options)
    documents = group_index.search(query)
    if len(documents.results) == 0:
        return None

    results = []
    for doc in documents:
        temp = {}
        fields = doc.fields
        temp["name"] = fields[0].value
        temp["description"] = fields[1].value
        temp["image"] = fields[2].value
        temp["url"] = group_url_prefix + doc.doc_id
        results.append(temp)

    return results
Example #23
def search(repo, query_txt, max_results):
    """
    Searches for persons using the full-text index.
    Args:
        repo: The name of the repository.
        query_txt: Search query.
        max_results: The maximum number of results wanted (at most 1000).

    Returns:
        results: [<model.Person>, ...]

    Raises:
        search.Error: An error occurred when the index name is unknown
                      or the query has a syntax error.
    """
    # TODO: Sanitize query_txt.
    results = []
    if not query_txt:
        return results
    index = appengine_search.Index(name=PERSON_FULL_TEXT_INDEX_NAME)
    query_txt += ' AND (repo: ' + repo + ')'
    options = appengine_search.QueryOptions(
        limit=max_results,
        returned_fields=['record_id'])
    index_results = index.search(appengine_search.Query(
        query_string=query_txt, options=options))
    for document in index_results:
        id = document.fields[0].value
        results.append(model.Person.get_by_key_name(repo + ':' + id))
    return results
Example #24
def search_tracks():
    page_size = int(request.args.get('page_size') or 30)
    offset = int(request.args.get('offset') or 0)

    index_tracks = search.Index('Track')

    search_query = search.Query(query_string=generate_search_query(
        request.args).strip(),
                                options=search.QueryOptions(limit=page_size,
                                                            offset=offset))
    logging.log(logging.INFO, generate_search_query(request.args).strip())
    track_results = index_tracks.search(search_query)

    keys = []
    for doc in track_results:
        keys.append(next(f.value for f in doc.fields if f.name == "key"))

    tracks = []
    for key in keys:
        a = ndb.Key(urlsafe=key).get()
        if a is not None:
            album_for_track = Album.query().filter(Album.tracks == a.key).get()
            if not album_for_track or not album_for_track.is_active:
                continue
            tracks.append({"album": album_for_track, "track": a})

    return jsonify(tracks=tracks)
Example #25
    def post(self):
        enforce_login(self)
        # request.get() returns '' when the parameter is absent, so check for a
        # non-empty value, and return after redirecting.
        if self.request.get('license') and not users.is_current_user_admin():
            self.redirect("/")
            return
        epub = db.get(self.request.get('epub_key'))
        if not users.is_current_user_admin() and epub.entry_count() > 1:
            self.redirect("/")
            return
        epub.language = self.request.get('language')
        epub.title = self.request.get('title')
        epub.creator = self.request.get('creator')
        epub.publisher = self.request.get('publisher')
        epub.rights = self.request.get('rights')
        epub.contributor = self.request.get('contributor')
        epub.identifier = self.request.get('identifier')
        epub.description = self.request.get('description')
        epub.date = self.request.get('date')

        license = self.request.get('license')
        if epub.license != license:
            if license == "Public Domain" or license == "Creative Commons":
                unpacker = unpack.Unpacker()
                unpacker.index_epub(epub, "public")
            else:
                index = search.Index("public")
                opts = search.QueryOptions(limit=1000, ids_only=True)
                query = search.Query(query_string="book:%s" % epub.key(),
                                     options=opts)
                docs = index.search(query)
                for doc in docs:
                    index.remove(doc.doc_id)

        epub.license = self.request.get('license')
        epub.put()
        self.redirect("/book/" + str(epub.key().id()))
Example #26
    def _get_candidate_doc_events(self, ids_only=True):
        clauses = []
        if self.query.bounds:
            # We try to keep searches as simple as possible,
            # using just AND queries on latitude/longitude.
            # But for stuff crossing +/-180 degrees,
            # we need to do an OR longitude query on each side.
            latitudes = (self.query.bounds[0][0], self.query.bounds[1][0])
            longitudes = (self.query.bounds[0][1], self.query.bounds[1][1])
            clauses += ['latitude >= %s AND latitude <= %s' % latitudes]
            if longitudes[0] < longitudes[1]:
                clauses += ['longitude >= %s AND longitude <= %s' % longitudes]
            else:
                clauses += [
                    '(longitude >= %s OR longitude <= %s)' % longitudes
                ]
        if self.query.keywords:
            clauses += ['(%s)' % self.query.keywords]
        # if self.query.min_likes:
        #     clauses += ['like_count > %d' % self.query.min_likes]
        if clauses:
            full_search = ' '.join(clauses)
            logging.info("Doing search for %r", full_search)
            # TODO(lambert): implement pagination
            if ids_only:
                options = {'returned_fields': []}
            else:
                options = {'returned_fields': self.extra_fields}
            options = search.QueryOptions(limit=self.limit, **options)
            query = search.Query(query_string=full_search, options=options)
            doc_search_results = search_source.SourceIndex.search(query)
            return doc_search_results.results
        return []
Example #27
    def get(self):
        searchv = self.request.get('s', '')
        cursorv = self.request.get('cursor', None)
        options = search.QueryOptions(
            limit=40,
            cursor=search.Cursor(web_safe_string=cursorv),
            ids_only=True)
        query = search.Query(query_string=searchv, options=options)
        index = search.Index(name=SEARCH_INDEX)
        results = index.search(query)

        results_keys = [
            ndb.Key(*ndb.Key(urlsafe=i.doc_id).flat()) for i in results.results
        ]
        haikus = ndb.get_multi(results_keys)
        haikus_list = [h.ToDict() for h in haikus if h is not None]

        next_cursor = None
        if results.cursor:
            next_cursor = results.cursor.web_safe_string

        template = JINJA_ENVIRONMENT.get_template('haiku-search.html')
        self.response.out.write(
            template.render({
                'search': searchv,
                'haikus': haikus_list,
                'cursor': next_cursor,
            }))
Example #28
    def get_results(self, query):
        # logging.info("get_results start \n{}".format(time.clock() * 1000 % 1000))
        query_ascii = unidecode(query)
        query_ascii = Autocompleter.SANITIZE_PATTERN.sub("", query_ascii)

        logging.info(u"Autocomplete search for '{}' sanitized to '{}'.".format(
            query, query_ascii))

        if not query_ascii:
            return []

        # logging.info("before index search \n{}".format(time.clock() * 1000 % 1000))
        results = self.index.search(query=search.Query(
            'tokens:({})'.format(query_ascii),
            options=search.QueryOptions(limit=5, ids_only=True)))
        # logging.info("after index search \n{}".format(time.clock() * 1000 % 1000))
        logging.info("Got {} results.".format(len(results.results)))
        assert (isinstance(results, search.SearchResults))
        list_of_keys = []
        for search_result in results.results:
            assert (isinstance(search_result, search.ScoredDocument))
            key = ndb.Key('BookRecord', search_result.doc_id)
            list_of_keys.append(key)
        # logging.info("get_multi start \n{}".format(time.clock() * 1000 % 1000))
        return ndb.get_multi(list_of_keys)
Example #29
def get_search_results(query, page=0):
    assert page >= 0
    assert page < 20
    extra = None

    expr_list = [
        search.SortExpression(expression='author',
                              default_value='',
                              direction=search.SortExpression.DESCENDING)
    ]

    sort_opts = search.SortOptions(expressions=expr_list)
    query_options = search.QueryOptions(limit=30, sort_options=sort_opts)
    query_obj = search.Query(query_string=query, options=query_options)

    results_posts = search.Index(name=_INDEX_SEARCH).search(query=query_obj)
    results = []
    for result in results_posts:
        a = Post.get_by_id(long(result.doc_id))
        results.append(a)

    if len(results) > PAGE_SIZE:
        if page < 19:
            extra = results[-1]
        results = results[:PAGE_SIZE]

    return results, extra
Example #30
  def get(self):
    super(SearchIndexHandler, self).get()
    index_name = self.request.get('index')
    if not index_name:
      self.redirect('/search')
      return
    start = self.request.get_range('start', min_value=0, default=0)
    query = self.request.get('query')
    namespace = self.request.get('namespace')
    index = search.Index(name=index_name, namespace=namespace)
    resp = index.search(query=search.Query(
        query_string=query,
        options=search.QueryOptions(offset=start,
                                    limit=self._MAX_RESULTS_PER_PAGE)))
    has_more = resp.number_found > start + self._MAX_RESULTS_PER_PAGE

    values = {
        'namespace': namespace,
        'index': index_name,
        'start': start,
        'query': query,
        'values': self._process_search_response(resp),
    }
    self._handle_paging(start, has_more, values)
    self.response.write(self.render('search_index.html', values))