def _search_by_name(word, cursor_str, limit):
    """Returns TaskResultSummary in -created_ts order containing the word."""
    # Per-result cursor lets us resume the search from any individual document,
    # not just from the end of a page.
    cursor = search.Cursor(web_safe_string=cursor_str, per_result=True)
    index = search.Index(name='requests')

    def item_to_id(item):
        # Extract the 'id' field value from a search document, or None if absent.
        for field in item.fields:
            if field.name == 'id':
                return field.value

    # The code is structured to handle incomplete entities but still return
    # 'limit' items. This is done by fetching a few more entities than necessary,
    # then keeping track of the cursor per item so the right cursor can be
    # returned.
    opts = search.QueryOptions(limit=limit + 5, cursor=cursor)
    results = index.search(search.Query('name:%s' % word, options=opts))
    result_summary_keys = []
    cursors = []
    for item in results.results:
        value = item_to_id(item)
        if value:
            result_summary_keys.append(
                task_pack.unpack_result_summary_key(value))
            cursors.append(item.cursor)

    # Handle None result value. See make_request() for details about how this can
    # happen.
    tasks = []
    cursor = None
    for task, c in zip(ndb.get_multi(result_summary_keys), cursors):
        if task:
            cursor = c
            tasks.append(task)
            if len(tasks) == limit:
                # Drop the rest.
                break
    else:
        # for/else: we exhausted the batch without reaching 'limit' tasks.
        if len(cursors) == limit + 5:
            # A full batch came back but too many entries were corrupt; keep
            # fetching pages from the last good per-result cursor.
            while len(tasks) < limit:
                # Go into the slow path, seems like we got a lot of corrupted items.
                opts = search.QueryOptions(
                    limit=limit - len(tasks) + 5, cursor=cursor)
                results = index.search(
                    search.Query('name:%s' % word, options=opts))
                if not results.results:
                    # Nothing else.
                    cursor = None
                    break
                for item in results.results:
                    value = item_to_id(item)
                    if value:
                        cursor = item.cursor
                        task = task_pack.unpack_result_summary_key(value).get()
                        if task:
                            tasks.append(task)
                            if len(tasks) == limit:
                                break
    # Return the web-safe cursor of the last item actually delivered, so the
    # next call resumes exactly after it.
    cursor_str = cursor.web_safe_string if cursor else None
    return tasks, cursor_str
def query_per_document_cursor(index, query_string):
    """Demonstrate per-document cursors: run a query, take the first result's
    cursor, then re-run the same query starting from that document."""
    per_doc_cursor = search.Cursor(per_result=True)
    first_pass = index.search(
        search.Query(query_string=query_string,
                     options=search.QueryOptions(cursor=per_doc_cursor)))

    # Pick some document of interest; for this sample, simply the first one.
    first_doc = next(iter(first_pass), None)
    if first_doc is None:
        return

    # Resume the search from the chosen document's own cursor.
    second_pass = index.search(
        search.Query(query_string=query_string,
                     options=search.QueryOptions(cursor=first_doc.cursor)))
    for doc in second_pass:
        print(doc)
def _query_search_index_by_offset(index, query_string, query_options,
                                  is_get_all=True, result_offset=None):
    """Run a search against `index`, paging by offset.

    When is_get_all is True, repeatedly advances the offset until a page comes
    back empty and returns all accumulated documents; otherwise runs a single
    query at `result_offset`. Returns the documents, or None on search.Error.
    """
    result_documents = []
    result_returned = 1
    if not result_offset:
        result_offset = 0
    try:
        if is_get_all:
            # Keep fetching pages until a query returns zero results.
            while result_returned > 0:
                # NOTE(review): mutates the private `_offset` attribute of the
                # caller-supplied QueryOptions instead of rebuilding it —
                # fragile against SDK changes, and the caller's options object
                # is left modified.
                query_options._offset = result_offset
                query = search.Query(query_string=query_string,
                                     options=query_options)
                result = index.search(query)
                result_returned = len(result.results)
                if result_returned > 0:
                    result_documents.extend(result.results)
                    result_offset += result_returned
        else:
            # Single-page mode: honor an explicit starting offset if given.
            if result_offset:
                query_options._offset = result_offset
            query = search.Query(query_string=query_string,
                                 options=query_options)
            result = index.search(query)
            result_returned = len(result.results)
            if result_returned > 0:
                result_documents = result.results
    except search.Error as ex:
        logging.exception(ex)
        return None
    return result_documents
def _buildQuery(self, query, sortq, sort_dict, doc_limit, offsetval):
    """Build and return a search query object.

    Args:
        query: raw query string from the user (whitespace is stripped).
        sortq: sort selector — 'relevance' or a product field name.
        sort_dict: maps field names to their SortExpression (direction/default).
        doc_limit: maximum number of documents to return.
        offsetval: result offset for paging.

    Returns:
        A search.Query with snippeting, computed expressions and a fixed set
        of returned fields.

    Fix: the original duplicated the entire search.Query construction in both
    branches; only the sort options differ, so the query is now built once.
    """
    # computed and returned fields examples. Their use is not required
    # for the application to function correctly.
    computed_expr = search.FieldExpression(name='adjusted_price',
                                           expression='price * 1.08')
    returned_fields = [
        docs.Product.PID, docs.Product.DESCRIPTION, docs.Product.CATEGORY,
        docs.Product.AVG_RATING, docs.Product.PRICE, docs.Product.PRODUCT_NAME
    ]
    if sortq == 'relevance':
        # If sorting on 'relevance', use the Match scorer.
        sortopts = search.SortOptions(match_scorer=search.MatchScorer())
    else:
        # Otherwise (not sorting on relevance), use the selected field as the
        # first dimension of the sort expression, and the average rating as the
        # second dimension, unless we're sorting on rating, in which case price
        # is the second sort dimension.
        # We get the sort direction and default from the 'sort_dict' var.
        if sortq == docs.Product.AVG_RATING:
            expr_list = [
                sort_dict.get(sortq), sort_dict.get(docs.Product.PRICE)
            ]
        else:
            expr_list = [
                sort_dict.get(sortq), sort_dict.get(docs.Product.AVG_RATING)
            ]
        sortopts = search.SortOptions(expressions=expr_list)
    return search.Query(
        query_string=query.strip(),
        options=search.QueryOptions(
            limit=doc_limit,
            offset=offsetval,
            sort_options=sortopts,
            snippeted_fields=[docs.Product.DESCRIPTION],
            returned_expressions=[computed_expr],
            returned_fields=returned_fields))
def get(self):
    # Handler: answer crowd-sourced Mumbai-monsoon queries selected by the
    # 'query_type' request parameter.
    query_type = self.request.get('query_type')
    index = search.Index(name="crowdsourced_mumbai_monsoon")
    if query_type == "NEAR_BY":
        latitude = self.request.get('lat')
        longitude = self.request.get('lon')
        geostring = "geopoint(%s,%s)" % (latitude, longitude)
        # Geo-query: documents whose 'wlocation' is within 1 km of the point.
        query_string = "distance(wlocation, %s) < 1000" % geostring
        query = search.Query(query_string=query_string)
        query_result(self, index, query)
        #self.response.write('NEAR_BY = %s' % geostring)
    elif query_type == "CAR_DATA":
        query_options = search.QueryOptions(
            returned_fields=['wlocation', 'water_level', 'wiper_speed',
                             'speed', 'car_break_down', 'supplier',
                             'car_parked'],
        )
        self.response.write('WATER_LEVEL')
        self.response.write('Query Handler')
        query_string = "water_level > 1 AND supplier = car"
        # NOTE(review): this query is built but never executed (no
        # query_result/index.search call in this branch) — looks unfinished;
        # confirm intent.
        query = search.Query(query_string=query_string, options=query_options)
    elif query_type == "HIGH_FLOODING":
        query_options = search.QueryOptions(
            returned_fields=['wlocation', 'water_level', 'wiper_speed',
                             'speed', 'car_break_down', 'supplier',
                             'car_parked'],
        )
        self.response.write('WATER_LEVEL')
        self.response.write('Query Handler')
        # NOTE(review): builds a query per point of interest but never runs
        # any of them, and each iteration overwrites the previous query.
        # `high_flooding_areas` is presumably a module-level list of (lat, lon)
        # pairs — verify.
        for point_of_interest in high_flooding_areas:
            latitude = point_of_interest[0]
            longitude = point_of_interest[1]
            geostring = "geopoint(%s,%s)" % (latitude, longitude)
            query_string = "distance(wlocation, %s) < 5000" % geostring
            query = search.Query(query_string=query_string,
                                 options=query_options)
    elif query_type == "DIVERSION":
        query_options = search.QueryOptions(
            returned_fields=['wlocation', 'water_level', 'wiper_speed',
                             'speed', 'car_break_down', 'supplier',
                             'car_parked'],
        )
        self.response.write('WATER_LEVEL')
        self.response.write('Query Handler')
        # NOTE(review): same unexecuted-query pattern as HIGH_FLOODING above.
        for point_of_interest in diversion_areas:
            latitude = point_of_interest[0]
            longitude = point_of_interest[1]
            geostring = "geopoint(%s,%s)" % (latitude, longitude)
            query_string = "distance(wlocation, %s) < 5000" % geostring
            query = search.Query(query_string=query_string,
                                 options=query_options)
def get_search_results(query, page=0):
    """Search the post index and return (results, extra).

    Args:
        query: full-text query string.
        page: zero-based page number; must be in [0, 20).

    Returns:
        (results, extra) where results holds at most PAGE_SIZE Post entities
        sorted by author descending, and extra is the overflow Post used to
        signal that another page exists (None when there is no next page or
        on the last allowed page).

    Fix: the original computed the trimmed page into an unused local
    (`quotes = results[:PAGE_SIZE]`) and returned the untrimmed list; the
    trimmed list is now actually returned.
    """
    assert page >= 0
    assert page < 20
    extra = None
    expr_list = [
        search.SortExpression(expression='author',
                              default_value='',
                              direction=search.SortExpression.DESCENDING)
    ]
    sort_opts = search.SortOptions(expressions=expr_list)
    query_options = search.QueryOptions(limit=30, sort_options=sort_opts)
    query_obj = search.Query(query_string=query, options=query_options)
    results_posts = search.Index(name=_INDEX_SEARCH).search(query=query_obj)
    results = []
    for result in results_posts:
        # doc_id stores the Post's numeric datastore id.
        results.append(Post.get_by_id(long(result.doc_id)))
    if len(results) > PAGE_SIZE:
        if page < 19:
            # Keep the overflow entity as the "there is a next page" marker.
            extra = results[-1]
        # Trim the returned page to PAGE_SIZE (bug fix: was discarded before).
        results = results[:PAGE_SIZE]
    return results, extra
def get(self):
    # NOTE(review): this handler was mangled by an automated secret-redaction
    # pass: the "******" below replaced the tail of the LibraryEntry filter,
    # the permission check, and the head of the index-name loop
    # (presumably `for indexName in ["private", "public"]:`). It does not
    # parse as-is and must be restored from version control before use.
    #
    # Intended behavior (from what remains): after a ?confirm=true request,
    # delete a book — remove its search documents from the private/public
    # indexes, delete its blob, delete the datastore entity, then redirect.
    confirm = self.request.get('confirm')
    if confirm != "true":
        return
    epub_key = self.request.get('key')
    epub = db.get(epub_key)
    account = get_current_session().get("account")
    entry = model.LibraryEntry.all().filter("epub = ", epub).filter(
        "user ="******"private", "public"]:
        # Remove every search document pointing at this book from the index.
        index = search.Index(indexName)
        opts = search.QueryOptions(limit=1000, ids_only=True)
        query = search.Query(query_string="book:%s" % epub_key, options=opts)
        docs = index.search(query)
        for doc in docs:
            index.remove(doc.doc_id)
        blobstore.delete(epub.blob.key())
        db.delete(epub)
        self.redirect('/list')
    else:
        self.response.out.write("Not permitted")
def _run_query(self):
    """Execute the accumulated query against the index and cache the response
    plus the total number of matches on this instance."""
    # A raw query string, when provided, bypasses the structured query object.
    query_string = (self._raw_query if self._raw_query is not None
                    else str(self.query))

    sort_kwargs = {"expressions": self._sorts}
    if self._match_scorer:
        sort_kwargs["match_scorer"] = self._match_scorer

    expressions = self.get_snippet_expressions(self.get_snippet_words())

    options = search_api.QueryOptions(
        offset=self._offset,
        limit=self._limit,
        sort_options=search_api.SortOptions(**sort_kwargs),
        ids_only=self.ids_only,
        number_found_accuracy=100,
        returned_expressions=expressions,
    )
    response = self.index.search(
        search_api.Query(query_string=query_string, options=options))

    self._results_response = response
    self._number_found = response.number_found
def search_index(query_string):
    """Search the 'groups' index and return display dicts for each hit.

    Args:
        query_string: raw user query; normalized via process_query_string().

    Returns:
        A list of dicts with 'name', 'description', 'image' and 'url' keys,
        or None when the query matched nothing.

    Fix: the original read doc.fields[0..2] positionally, which silently
    breaks if the service returns fields in a different order; fields are
    now looked up by name.
    """
    group_url_prefix = '/group/'
    group_index = search.Index(name='groups')
    query_options = search.QueryOptions(
        limit=12, returned_fields=['name', 'description', 'group_image'])
    query_string = process_query_string(query_string)
    query = search.Query(query_string=query_string, options=query_options)
    documents = group_index.search(query)
    if len(documents.results) == 0:
        return None
    results = []
    for doc in documents:
        # Robust name-based lookup instead of positional indexing.
        fields = {f.name: f.value for f in doc.fields}
        results.append({
            "name": fields.get('name'),
            "description": fields.get('description'),
            "image": fields.get('group_image'),
            "url": group_url_prefix + doc.doc_id,
        })
    return results
def get(self, terms):
    # Search handler for the 'repo' index. Flags: 'noscore' disables relevance
    # scoring, 'noresults' returns only the count, 'count' includes the total,
    # 'cursor' resumes paging, 'limit' caps the page size at 20.
    # NOTE(review): the body uses `yield` on ndb futures — presumably this
    # method is decorated as an ndb tasklet/synctasklet outside this view;
    # confirm against the class definition.
    self.response.headers['Access-Control-Allow-Origin'] = '*'
    scoring = self.request.get('noscore', None) is None
    include_results = self.request.get('noresults', None) is None
    include_count = self.request.get('count', None) is not None
    request_cursor = self.request.get('cursor', None)
    if not include_results:
        # Count-only mode: scoring is pointless, and the count is the payload.
        scoring = False
        include_count = True
    try:
        limit = min(20, int(self.request.get('limit', 20)))
    except ValueError:
        self.response.set_status(400)
        return
    index = search.Index('repo')
    cursor = search.Cursor(web_safe_string=request_cursor)
    try:
        # Accuracy refers to accurate till n results.
        accuracy = 2000 if include_count else None
        sort_options = (search.SortOptions(match_scorer=search.MatchScorer())
                        if scoring else None)
        query_options = search.QueryOptions(limit=limit,
                                            number_found_accuracy=accuracy,
                                            sort_options=sort_options,
                                            cursor=cursor)
        search_results = index.search(
            search.Query(query_string=terms, options=query_options))
        cursor = search_results.cursor
    except search.QueryError:
        self.response.set_status(400)
        self.response.write('bad query')
        return
    count = search_results.number_found
    if include_results:
        # Kick off all metadata fetches concurrently, then await them in order.
        result_futures = []
        for result in search_results.results:
            # doc_id is formatted as "<owner>/<repo>".
            (owner, repo) = result.doc_id.split('/')
            version = None
            for field in result.fields:
                if field.name == 'version':
                    version = field.value
                    break
            library_key = ndb.Key(Library, Library.id(owner, repo))
            result_futures.append(
                LibraryMetadata.brief_async(library_key, version,
                                            assume_latest=True))
        results = []
        for future in result_futures:
            result = yield future
            if result is None:
                # Fixup count when we skip over incomplete entries.
                count = count - 1
            if result is not None:
                results.append(result)
    result = {
        # Only hand back a paging cursor when results were actually returned.
        'cursor': cursor.web_safe_string if cursor and include_results else None,
    }
    if include_count:
        result['count'] = count
    if include_results:
        result['results'] = results
    self.response.headers['Content-Type'] = 'application/json'
    self.response.write(json.dumps(result))
def search_query(self, query_string, page=0):
    """Search the message index, newest first by 'ts', one page at a time.

    Returns the SearchResults for the requested page, or [] when the query
    string cannot be parsed.
    """
    per_page = self.MSG_PER_PAGE_NUM

    # Sort on the 'ts' field, descending (newest first).
    newest_first = search.SortOptions(expressions=[
        search.SortExpression(expression='ts',
                              direction=search.SortExpression.DESCENDING,
                              default_value=0),
    ])

    options = search.QueryOptions(limit=per_page,
                                  offset=page * per_page,
                                  returned_fields=['msg_key'],
                                  sort_options=newest_first)

    try:
        parsed = search.Query(query_string=query_string, options=options)
    except search.QueryError:
        # Unparseable user input: behave as an empty result set.
        return []
    return self.index.search(parsed)
def query_options():
    """Demo: search 'products' for pianos under 5000, sorted by price then
    brand (both descending), with two computed fields per document."""
    piano_index = search.Index('products')

    # Sort on price and brand, both descending.
    by_price = search.SortExpression(
        expression='price',
        direction=search.SortExpression.DESCENDING,
        default_value=0)
    by_brand = search.SortExpression(
        expression='brand',
        direction=search.SortExpression.DESCENDING,
        default_value="")

    # Computed fields attached to each scored document.
    per_note = search.FieldExpression(name='price_per_note',
                                      expression='price/88')
    ivory_snippet = search.FieldExpression(
        name='ivory', expression='snippet("ivory", summary, 120)')

    opts = search.QueryOptions(
        limit=25,
        returned_fields=['model', 'price', 'description'],
        returned_expressions=[per_note, ivory_snippet],
        sort_options=search.SortOptions(expressions=[by_price, by_brand]))

    hits = piano_index.search(
        search.Query(query_string="product: piano AND price < 5000",
                     options=opts))
    for scored_document in hits:
        print(scored_document)
def search_tweet(cls, req):
    """Full-text search over the 'tweets' index.

    Args:
        req: request wrapper; the search text is read from its 'text' param.

    Returns:
        A status dict: {"status", "message", "data"} where data is the list
        of matching tweets on success.

    Fix: removed a dead `len(tweets)` expression statement that had no effect.
    """
    index = search.Index('tweets')
    tweet = req.request.get('text').lower().rstrip()
    logging.info(tweet)
    if not tweet:
        return {
            "status": False,
            "message": "Search field cannot empty",
            "data": {}
        }
    search_query = search.Query(query_string=tweet)
    logging.info("---------------------")
    logging.info(search_query)
    tweets = []
    for document in index.search(search_query):
        # doc_id maps back to the stored tweet entity.
        val = AccountFunction.get_tweet_by_key(document.doc_id)
        if val:
            tweets.append(val)
    if not tweets:
        return {
            "status": False,
            "message": "No tweets available",
            "data": {}
        }
    return {"status": True, "message": "", "data": tweets}
def search_artist():
    """Search the 'Artist' index and return, as JSON, only those artists
    that currently have at least one active album."""
    limit = int(request.args.get('page_size') or 30)
    skip = int(request.args.get('offset') or 0)

    query = search.Query(
        query_string=generate_search_query(request.args).strip(),
        options=search.QueryOptions(limit=limit, offset=skip))
    hits = search.Index('Artist').search(query)

    # Each document carries the artist's urlsafe ndb key in its 'key' field.
    urlsafe_keys = [next(f.value for f in hit.fields if f.name == 'key')
                    for hit in hits]

    matches = []
    for urlsafe_key in urlsafe_keys:
        artist = ndb.Key(urlsafe=urlsafe_key).get()
        if artist is None:
            continue
        active_albums = Album.query(Album.artist_id == artist.key).filter(
            Album.is_active == True).fetch()
        if active_albums:
            matches.append(artist)
    return jsonify(artists=matches)
def search_tracks():
    """Search the 'Track' index and return, as JSON, tracks paired with their
    album — skipping tracks with no album or an inactive one."""
    limit = int(request.args.get('page_size') or 30)
    skip = int(request.args.get('offset') or 0)

    query_text = generate_search_query(request.args).strip()
    query = search.Query(query_string=query_text,
                         options=search.QueryOptions(limit=limit, offset=skip))
    logging.log(logging.INFO, query_text)
    hits = search.Index('Track').search(query)

    # Each document carries the track's urlsafe ndb key in its 'key' field.
    urlsafe_keys = [next(f.value for f in hit.fields if f.name == "key")
                    for hit in hits]

    found = []
    for urlsafe_key in urlsafe_keys:
        track = ndb.Key(urlsafe=urlsafe_key).get()
        if track is None:
            continue
        album = Album.query().filter(Album.tracks == track.key).get()
        if not album or not album.is_active:
            continue
        found.append({"album": album, "track": track})
    return jsonify(tracks=found)
def get_search_suggestions(searchstring):
    """Return up to 20 alphabetically sorted suggestions for a search prefix.

    Queries the stream index (rank-descending), collects stream names and
    individual tags from the results, and keeps only values that start with
    the given prefix.

    Fix: removed a compiled regex (`reg_ex`) that was never used, and the
    redundant `temp_tags` intermediate.
    """
    logging.info("search string was: " + searchstring)
    # get the search string
    query = searchstring
    # create the query object, sorted by 'rank' descending
    sort_expression = [search.SortExpression(
        expression='rank', direction=search.SortExpression.DESCENDING)]
    sort_opt = search.SortOptions(expressions=sort_expression)
    query_options = search.QueryOptions(limit=20, sort_options=sort_opt)
    query_obj = search.Query(query_string=query, options=query_options)
    results = search.Index(name=_INDEX_NAME).search(query=query_obj)
    logging.info(results)
    # we need to limit the suggestion to at most 20 possible options,
    # sorted alphabetically
    possibilities = []
    for result in results:
        for field in result.fields:
            if field.name == "stream_name":
                possibilities.append(field.value)
            if field.name == "tags" and field.value is not None:
                # tags are stored as a single space-separated string
                possibilities.extend(field.value.split(" "))
    possibilities = [x for x in possibilities if x.startswith(searchstring)]
    sorted_possibilities = sorted(possibilities)
    logging.info(sorted_possibilities)
    return sorted_possibilities[:20]
def search_index(Model, paginate_limit, query_string, cursor, index=None,
                 transformer=None):
    """
    Searches using the provided index (or an automatically determine one).
    Expects the search query to be in the ``query`` request parameter.
    Also takes care of setting pagination information if the
    :class:`pagination component <ferris.components.pagination.Pagnation>`
    is present.

    Returns a dict with 'results' and, when more pages exist, the paging
    keys 'limit', 'cursor', 'query_string' and 'next_cursor'.
    """
    limit = paginate_limit
    response = dict()
    try:
        # Resume from the caller's web-safe cursor, or start fresh.
        if cursor:
            cursor = search.Cursor(web_safe_string=cursor)
        else:
            cursor = search.Cursor()

        options = search.QueryOptions(
            limit=limit,
            ids_only=True,
            cursor=cursor)
        query = search.Query(query_string=query_string, options=options)

        # Resolve the index: explicit argument, model hook, model attribute,
        # or an auto-generated per-kind name, in that priority order.
        if not index:
            if hasattr(Model, 'get_search_index'):
                index = Model.get_search_index()
            elif hasattr(Model, 'search_index_name'):
                index = Model.search_index_name
            else:
                index = 'auto_ix_%s' % Model._get_kind()
            index = search.Index(name=index)

        logging.debug(
            "Searching %s with \"%s\" and cursor %s" %
            (index.name, query.query_string, cursor.web_safe_string))

        index_results = index.search(query)

        if issubclass(Model, ndb.Model):
            # ids_only search stores urlsafe ndb keys as doc ids; batch-fetch
            # and drop entities that no longer exist.
            results = ndb.get_multi(
                [ndb.Key(urlsafe=x.doc_id) for x in index_results])
            if transformer is not None:
                results = [transformer(x) for x in results if x]
            else:
                results = [x for x in results if x]
        else:
            # Legacy db.Model path: doc ids are raw key names.
            results = Model.get([x.doc_id for x in index_results])
            Model.prefetch_references(results)

        # Only emit paging info when the service handed back a next cursor.
        if index_results.cursor:
            logging.info(index_results)
            response['limit'] = limit
            response['cursor'] = cursor.web_safe_string
            response['query_string'] = query_string
            response['next_cursor'] = index_results.cursor.web_safe_string
    except (search.Error, search.query_parser.QueryException) as e:
        # Any search failure degrades to an empty result set.
        results = []
        logging.info("error occurred %s " % e)
    response['results'] = results
    return response
def search(repo, query_txt, max_results): """ Searches person with index. Args: repo: The name of repository query_txt: Search query max_results: The max number of results you want.(Maximum: 1000) Returns: results[<model.Person>, ...] Raises: search.Error: An error occurred when the index name is unknown or the query has syntax error. """ #TODO: Sanitaize query_txt results = [] if not query_txt: return results index = appengine_search.Index(name=PERSON_FULL_TEXT_INDEX_NAME) query_txt += ' AND (repo: ' + repo + ')' options = appengine_search.QueryOptions( limit=max_results, returned_fields=['record_id']) index_results = index.search(appengine_search.Query( query_string=query_txt, options=options)) for document in index_results: id = document.fields[0].value results.append(model.Person.get_by_key_name(repo + ':' + id)) return results
def find_documents(words, limit, cursor):
    """Query the index for `words` and return one page of results.

    Returns the SearchResults (with 'EventId' and 'content' fields and a
    snippet of 'content'), or None when the search service reports an error.
    """
    try:
        page_options = search.QueryOptions(
            limit=limit,      # the number of results to return
            cursor=cursor,    # resume token from a previous page
            returned_fields=['EventId', 'content'],
            snippeted_fields=['content'])
        return search.Index(name=_INDEX_NAME).search(
            search.Query(query_string=words, options=page_options))
    except search.Error:
        logging.exception('Search failed')
        return None
def _get_candidate_doc_events(self, ids_only=True):
    """Build and run a search for events matching this query's bounds and
    keywords; returns the raw ScoredDocuments (empty list if no clauses)."""
    clauses = []
    if self.query.bounds:
        # We try to keep searches as simple as possible,
        # using just AND queries on latitude/longitude.
        # But for stuff crossing +/-180 degrees,
        # we need to do an OR longitude query on each side.
        latitudes = (self.query.bounds[0][0], self.query.bounds[1][0])
        longitudes = (self.query.bounds[0][1], self.query.bounds[1][1])
        clauses += ['latitude >= %s AND latitude <= %s' % latitudes]
        if longitudes[0] < longitudes[1]:
            clauses += ['longitude >= %s AND longitude <= %s' % longitudes]
        else:
            # Bounding box wraps the antimeridian: match either side.
            clauses += [
                '(longitude >= %s OR longitude <= %s)' % longitudes
            ]
    if self.query.keywords:
        clauses += ['(%s)' % self.query.keywords]
    #if self.query.min_likes:
    #    clauses += ['like_count > %d' % self.query.min_likes]
    if clauses:
        # Clauses are implicitly ANDed by the search query language.
        full_search = ' '.join(clauses)
        logging.info("Doing search for %r", full_search)
        #TODO(lambert): implement pagination
        if ids_only:
            options = {'returned_fields': []}
        else:
            options = {'returned_fields': self.extra_fields}
        options = search.QueryOptions(limit=self.limit, **options)
        query = search.Query(query_string=full_search, options=options)
        doc_search_results = search_source.SourceIndex.search(query)
        return doc_search_results.results
    return []
def get(self, in_query):
    """Handle a spot search: free-text/geo query over the 'spots' index,
    returned as a GeoJSON FeatureCollection."""
    index = search.Index('spots')
    query_dict = parse_query(in_query)
    if query_dict[
            'query'] == '' and 'lat' not in query_dict and 'lon' not in query_dict:
        # Empty query with no location: just page through the first documents.
        search_results = index.get_range(start_id="0", limit=10)
    else:
        # search_results = index.search(query_dict['query'])
        search_query = search.Query(
            query_string=query_dict['query'].strip(),
            options=get_query_options(query_dict))
        search_results = index.search(search_query)
    features = []
    for doc in search_results:
        # Skip documents with a (0, 0) placeholder location.
        if (doc.field('place').value.longitude != 0.0
                and doc.field('place').value.latitude != 0.0):
            features.append(build_feature_dict(doc))
    # features = in_query if len(features) == 0 else features
    # NOTE(review): 'dict' shadows the builtin here; harmless locally but
    # worth renaming when this block is next touched.
    dict = {"type": "FeatureCollection", "features": features}
    return util.jsonpify(dict)

# curl http://127.0.0.1:8080/api/v1/post/a
# AIzaSyAbcMGMULgp5l0Trav2G3OseIrNGIxHDZk
# curl "http://127.0.0.1:8080/api/v1/post/a&-73.9858118&40.7701926"
# curl "http://localhost:3000/api/v1/post/*&40.770696199999996&-73.9858118"
def get_results(self, query):
    """Autocomplete lookup: sanitize the query, search the token index, and
    fetch the matching BookRecord entities in one batch."""
    # Transliterate to ASCII, then strip characters the index can't match.
    query_ascii = Autocompleter.SANITIZE_PATTERN.sub("", unidecode(query))
    logging.info(u"Autocomplete search for '{}' sanitized to '{}'.".format(
        query, query_ascii))
    if not query_ascii:
        return []

    results = self.index.search(query=search.Query(
        'tokens:({})'.format(query_ascii),
        options=search.QueryOptions(limit=5, ids_only=True)))
    logging.info("Got {} results.".format(len(results.results)))
    assert (isinstance(results, search.SearchResults))

    keys = []
    for hit in results.results:
        assert (isinstance(hit, search.ScoredDocument))
        # doc_id doubles as the BookRecord key id.
        keys.append(ndb.Key('BookRecord', hit.doc_id))
    return ndb.get_multi(keys)
def post(self):
    """Update an epub's metadata; on license changes, (re)index or de-index
    the book in the public search index."""
    enforce_login(self)
    # NOTE(review): these redirects do not `return`, so execution continues
    # and the update still happens for non-admin users — confirm whether an
    # early return was intended.
    if self.request.get(
            'license') is not None and not users.is_current_user_admin():
        self.redirect("/")
    epub = db.get(self.request.get('epub_key'))
    if not users.is_current_user_admin() and epub.entry_count() > 1:
        self.redirect("/")
    # Copy submitted metadata fields onto the entity.
    epub.language = self.request.get('language')
    epub.title = self.request.get('title')
    epub.creator = self.request.get('creator')
    epub.publisher = self.request.get('publisher')
    epub.rights = self.request.get('rights')
    epub.contributor = self.request.get('contributor')
    epub.identifier = self.request.get('identifier')
    epub.description = self.request.get('description')
    epub.date = self.request.get('date')
    license = self.request.get('license')
    if epub.license != license:
        if license == "Public Domain" or license == "Creative Commons":
            # Becoming freely licensed: add the book to the public index.
            unpacker = unpack.Unpacker()
            unpacker.index_epub(epub, "public")
        else:
            # No longer freely licensed: purge its documents from the
            # public index.
            index = search.Index("public")
            opts = search.QueryOptions(limit=1000, ids_only=True)
            query = search.Query(query_string="book:%s" % epub.key(),
                                 options=opts)
            docs = index.search(query)
            for doc in docs:
                index.remove(doc.doc_id)
    epub.license = self.request.get('license')
    epub.put()
    self.redirect("/book/" + str(epub.key().id()))
def get(self):
    """Admin view: run a query against a named search index (optionally in a
    namespace) and render one page of results with paging controls."""
    super(SearchIndexHandler, self).get()
    index_name = self.request.get('index')
    if not index_name:
        # No index selected: send the user back to the index chooser.
        self.redirect('/search')
        return
    start = self.request.get_range('start', min_value=0, default=0)
    query = self.request.get('query')
    namespace = self.request.get('namespace')
    index = search.Index(name=index_name, namespace=namespace)
    resp = index.search(query=search.Query(
        query_string=query,
        options=search.QueryOptions(offset=start,
                                    limit=self._MAX_RESULTS_PER_PAGE)))
    # More pages exist if the total found exceeds what this page covers.
    has_more = resp.number_found > start + self._MAX_RESULTS_PER_PAGE
    values = {
        'namespace': namespace,
        'index': index_name,
        'start': start,
        'query': query,
        'values': self._process_search_response(resp),
    }
    self._handle_paging(start, has_more, values)
    self.response.write(self.render('search_index.html', values))
def get(self):
    """Render the haiku search page for query parameter 's', supporting
    cursor-based paging via the 'cursor' parameter."""
    query_text = self.request.get('s', '')
    cursor_token = self.request.get('cursor', None)

    search_results = search.Index(name=SEARCH_INDEX).search(
        search.Query(
            query_string=query_text,
            options=search.QueryOptions(
                limit=40,
                cursor=search.Cursor(web_safe_string=cursor_token),
                ids_only=True)))

    # Each doc_id is a urlsafe ndb key; rebuild keys and batch-fetch.
    doc_keys = []
    for scored in search_results.results:
        doc_keys.append(ndb.Key(*ndb.Key(urlsafe=scored.doc_id).flat()))
    entities = ndb.get_multi(doc_keys)
    rendered = [entity.ToDict() for entity in entities if entity is not None]

    next_token = (search_results.cursor.web_safe_string
                  if search_results.cursor else None)

    page = JINJA_ENVIRONMENT.get_template('haiku-search.html')
    self.response.out.write(page.render({
        'search': query_text,
        'haikus': rendered,
        'cursor': next_token,
    }))
def employees_matching_prefix(prefix):
    """Returns a sorted list of (full name, username, photo_url) tuples for
    users that match the given prefix.

    Fixes: the docstring previously claimed two-element tuples while the code
    returns three; a dead `else: continue` branch was removed.
    """
    if not prefix:
        return []
    user_tuples = set()
    search_query = search.Query(query_string=prefix,
                                options=search.QueryOptions(limit=15))
    results = search.Index(name=INDEX_NAME).search(search_query)
    for r in results:
        username, full_name = None, None
        for f in r.fields:
            if f.name == 'full_name':
                full_name = f.value
            elif f.name == 'username':
                username = f.value
        if username is not None and full_name is not None:
            # NOTE: one datastore query per result (N+1); tolerable at
            # limit=15, but batch if the limit ever grows.
            photo_url = Employee.query(
                Employee.username == username).get().get_photo_url()
            user_tuples.add((full_name, username, photo_url))
    return sorted(user_tuples)
def generateRatingsBuckets(cls, query_string):
    """Builds a dict of ratings 'buckets' and their counts, based on the
    value of the 'avg_rating' field for the documents retrieved by the given
    query. See the 'generateRatingsLinks' method. This information will be
    used to generate sidebar links that allow the user to drill down in query
    results based on rating.

    For demonstration purposes only; this will be expensive for large data
    sets. Returns None when the search fails.
    """
    # Run the query over the *full* result set to imitate the facet counts
    # a future FTS API version may provide directly.
    try:
        full_results = cls.getIndex().search(
            search.Query(query_string=query_string.strip()))
    except search.Error:
        logging.exception('An error occurred on search.')
        return None

    # Tally each document into the integer bucket of its average rating,
    # treating a missing rating as 0.
    buckets = collections.defaultdict(int)
    for scored_doc in full_results:
        buckets[int((cls(scored_doc)).getAvgRating() or 0)] += 1
    return buckets
def get(self):
    """Render the member list, filtered by optional search text, status,
    type and country request parameters combined into one search query."""
    template = JINJA_ENVIRONMENT.get_template(
        'templates/members/member_list.html')
    countries = Country.all().order('order').fetch(LIMIT)
    statuses = Status.all().order('order').fetch(LIMIT)
    types = MemberType.all().order('order').fetch(LIMIT)
    # TODO: Make sensible
    # Build the query string: free text plus ANDed field filters.
    query_string = ''
    current_search = self.request.get('search')
    query_string += current_search
    current_status = self.request.get('status')
    if current_status != '':
        if query_string != '':
            query_string += ' AND '
        query_string += 'status:' + current_status
    current_type = self.request.get('type')
    if current_type != '':
        if query_string != '':
            query_string += ' AND '
        query_string += 'type:' + current_type
    current_country = self.request.get('country')
    if current_country and current_country != '':
        if query_string != '':
            query_string += ' AND '
        query_string += 'country:' + current_country
    index = search.Index(name='members')
    result = index.search(query=search.Query(
        query_string, options=search.QueryOptions(limit=LIMIT)))
    members = list()
    for document in result.results:
        members.append(Member.search_member_from_document(document))
    # Display in membership-number order regardless of search relevance.
    members = sorted(members, key=lambda x: x.number)
    # NOTE(review): these two aliases are never used below — candidates for
    # removal.
    current_status_name = current_status
    current_type_name = current_type
    total = memcache.get('member_count')
    if not total:
        total = 0
    data = {
        'countries': countries,
        'statuses': statuses,
        'types': types,
        'members': members,
        'current_status': current_status,
        'current_type': current_type,
        'current_search': current_search,
        'found': result.number_found,
        'shown': len(members),
        'total': total
    }
    self.response.write(template.render(data))
def get(self):
    """Geo-search handler: find jobs or workers within 5 km of (lat, lon).

    Reads 'query_type', 'lat' and 'lon' from the request. "JOBS" searches
    the job index on its location field; anything else searches the worker
    index on its location field.

    Fix: the two branches duplicated identical query-building code differing
    only in the index name and location field; they now share one path.
    """
    query_type = self.request.get('query_type')
    latitude = self.request.get('lat')
    longitude = self.request.get('lon')
    geostring = "geopoint(%s,%s)" % (latitude, longitude)
    if query_type == "JOBS":
        index_name, location_field = "job_index", "job_location"
    else:
        index_name, location_field = "worker_index", "worker_location"
    index = search.Index(name=index_name)
    # Documents whose location field lies within 5000 meters of the point.
    query_string = "distance(%s, %s) < 5000" % (location_field, geostring)
    query = search.Query(query_string=query_string)
    query_result(self, index, query)
def get(self):
    """Return, as JSON, the names of streams whose name contains the request
    'query' text, by listing every stream and filtering in Python."""
    inputText = self.request.get("query")
    print(inputText)
    print(type(inputText))
    # A query string.
    # NOTE(review): the Search API does not treat query strings as regular
    # expressions — ".*" is matched literally, so this presumably relies on
    # the index returning all documents for it; verify against the indexing
    # code.
    keyword = ".*"
    # print(keyword)
    # Build the Query and run the search
    query = search.Query(query_string=keyword)
    index = search.Index(name='ConnexStreamPool', namespace="Connex")
    result = index.search(query)
    streamlist = list()
    for doc in result.results:
        # The first document field holds the stream's key name; fetch the
        # stream entity under that ancestor and record its display name.
        singleStream = \
            stream_bundle.myStream.query(
                ancestor=management.stream_key(str(doc.fields[0].value))).fetch()[0]
        streamlist.append(singleStream.streamname)
    # for doc in result.results:
    #     streamlist.append(str(doc.fields[0].value))
    print(streamlist)
    # Substring filter on the user's query text.
    output = []
    for streamName in streamlist:
        if inputText in streamName:
            output.append(streamName)
    print(output)
    output = json.dumps(output)
    self.response.headers['Content-Type'] = 'application/json'
    self.response.out.write(output)