Example #1
def search_data(company):
    print(Query(company).limit_fields('title').verbatim().summarize())
    j = client.search(
        Query(company).limit_fields('title').verbatim()).docs[0].__dict__
    del j['id']
    del j['payload']
    return j
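A minimal sketch of the setup this snippet assumes, using the legacy redisearch package; the index name, fields, and sample document below are illustrative, not part of the original:

from redisearch import Client, Query, TextField

# Hypothetical index; the original snippet only assumes a module-level `client`.
client = Client('companies')
client.create_index([TextField('title', weight=5.0), TextField('industry')])
client.add_document('co1', title='Redis Labs', industry='Databases')

# VERBATIM disables stemming; LIMIT FIELDS restricts matching to `title`.
print(search_data('Redis'))  # e.g. {'title': 'Redis Labs', 'industry': 'Databases'}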
Example #2
    def viewModules(self, query=None, sort=None):
        if not query:
            # Use a purely negative query to get all modules
            query = '-etaoinshrdlu'
        q = Query(query).no_content().paging(0, 1000)
        if sort:
            if sort == 'relevance':
                pass
            elif sort == 'update':
                q.sort_by('last_modified')
            elif sort == 'stars':
                q.sort_by('stargazers_count', asc=False)
            elif sort == 'forks':
                q.sort_by('forks_count', asc=False)
            elif sort == 'name':
                q.sort_by('name')

        results = self.sconn.search(q)
        mods = []
        fetch_duration = 0
        # TODO: this should be pipelined
        for doc in results.docs:
            m = RedisModule(self.dconn, self.sconn, self.autocomplete, doc.id)
            res, duration = _durationms(m.to_dict)
            mods.append(res)
            fetch_duration += duration

        return {
            'results': results.total,
            'search_duration': '{:.3f}'.format(results.duration),
            'fetch_duration': '{:.3f}'.format(fetch_duration),
            'total_duration':
            '{:.3f}'.format(fetch_duration + results.duration),
            'modules': mods,
        }
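Note that sort_by() is applied here without reassigning q; in redisearch-py the Query builder methods mutate the query object and also return it, so the in-place style above and explicit chaining are equivalent. A tiny sketch of both styles (query text and field names are illustrative):

from redisearch import Query

q = Query('redis').no_content().paging(0, 1000)
q.sort_by('last_modified')  # mutates q in place; the return value can be ignored

q2 = Query('redis').sort_by('stargazers_count', asc=False).paging(0, 50)  # chained form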
Example #3
 def paginate(self,
              query: str = "*",
              page: int = 1,
              num: int = 10,
              sort_by: str = 'id',
              direction: bool = True,
              slop: int = 0) -> Pagination:
     try:
         tic = time.perf_counter()
         start = (page - 1) * num
         # count the total docs to compute the number of pages
         total = self.idx.search(Query(query).slop(slop).paging(0, 0)).total
         # construct the query, paginated start and num
         q = Query(query).slop(slop).sort_by(sort_by,
                                             direction).paging(start, num)
         # perform the query
         items = self.idx.search(q).docs
         elapsed_time = time.perf_counter() - tic
         logger.debug(
             f"Pagination over {self.prefix}({query}) with {num} of {total} results done in {(elapsed_time*1000):0.3f}ms"
         )
         p = Pagination(page=page, per_page=num, total=total, items=items)
         return p
     except Exception as ex:
         raise rSearchException(str(ex), {'query': query})
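The method above issues two searches: a paging(0, 0) query to fetch only the total hit count, then the real page. A stripped-down sketch of the same count-then-page pattern against a plain client (index, query, and the sortable id field are illustrative):

from redisearch import Client, Query

client = Client('myIndex')
page, per_page = 2, 10

total = client.search(Query('redis').paging(0, 0)).total  # count only, no documents
docs = client.search(Query('redis').sort_by('id')
                     .paging((page - 1) * per_page, per_page)).docs
print(total, [d.id for d in docs])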
Example #4
 def normal_find_ids(self, limit_ids=None):
     _query = self.query_builder.build()
     q = Query(_query).no_content().paging(0, 1000000)
     if limit_ids is not None and len(limit_ids) > 0:
         q.limit_ids(*limit_ids)
     results = self.client.search(q)
     result_docs = results.docs
     return [res.id for res in result_docs]
Example #5
    def normal_find(self, limit_ids=None):
        built = self.query_builder.build()
        q = Query(built).paging(0, 1000000)
        if limit_ids is not None and len(limit_ids) > 0:
            q.limit_ids(*limit_ids)

        results = self.client.search(q)
        result_docs = results.docs
        return result_docs
Example #6
def index():
    query = '+@abv:[2 7] +@ibu:[1 +inf]'
    q = Query(query)
    result = g.rsbeer.search(q)
    res = docs_to_dict(result.docs)

    return render_template(
        'index.html',
        title='Home',
        count=result.total,
        duration=result.duration,
        rsindex=g.rsbeer.info()['index_name'],
        rsquery=q.query_string(),
        result=res
    )
Example #7
def display_ceo():
    form = request.form.to_dict()
    try:
        ceos = [[x.company, x.ceo, x.ceoTitle]
                for x in client.search(Query(form["ceo"]).limit_fields('ceo')).docs]
        return render_template('displayceos.html', ceos=ceos)
    except Exception as e:
        return "<html><body><script> var timer = setTimeout(function() { window.location='/searchceo' }, 5000); </script> Bad Query : %s try again with  &percnt;NAME&percnt;</body> </html>" % e
Example #8
def general_search(request) -> Response:
    """
    Default full text search on all resources if no sources are specified.

    Faceted search if sources are specified.

    **query**: Query to search.
    **source**: Multiple sources can be specified.
    """

    client = Client(INDEX_NAME, conn=get_redis_connection())

    query = request.GET.get('query')
    sort_stars = request.GET.get('sort-stars')
    resources = request.GET.getlist('source')
    languages = request.GET.getlist('language')
    awesome_lists = request.GET.getlist('awesome-list')

    query = format_query(query, resources, languages, awesome_lists)
    results = client.search(Query(query))
    results = [doc.__dict__ for doc in results.docs]
    if sort_stars == "true":
        results.sort(key=lambda x: int(x['stargazers_count']), reverse=True)

    return Response({
        "docs": results
    })
Example #9
def test():
    # Creating a client with a given index name
    client = Client('myIndex')

    # Creating the index definition and schema
    client.drop_index()
    client.create_index([TextField('title', weight=5.0), TextField('body')])

    # Indexing a document
    client.add_document(
        'doc1',
        title='RediSearch',
        body='Redisearch implements a search engine on top of redis')

    # Simple search
    res = client.search("search engine")

    # the result has the total number of results, and a list of documents
    print(res.total)  # "1"
    print(res.docs[0])

    # Searching with snippets
    # res = client.search("search engine", snippet_sizes={'body': 50})

    # Searching with complex parameters:
    q = Query("search engine").verbatim().no_content().paging(0, 5)
    res = client.search(q)
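The commented-out snippet_sizes call reflects an older idea; in redisearch-py the equivalent is the SUMMARIZE/HIGHLIGHT options on Query (used in the later site-search examples). A short sketch against the index created above:

q = Query('search engine').summarize(fields=['body'], context_len=10)
q.highlight(fields=['title', 'body'])
res = client.search(q)
print(res.docs[0].body)  # a trimmed fragment with the matched terms wrapped in <b>...</b> tags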
Example #10
def some(page):
    res = client.search(
        Query("*").limit_fields('match').paging(page, 5).with_payloads())
    return jsonify({
        "results": [doc.__dict__ for doc in res.docs],
        "total": res.total
    })
Example #11
def search():
    query = request.args.get("search") + "*"
    res = client.search(
        Query(query).limit_fields('title', 'body').with_payloads())
    return jsonify({
        "results": [doc.__dict__ for doc in res.docs],
        "total": res.total
    })
Example #12
 def py_search(self, query, result_limit=-1):
     if result_limit == -1:
         result_limit = self.LIMIT
     try:
         return self.client.search(Query(query).paging(0, result_limit))
     except Exception as e:
         print("TAS_Redisearch Error inside py_search Index:'%s' HOST:'%s' PORT:'%s'"
               % (self.table_name, self.host, self.port), file=sys.stderr)
         print(e, file=sys.stderr)
Example #13
 def search_exact_Query(self, string):
     string = self.StringEscape(string)
     query = "(@look_cmp:%s*)|(@cmp_k:%s*)" % (string, string)
     res = self.client.search(Query(query).paging(0, 10000))
     arr = []
     for x in res.docs:
         arr.append({"k": x.cmp_k, "n": x.cmp_name})
     arr.sort(key=lambda x: len(x['n']))
     return [{"message": "done", "data": arr}]
Example #14
    def search(self, filters, page, per_page):
        """
        Searches through redis
        :return:
        """
        q = Query(self.build_query(filters)).paging(
            (page - 1) * per_page, per_page).sort_by("user_id")

        return self.client.search(q)
Example #15
    def search_using_Query(self, search_text, index):
        query = '@DATA:"%s"' % search_text
        res = self.client.search(Query(query).paging(0, 10000))
        fs = []
        if res:
            for rr in res.docs:
                fs.append([rr.DOCID, rr.SECTION_TYPE, rr.GRIDID, rr.BBOX,
                           rr.ROWCOL, rr.DATA, rr.id, rr.PAGE])
        return fs
Example #16
def recherches_adresses():
    query = request.args.get("q")
    q = Query(query).language('french').paging(0, 10)
    res = client.search(q)
    adresses = {}
    for i, doc in enumerate(res.docs):
        adresses[i] = {
            "value": doc.id.replace("addr:", ""),
            "label": doc.adresse
        }
    return jsonify(adresses=adresses)
Example #17
def display_tags():
    tags = request.form.getlist('tgs')
    q = Query("@tags:{%s}" % ("|".join(tags))).sort_by('rank', asc=True).paging(0, 100)
    res = [[x.rank, x.company, x.tags] for x in client.search(q).docs]
    return render_template(
        'displaytags.html',
        query='FT.SEARCH fortune500 "@tags:{{{}}}" SORTBY rank ASC LIMIT 0 100'
        .format("|".join(tags)),
        companies=res)
Example #18
def product_search(request):
    search_key = request.POST.get('search_key', "").strip()
    if len(search_key) == 0:
        return JsonResponse({'product_detail_list': []})
    for t in [
            'tee', 't shirt', 't-shirt', 'tees', 't shirts', 't-shirts',
            'tshirts'
    ]:
        search_key = 'tshirt' if search_key == t else search_key
    client = Client('productIndex')
    q = Query(search_key)
    q.paging(0, 60)
    product_id_list = []
    try:
        res = client.search(q)
        for data in res.docs:
            product_id_list.append(data.id)
    except Exception:
        index = create_product_search_index()
        create_product_autocompleter()
        res = client.search(q)
        for data in res.docs:
            product_id_list.append(data.id)
    if len(product_id_list) == 0:
        sk = search_key.split()
        for substr in sk:
            if len(substr) > 0:
                q._query_string = substr
                res = client.search(q)
                for data in res.docs:
                    product_id_list.append(data.id)
        product_id_list = list(set(product_id_list))
    product_detail_list = product_view.cached_product_detail(product_id_list)
    context = {
        'product_detail_list': product_detail_list,
        'total_number_of_products': len(product_detail_list),
        'no_of_products': len(product_detail_list),
        'subtypes': True,
    }
    return JsonResponse(context)
Example #19
    def clientpush(self):
        client = Client('Checkout')

        client.create_index([
            NumericField('Key'),
            TextField('UsageClass'),
            TextField('CheckoutType'),
            TextField('MaterialType'),
            NumericField('CheckoutYear'),
            NumericField('CheckoutMonth'),
            NumericField('Checkouts'),
            TextField('Title'),
            TextField('Creator'),
            TextField('Subjects'),
            TextField('Publisher'),
            TextField('PublicationYear')
        ])

        db_connection, _ = self.connect()
        cursor = db_connection.cursor()
        cursor.execute('SELECT * FROM customers')
        results = cursor.fetchall()
        i = 0
        for result in results:
            client.add_document('doc%s' % i,
                                Key=result[0],
                                UsageClass=result[1],
                                CheckoutType=result[2],
                                MaterialType=result[3],
                                CheckoutYear=result[4],
                                CheckoutMonth=result[5],
                                Checkouts=result[6],
                                Title=result[7],
                                Creator=result[8],
                                Subjects=result[9],
                                Publisher=result[10],
                                PublicationYear=result[11])
            i += 1
            print(i)
        res = client.search('BOOK')

        print("{}   {}".format(res.total, res.docs[0].Title))
        res1 = client.search("use")
        print(res1)
        q = Query('use').verbatim().no_content().paging(0, 5)
        res1 = client.search(q)
        print(res1)
        cursor.close()
        db_connection.close()
Example #20
def searchdb(search_content):
    global total
    client = Client("BoxGroup", port=6379)
    search_content = ' '.join(jieba.cut(search_content))
    q = Query(search_content).verbatim().paging(0, 500)
    res = client.search(q)
    total = res.total
    titlelist = []
    i = 0
    while i < res.total:
        titlelist.append(res.docs[i].title)
        i += 1
    if res.total > 0:
        return titlelist
    elif res.total == 0:
        return "No result found"
Example #21
 def search(cls, query, offset=0, paginate=10):
     client = Client("tower", port=6379, host=os.getenv('REDIS_HOST'))
     q = Query(query).paging(offset, paginate)
     res = client.search(q)
     result = []
     for doc in res.docs:
         value_dict = {
             'id': doc.id,
             'client_ip': doc.clientIp,
             'service': doc.service,
             'error_message': doc.errorMessage,
             'stack_trace': doc.stackTrace,
             'numberRange': doc.numberRange
         }
         result.append(value_dict)
     print(res)
     return result
Example #22
def parse(query: str, search_site: SiteConfiguration) -> Query:
    # Dash postfixes confuse the query parser.
    query = query.strip().replace("-*", "*")
    query = UNSAFE_CHARS.sub(' ', query)
    query = query.strip()

    # For queries of a term that should result in an exact match, e.g.
    # "insight" (a synonym of RedisInsight), or "active-active", strip any star
    # postfix to avoid the query becoming a prefix search.
    if query.endswith('*'):
        exact_match_query = query.rstrip("*")
        if exact_match_query in search_site.all_synonyms:
            query = exact_match_query

    print(query)

    return Query(query).summarize('body', context_len=10).highlight(
        ('title', 'body', 'section_title'))
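parse() only builds the Query object; executing it and reading the summarized, highlighted fields might look roughly like this (the client, index name, and search_site value are assumptions, not part of the original):

from redisearch import Client

client = Client('docs')                   # hypothetical index
q = parse('insight*', search_site)        # search_site: some SiteConfiguration instance
res = client.search(q)
for doc in res.docs:
    print(doc.title, doc.body)            # matched terms arrive wrapped in <b>...</b> tags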
Example #23
def get_request_similar_skus():
    vector = image_url_to_vector(request.args['imageUrl'])

    # vector to base to BASE64
    base64_vector = base64.b64encode(vector).decode('ascii')
    base64_vector_escaped = base64_vector.translate(
        str.maketrans({
            "=": r"\=",
            "/": r"\/",
            "+": r"\+"
        }))

    q = Query('@vector:[' + base64_vector_escaped + ' range 5]').return_fields(
        'sku', 'imageUrl')
    result = []
    for doc in client.search(q).docs:
        result.append({'sku': doc.sku, 'imageUrl': doc.imageUrl})

    return result
Example #24
def product_search(query, limit=10, fuzzy_search=True):
    search_results = {"from_redisearch": True, "results": []}

    if not is_redisearch_enabled():
        # Redisearch module not enabled
        search_results["from_redisearch"] = False
        search_results["results"] = get_product_data(query, 0, limit)
        return search_results

    if not query:
        return search_results

    red = frappe.cache()
    query = clean_up_query(query)

    # TODO: Check perf/correctness with Suggestions & Query vs only Query
    # TODO: Use Levenshtein Distance in Query (max=3)
    ac = AutoCompleter(make_key(WEBSITE_ITEM_NAME_AUTOCOMPLETE), conn=red)
    client = Client(make_key(WEBSITE_ITEM_INDEX), conn=red)
    suggestions = ac.get_suggestions(
        query,
        num=limit,
        fuzzy=fuzzy_search
        and len(query) > 3  # Fuzzy on length < 3 can be real slow
    )

    # Build a query
    query_string = query

    for s in suggestions:
        query_string += f"|('{clean_up_query(s.string)}')"

    q = Query(query_string)

    results = client.search(q)
    search_results["results"] = list(map(convert_to_dict, results.docs))
    search_results["results"] = sorted(
        search_results["results"],
        key=lambda k: frappe.utils.cint(k["ranking"]),
        reverse=True)

    return search_results
Example #25
def parse(query: str, section: str, search_site: SiteConfiguration) -> Query:
    # Dash postfixes confuse the query parser.
    query = query.strip().replace("-*", "*")
    query = UNSAFE_CHARS.sub(' ', query)
    query = query.strip()

    # For queries of a term that should result in an exact match, e.g.
    # "insight" (a synonym of RedisInsight), or "active-active", strip any star
    # postfix to avoid the query becoming a prefix search.
    if query.endswith('*'):
        exact_match_query = query.rstrip("*")
        if exact_match_query in search_site.all_synonyms:
            query = exact_match_query

    if query and section:
        # Boost results in the section the user is currently browsing.
        query = f"((@s:{section}) => {{$weight: 10}} {query}) | {query}"

    return Query(query).summarize('body', context_len=10,
                                  num_frags=1).highlight(
                                      ('title', 'body', 'section_title'))
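The f-string above boosts documents from the section the user is currently browsing while still matching across the whole index. A tiny sketch of what the generated query string looks like (the section and query values are made up):

section, query = 'develop', 'pipeline'
boosted = f"((@s:{section}) => {{$weight: 10}} {query}) | {query}"
print(boosted)  # ((@s:develop) => {$weight: 10} pipeline) | pipeline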
Example #26
def get_search_results(keyword: str):
    '''
    Tries to retrieve search results from vulnerability databases, returns a list or None if record doesn't exist
    '''
    # Simple search
    if "CVE-" in keyword:
        keyword = keyword.replace("CVE-", "").replace(
            "-", " ")  # Cve search, search is different

    # Sanitize special characters
    keyword = keyword.replace(':', 'cc11').replace('.', 'pp22').replace(
        '*', 'ss33').replace('pp22ss33',
                             'pp22*')  # So 1.* version string wildcards work
    query = Query(keyword).paging(0, 1000000)
    res = client.search(query)
    for doc in res.docs:
        sanitized = doc.configurations \
        .replace("'",'"') \
        .replace("True", "true") \
        .replace("False", "false") \
        .replace('cc11',':').replace('pp22','.').replace('ss33','*') \
        .replace('\\\\','/bck') \
        .replace('/bck"','') \
        .replace('/bck','\\\\') # this is a hack to sanitize invalid json strings
        doc.configurations = jsonpickle.decode(sanitized)
        doc.description = doc.description.replace('cc11', ':').replace(
            'pp22', '.').replace('ss33', '*').replace('-',
                                                      '_')  # Undo escaping

    finalRes = [
        CVE(doc.id.replace('cve:', ''),
            vul_description=doc.description,
            sources=['nvd'],
            cpeConfigurations=doc.configurations) for doc in res.docs
    ]
    return finalRes

Example #27
 def search(self,
            query: str = "*",
            start: int = 0,
            num: int = 10,
            sort_by: str = 'id',
            direction: bool = True,
            slop=0) -> list:
     """ perform a query with the index
         ## Param
         * query - is the string query
         * start - page form record start
         * num - number of records to include into the result
         * sort_by - field to order by, defaul: *id*
         * direction - asc True desc False
         * slop - number of non matched terms (Levensthein distance), default: *0*
         ## Exception
         rSearchException
         ## Return 
         A list of records
     """
     try:
         q = Query(query).slop(slop).sort_by(sort_by,
                                             direction).paging(start, num)
         result = self.idx.search(q)
         if len(self.dependant) == 0:
             return result
         # discover first level foreign keys
         docs = result.docs
         if result.total > 0:  # and len(self.dependant)>0:
             docs_with_discover = []  # new list of docs
             # for each document
             for doc in self.db.docs_to_dict(result.docs):
                 # append to the list of new docs
                 docs_with_discover.append(self.discover(doc))
             docs = docs_with_discover
         # return the result as a redisearch-style result
         return DotMap(total=result.total, docs=docs)
     except Exception as ex:
         raise rSearchException(str(ex), {'query': query})
Example #28
    def get_query(self, query_string, limit_range, filters):
        INF = '+inf'
        NEG_INF = '-inf'

        q = Query(query_string)

        for index in self.numeric_indexes:
            lower, upper = NEG_INF, INF
            if filters.get(index) is not None:
                lower, upper = filters[index].split(',')
            q.add_filter(NumericFilter(index, lower, upper))

        # default paging is performed for only 10 entries
        # We allow returning all entries (4000) and
        # paging is performed through the limit ids.
        q.limit_ids(*limit_range).paging(0, 4000)

        return q
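add_filter and limit_ids map to the FT.SEARCH FILTER and INKEYS arguments respectively. A self-contained sketch of the same combination outside the class (index, field, and ids are illustrative):

from redisearch import Client, Query, NumericFilter

client = Client('products')                    # hypothetical index
q = Query('shoes')
q.add_filter(NumericFilter('price', 10, 250))  # numeric range filter on a NUMERIC field
q.limit_ids('doc:1', 'doc:7', 'doc:42').paging(0, 4000)  # restrict matching to known ids
res = client.search(q)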
Example #29
 def _perform_query(self):
     q = Query(self._prepare_query()).with_scores()
     res = client.search(q)
     return res
Example #30
 def search(self, query):
     q = Query(query).paging(0, 5).verbatim()
     res = self.client.search(q)
     # print(res.total)  # "1"
     return res