def connect(self, **kwargs):
    """Connect to the RediSearch database, retrying up to the configured limit.

    If an existing connection already answers PING, this is a no-op.
    Otherwise runs the pre-connect hook, attempts to connect once per
    second up to ``self._max_connect_attempts`` times, and runs the
    post-connect hook on success.

    Raises:
        ConnectionError: if every attempt fails; chained from the last
            redis connection error when one was raised.
    """
    # Already connected and responsive — nothing to do.
    if self.ping():
        return
    self._pre_connect(**kwargs)
    last_exc = None
    for connect_attempt in range(1, self._max_connect_attempts + 1):
        try:
            conn = redis.Redis(
                host=self._host,
                port=self._port,
                db=self._n,
                **self._conn_info,
            )
            self._conn = redisearch.Client(self._index, conn=conn)
            if self.ping():
                break
        except redis.exceptions.ConnectionError as exc:
            # Keep the most recent failure so it can be chained below.
            last_exc = exc
            # BUGFIX: the original f-string used '{self._host:self._port}',
            # which parses 'self._port' as a format spec and blows up at
            # runtime; '{host}:{port}' is the intended text.
            print('Warning: failed connecting to RediSearch database at '
                  f'{self._host}:{self._port}, reconnection attempt '
                  f'{connect_attempt} ...', file=sys.stderr)
            time.sleep(1)
    else:
        # BUGFIX: the original did 'raise ex(...)' where 'ex' could be an
        # exception *instance* (not callable). Raise a fresh ConnectionError
        # and chain the last underlying failure instead.
        raise ConnectionError(
            'failed connecting to RediSearch database at '
            f'{self._host}:{self._port}.'
        ) from last_exc
    self._post_connect()
def get_file_fromdb(filetype, taskid):
    """Download a task's prediction results as a csv/tsv attachment.

    Args:
        filetype: "tsv" for tab-separated output, anything else yields csv.
        taskid: task identifier; also the RediSearch index name and the
            document id prefix ("<taskid>_<row>").

    Returns:
        A Flask ``Response`` streaming the file with a download filename of
        ``prediction_result-<taskid>.<filetype>``.
    """
    task = db.hgetall("%s:cols" % taskid)
    cols = ast.literal_eval(task['cols'])
    sep = "\t" if filetype == "tsv" else ","
    client = redisearch.Client(taskid)
    num_docs = int(client.info()['num_docs'])
    rows = []
    for i in range(num_docs):
        doc = client.load_document("%s_%d" % (taskid, i))
        listparam = []
        for col in cols:
            # BUGFIX/idiom: getattr() replaces eval("doc.%s" % col) — same
            # attribute lookup without evaluating column names as code.
            value = getattr(doc, col)
            if col == "TF_gene":
                value = '"%s"' % value  # quote gene names to protect separators
            listparam.append(value)
        rows.append(sep.join(listparam))
    # Header always ends with a newline; data rows have no trailing newline
    # (matches the original string-building behavior exactly).
    csv = "%s\n" % sep.join(cols) + "\n".join(rows)
    # return the csv/tsv file without having to save it
    return Response(csv,
                    mimetype="text/csv",
                    headers={
                        "Content-disposition":
                        "attachment; filename=prediction_result-%s.%s" %
                        (taskid, filetype)
                    })
def drop_index(task_id):
    """Remove a task's RediSearch index and its bookkeeping keys from redis.

    Registered as a celery task so removal can be scheduled with a countdown.
    """
    print("Remove key/index for %s from redis" % task_id)
    redisearch.Client(task_id).drop_index()
    for key in (task_id, "%s:cols" % task_id):
        db.delete(key)
def _get_redisearch_index_client(index: str) -> redisearch.Client:
    """Get an initialized redisearch client for an index

    Args:
        index: Enum for the relevant index

    Returns:
        Initialized redisearch client for given index
    """
    global _redis_connection
    connection = _redis_connection
    if connection is None:
        # Lazily create the shared connection on first use.
        connection = dragonchain_redis._initialize_redis(
            host=REDISEARCH_ENDPOINT, port=REDIS_PORT)
        _redis_connection = connection
    return redisearch.Client(index, conn=connection)
def _get_redisearch_index_client(index: str) -> redisearch.Client:
    """Get an initialized redisearch client for an index

    Args:
        index: Enum for the relevant index

    Returns:
        Initialized redisearch client for given index
    """
    global _redis_connection
    if _redis_connection is None:
        # Guard against use when the feature flag is off.
        if not ENABLED:
            raise RuntimeError("Redisearch was attempted to be used, but is disabled")
        # Lazily create the shared connection on first use.
        _redis_connection = dragonchain_redis._initialize_redis(
            host=REDISEARCH_ENDPOINT,
            port=REDIS_PORT,
        )
    return redisearch.Client(index, conn=_redis_connection)
def get_redisearch_cli(chat_id):
    """Return a redisearch client for the chat's index, creating the index if missing.

    Args:
        chat_id: chat identifier used to derive the index name.

    Returns:
        A ``redisearch.Client`` bound to the chat's index.
    """
    idx = get_index_name(chat_id)
    # TODO supports for redis authentication & cluster
    cli = redisearch.Client(idx, host=REDIS_HOST, port=REDIS_PORT)
    logger.debug('get client with idx %s for chat %s', idx, chat_id)
    try:
        # cli.drop_index()  # TODO dedicate API for dropping index
        cli.create_index([
            redisearch.TextField('msg', weight=5.),
            redisearch.TextField('msg_id', weight=0.),
            redisearch.TextField('user', weight=0.),
            redisearch.TextField('ts', weight=0.),
        ])
    except redis.exceptions.ResponseError as e:
        # BUGFIX: Python 3 exceptions have no '.message' attribute, so the
        # original check itself raised AttributeError. Compare the rendered
        # message instead; "Index already exists" is expected and benign.
        if str(e) != 'Index already exists':
            raise
    return cli
def __init__(self):
    """Set up redis and redisearch clients and ensure the product index exists."""
    # setup redis clients
    self.r = redis.Redis(host=redis_host, port=redis_port)
    self.rs = redisearch.Client('product_name', host=redis_host, port=redis_port)
    try:
        self.rs.create_index(
            (redisearch.NumericField('id'),
             redisearch.TextField('name'),
             redisearch.TextField('description'),
             redisearch.TextField('vendor'),
             redisearch.NumericField('price'),
             redisearch.TextField('currency'),
             redisearch.TextField('category'),
             redisearch.TextField('images')))
    except Exception as exc:
        # FIX: the original printed a constant message and discarded the
        # exception; include the reason (typically "Index already exists").
        print(f'error creating index: {exc}')
    print(f'index info: {self.rs.info()}')
def filter_fromdb(task_id, search_filter, start, length=-1, order_col="row",
                  order_asc=True):
    """Return one page of a task's documents, optionally filtered.

    Args:
        task_id: RediSearch index name for the task.
        search_filter: filter spec passed to ``dofilter``; falsy means no filter.
        start: zero-based offset of the first row to return.
        length: page size; -1 means "everything from start to the end".
        order_col: field to sort by.
        order_asc: sort ascending when true.

    Returns:
        Dict with 'recordsTotal', 'recordsFiltered' and 'data' (the page of docs).
    """
    client = redisearch.Client(task_id)
    total = int(client.info()['num_docs'])
    result = {'recordsTotal': total}
    if length == -1:
        length = total - start
    if search_filter:
        # Filtering happens client-side, so fetch every document first:
        # the filtered row count is needed before paging can be applied.
        query = redisearch.Query("*").sort_by(order_col, order_asc).paging(0, total)
        docs = client.search(query).docs
        matching = [doc for doc in docs if dofilter(search_filter, doc)]
        result['recordsFiltered'] = len(matching)
        result['data'] = matching[start:start + length]
    else:
        # No filter: let RediSearch do the paging server-side.
        query = redisearch.Query("*").sort_by(order_col, order_asc).paging(start, length)
        res = client.search(query)
        result['recordsFiltered'] = res.total
        result['data'] = res.docs
    return result
def savetoredis(req_id, colnames, datavalues, expired_time):
    """Index a task's result rows in RediSearch and schedule their removal.

    Args:
        req_id: request id; used as the index name and document-id prefix.
        colnames: ordered column names for the result table.
        datavalues: list of row dicts keyed by column name.
        expired_time: seconds until the scheduled ``drop_index`` runs.
    """
    db.hmset("%s:cols" % req_id, {'cols': colnames})
    client = redisearch.Client(req_id)
    # Columns whose name suggests a numeric value become sortable numeric
    # fields; everything else is sortable text.
    numeric_markers = ("score", "diff", "row", "z_score", "p_value")
    schema = []
    for col in colnames:
        if any(marker in col for marker in numeric_markers):
            schema.append(redisearch.NumericField(col, sortable=True))
        else:
            schema.append(redisearch.TextField(col, sortable=True))
    client.create_index(schema)
    for i, row in enumerate(datavalues):
        fields = {col: row[col] for col in colnames}
        client.add_document("%s_%d" % (req_id, i), **fields)
    # Expiry is handled by scheduling drop_index rather than redis EXPIRE.
    drop_index.apply_async((req_id, ), countdown=expired_time)
import redis
import redisearch
import json
from . import config

# Shared connection and search client for the "apps_search" index.
redis_conn = redis.Redis(
    host=config.settings.redis_host,
    port=config.settings.redis_port,
    decode_responses=True,
)
redis_search = redisearch.Client("apps_search", conn=redis_conn)


def initialize():
    """Create the apps search index if no apps are indexed yet."""
    apps = redis_conn.smembers("apps:index")
    if not apps:
        try:
            redis_search.create_index([
                redisearch.TextField("appid"),
                redisearch.TextField("name"),
                redisearch.TextField("summary"),
                # Positional arg is the field weight: descriptions count
                # less toward relevance than names/summaries.
                redisearch.TextField("description", 0.2),
                redisearch.TextField("keywords"),
            ])
        # NOTE(review): bare except silently swallows every failure here —
        # presumably to tolerate "Index already exists"; consider narrowing
        # to redis.exceptions.ResponseError.
        except:
            pass


# NOTE(review): this definition continues beyond the visible chunk.
def search(userquery: str):
import redis
import logging
import time
import redisearch

# Open handle for redis-server installed at CentOS Spark VM using Redissearch package.
index_name = 'govbld'
redis_server = '10.0.0.141'
redis_port = '6379'
rs = redisearch.Client(index_name, host=redis_server, port=redis_port)

# Open handle for redis-server installed at CentOS VM using redis package.
r = redis.Redis(host=redis_server, port=redis_port)

# Get logger to log proper Info/Debug messages.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(name)s %(levelname)s %(message)s')
logger = logging.getLogger('Search_Redis')

# To calculate total time to get required result.
# NOTE(review): process_time() measures CPU time, not wall-clock elapsed
# time — confirm that is what's intended for timing a network search.
t = time.process_time()

# search same address using redissearch api
search_address = '5100 E WINNEMUCCA BLVD'
try:
    for doc in rs.search(search_address).docs:
        logger.info(f'{doc}')
        # Keep the last matching document's id and parking-space count.
        found_locationcode = doc.id
        total_parking_spaces = doc.Total_Parking_Spaces
except Exception as err:
    # NOTE(review): 'raise err' re-raises the same exception; a bare
    # 'raise' would preserve the original traceback more cleanly.
    raise err
def main():
    """Interactive demo: load a record collection into RediSearch and query it.

    Accepts console queries of the form '<field> <term>' where field is one
    of title/artist/year/genre; 'quit' exits. Year takes one value or a
    'year <from> <to>' range.
    """
    print("hello!")
    r = redis.Redis(host=redis_host, port=redis_port)
    rs = redisearch.Client('recordIndex', redis_host, redis_port)
    # flush to get a fresh db
    # TODO - remove when dockerized
    r.flushall()
    # Sample data loaded into the index.
    # NOTE(review): 'Steve Biller Band' looks like a typo for
    # 'Steve Miller Band' — confirm before changing the data.
    record_collection = [{
        'title': 'Brothers and Sisters',
        'artist': 'Allman Brothers',
        'year': 1973,
        'genre': ['rock', 'southern rock', 'blues rock']
    }, {
        'title': 'Aja',
        'artist': 'Steely Dan',
        'year': 1977,
        'genre': ['rock', 'pop']
    }, {
        'title': 'Can\'t Buy a Thrill',
        'artist': 'Steely Dan',
        'year': 1972,
        'genre': ['rock', 'pop']
    }, {
        'title': 'Deguello',
        'artist': 'ZZ Top',
        'year': 1979,
        'genre': ['rock']
    }, {
        'title': 'American Beauty',
        'artist': 'Grateful Dead',
        'year': 1970,
        'genre': ['rock', 'psychedelic rock']
    }, {
        'title': 'Second Helping',
        'artist': 'Lynard Skynard',
        'year': 1974,
        'genre': ['rock', 'southern rock']
    }, {
        'title': 'The Joker',
        'artist': 'Steve Biller Band',
        'year': 1973,
        'genre': ['rock', 'blues rock']
    }, {
        'title': 'Book of Dreams',
        'artist': 'Steve Biller Band',
        'year': 1977,
        'genre': ['rock']
    }, {
        'title': 'Rumours',
        'artist': 'Fleetwood Mac',
        'year': 1977,
        'genre': ['rock', 'pop']
    }, {
        'title': 'Where We All Belong',
        'artist': 'Marshall Tucker Band',
        'year': 1974,
        'genre': ['rock', 'southern rock']
    }]
    try:
        # Sortable text/numeric fields; genre is a comma-separated tag field.
        rs.create_index((redisearch.TextField('title', sortable=True),
                         redisearch.TextField('artist', sortable=True),
                         redisearch.NumericField('year', sortable=True),
                         redisearch.TagField('genre', separator=',')))
    except Exception:
        print(f'Error creating index: {sys.exc_info()}')
    print(f'index info: {rs.info()}')
    run = True
    load_data(rs, record_collection)
    while run:
        txt = input("enter a search term: ")
        if (txt == "quit"):
            run = False
            break
        # Split into field keyword and the rest of the query text.
        # NOTE(review): a bare keyword with no term (e.g. just 'title')
        # raises IndexError on txt_arr[1] below.
        txt_arr = txt.split(' ', 1)
        print(f'searching {txt_arr}')
        if (txt_arr[0] == 'title'):
            res = rs.search(f'@title:{txt_arr[1]}')
            print(res)
        elif (txt_arr[0] == 'artist'):
            res = rs.search(f'@artist:{txt_arr[1]}')
            print(res)
        elif (txt_arr[0] == 'year'):
            # Year accepts a single value or a from/to pair; a single
            # value becomes a degenerate [y y] range.
            full_txt_arr = txt.split(' ')
            former = full_txt_arr[1]
            latter = full_txt_arr[1]
            if (len(full_txt_arr) == 3):
                latter = full_txt_arr[2]
            res = rs.search(f'@year:[{former} {latter}]')
            print(res)
        elif (txt_arr[0] == 'genre'):
            # TODO: genre (tag) search not implemented yet.
            pass
        else:
            print("invalid query")