def get_rc():
    redis_conn = StrictRedis(
        host=server_config.REDIS_HOST,
        port=server_config.REDIS_PORT,
        password=server_config.REDIS_PW,
    )
    if server_config.REDIS_PW != '':
        # Note: password= above already authenticates on connect, so this
        # explicit AUTH is belt-and-braces.
        redis_conn.execute_command('AUTH %s' % server_config.REDIS_PW)
    return redis_conn
def overall_redis_session():
    """Create a session-level connection to a temporary redis server."""
    # list of redis modules that need to be loaded (would be much nicer to do
    # this automatically somehow, maybe reading from the real redis.conf?)
    redis_modules = [
        '/opt/redis-cell/libredis_cell.so',
    ]

    with RedisServer() as temp_redis_server:
        redis = StrictRedis(**temp_redis_server.dsn())

        for module in redis_modules:
            redis.execute_command('MODULE LOAD', module)

        yield redis
def prep_redis(file_):
    try:
        LOGGER.info('Processing %s', file_)
        parts = urlsplit(os.environ.get('REDIS_URI', 'redis://localhost'))
        redis = StrictRedis(host=parts.hostname, port=parts.port or 6379,
                            db=parts.path[1:] or 0)
        with open(file_) as fh:
            config = json.load(fh)
        for command, entries in config.items():
            for name, values in entries.items():
                redis.execute_command(command, name, *values)
    except Exception:
        LOGGER.exception('Failed to execute redis commands.')
        sys.exit(-1)
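# Illustrative use of prep_redis above. The file name and contents are
# invented here to show the expected {command: {key: [args]}} shape of the
# JSON config:
import json

fixture = {
    "SET": {"greeting": ["hello"]},
    "RPUSH": {"queue:jobs": ["a", "b", "c"]},
}
with open('fixtures.json', 'w') as fh:
    json.dump(fixture, fh)

prep_redis('fixtures.json')  # runs SET greeting hello, then RPUSH queue:jobs a b c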
def test_redis_subscribed_channels_leak(self, manager):
    if not manager.app.conf.result_backend.startswith('redis'):
        # pytest.skip() raises internally, so no explicit `raise` is needed
        pytest.skip('Requires redis result backend.')

    redis_client = StrictRedis()
    async_result = chord([add.s(5, 6), add.s(6, 7)])(delayed_sum.s())
    for _ in range(TIMEOUT):
        if async_result.state == 'STARTED':
            break
        sleep(0.2)
    channels_before = len(redis_client.execute_command('PUBSUB CHANNELS'))

    assert async_result.get(timeout=TIMEOUT) == 24

    channels_after = len(redis_client.execute_command('PUBSUB CHANNELS'))
    assert channels_after < channels_before
class RedisDB:
    """Service for convenient interaction with a Redis database."""

    def __init__(self):
        self.store = StrictRedis(host=os.getenv('REDIS_HOST', '127.0.0.1'),
                                 port=int(os.getenv('REDIS_PORT', '6379')))

    def _get_current_unix_time(self) -> int:
        """
        Return the current time as a unix timestamp.

        :return: unix time (int)
        """
        return self.store.execute_command('TIME')[0]

    def zadd_with_unix_time(self, key: str, values: list) -> int:
        """
        Modified zadd for conveniently storing links under a single key with
        different values, using the current unix time as the score.

        :param key: key
        :param values: values to store
        :return: number of values added
        """
        score = self._get_current_unix_time()
        values_score = {value: score for value in values}
        return self.store.zadd(key, values_score)

    def zrange_decoded(self, key: str, start=0, end=-1):
        """
        Modified zrange that decodes the byte strings returned by Redis into
        regular strings.

        :param key: key
        :param start: start index
        :param end: end index
        :return: decoded list
        """
        data = self.store.zrange(key, start, end)
        return [value.decode('UTF-8') for value in data]

    def zrange_by_unix_time(self, key: str, from_unix_time: int,
                            to_unix_time: int) -> list:
        """
        Modified zrangebyscore that decodes byte strings into regular strings
        and filters by the unix time stored in the score.

        :param key: key
        :param from_unix_time: start of the time range
        :param to_unix_time: end of the time range
        :return: filtered list of plain strings
        """
        links = self.store.zrangebyscore(key, from_unix_time, to_unix_time)
        return [link.decode('UTF-8') for link in links]
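# Hypothetical usage sketch of RedisDB above; the key name and URLs are
# invented for illustration:
db = RedisDB()
db.zadd_with_unix_time('links:user42', ['https://a.example', 'https://b.example'])
# Both links were just stored, so a [0, now] window returns them decoded:
links = db.zrange_by_unix_time('links:user42', 0, db._get_current_unix_time())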
def is_breached_password(password: str) -> bool:
    """Return whether the password is in the breached-passwords list."""
    redis = StrictRedis(unix_socket_path=BREACHED_PASSWORDS_REDIS_SOCKET)
    hashed = sha1(password.encode('utf-8')).hexdigest()
    try:
        return bool(
            redis.execute_command('BF.EXISTS', BREACHED_PASSWORDS_BF_KEY, hashed))
    except (ConnectionError, ResponseError):
        # server isn't running, bloom filter doesn't exist or the key is a
        # different data type
        return False
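# Usage sketch: this assumes a redis server is listening on the unix socket
# with the RedisBloom module loaded and the bloom filter already populated.
if is_breached_password('hunter2'):
    print('This password appears in a known breach; pick another one.')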
def get_redis_link_wrapper(host, port, decode_responses=False):
    link = StrictRedis(host="127.0.0.1", port=7000, decode_responses=True)

    # Missing slot 5460
    bad_slots_resp = [
        [0, 5459, [b'127.0.0.1', 7000], [b'127.0.0.1', 7003]],
        [5461, 10922, [b'127.0.0.1', 7001], [b'127.0.0.1', 7004]],
        [10923, 16383, [b'127.0.0.1', 7002], [b'127.0.0.1', 7005]],
    ]

    # Stub out execute_command so every call returns the doctored slot table
    link.execute_command = lambda *args: bad_slots_resp

    return link
class CheckSentinelQuorum(SensuPluginCheck):
    def setup(self):
        # Setup is called with self.parser set and is responsible for setting up
        # self.options before the run method is called
        self.parser.add_argument(
            '-H', '--host',
            required=True,
            type=str,
            help='Hostname or IP address of one sentinel server'
        )
        self.parser.add_argument(
            '-p', '--port',
            required=True,
            type=int,
            help='Port number of one sentinel server'
        )
        self.parser.add_argument(
            '-C', '--cluster',
            required=True,
            type=str,
            help='Cluster name of one sentinel server'
        )
        self.parser.add_argument(
            '-c', '--critical',
            required=True,
            type=int,
            help='Integer critical level to output'
        )

    def run(self):
        self.conn = StrictRedis(host=self.options.host, port=self.options.port,
                                socket_timeout=5)
        try:
            response = self.conn.execute_command(
                "SENTINEL CKQUORUM {0}".format(self.options.cluster))
            if isinstance(response, bytes):
                response = response.decode('utf-8')
        except ResponseError as e:
            # exceptions have no .message attribute in Python 3; use str(e)
            response = str(e)

        quorum = response.startswith('OK')
        if quorum != self.options.critical:
            self.ok("Quorum is OK: {0}".format(response))
        else:
            self.critical("Quorum is KO: {0}".format(response))
def cluster_ready(self, proxies):
    # Overmoon caches the data, so even once it reports ready the latest
    # metadata may not have been synchronized to the proxies yet; check
    # each proxy directly.
    for proxy in proxies:
        client = StrictRedis(host=proxy['host'], port=proxy['port'])
        try:
            r = client.execute_command('cluster', 'nodes')
        except Exception as e:
            raise Exception('{}:{}: {}'.format(proxy['host'], proxy['port'], e))
        lines = list(r.decode('utf-8').split('\n'))
        if len(lines) <= 1:
            return False
        TRIMMED_LEN = 20
        # This should not be the info from the last cluster.
        if self.cluster_name[0:TRIMMED_LEN] not in lines[0]:
            return False
    return True
def get_redis_link_wrapper(*args, **kwargs):
    link = StrictRedis(host="127.0.0.1", port=7000, decode_responses=True)

    orig_exec_method = link.execute_command

    def patch_execute_command(*args, **kwargs):
        if args == ('cluster', 'slots'):
            # Missing slot 5460
            return [
                [0, 5459, [b'127.0.0.1', 7000], [b'127.0.0.1', 7003]],
                [5461, 10922, [b'127.0.0.1', 7001], [b'127.0.0.1', 7004]],
                [10923, 16383, [b'127.0.0.1', 7002], [b'127.0.0.1', 7005]],
            ]
        return orig_exec_method(*args, **kwargs)

    link.execute_command = patch_execute_command

    return link
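# Hedged sketch of how a test might wire the wrapper in. The (host, port,
# decode_responses) signature of the earlier variant mirrors redis-py-cluster's
# NodeManager.get_redis_link, but patching it this way is an assumption about
# the surrounding test harness, not a documented API:
#
#   monkeypatch.setattr(NodeManager, 'get_redis_link',
#                       staticmethod(get_redis_link_wrapper))
#
# Any subsequent slot-table refresh then sees the doctored response in which
# slot 5460 is missing.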
class OrderBookManager:
    BATCH_SIZE = 10

    def __init__(self, product_manager: ProductManager) -> None:
        self.product_manager = product_manager
        self.order_books = {product_id: OrderBook(product_manager.get_product(product_id))
                            for product_id in self.product_manager.get_product_ids()}
        self.network_manager = NetworkManager()
        self.redis_server = StrictRedis(host='localhost', port=6379, db=0,
                                        encoding="utf-8", decode_responses=True)

    def get_order_book(self, product_id: str) -> OrderBook:
        return self.order_books[product_id]

    def get_network_manager(self) -> NetworkManager:
        return self.network_manager

    def update_network_manager(self) -> NetworkManager:
        for side in OrderSide:
            products = self.redis_server.execute_command(
                'SPOP', self.__get_pr_redis_key(side), self.BATCH_SIZE)
            for next_product in products:
                self.network_manager.update_from_order_book(
                    self.get_order_book(next_product), side)
        return self.get_network_manager()

    @staticmethod
    def __get_pr_redis_key(side: OrderSide) -> str:
        return 'order_book:changed_products:{}'.format(side.name)

    def __add__(self, order: Order) -> OrderBook:
        order_book = self.get_order_book(order.get_product_id())
        val = order_book + order
        return val

    # allow subtraction of an order from an order book;
    # this should be used for cancellation
    def __sub__(self, order: Order) -> OrderBook:
        order_book = self.get_order_book(order.get_product_id())
        val = order_book - order
        return val
# encoding:utf-8
'''
Bring the friends of user "V_tz" online at random 1-5 second intervals.
'''
import requests
import random
import time
from threading import Thread

from redis import StrictRedis

# decode_responses=True so member names come back as str, not bytes
conn = StrictRedis(decode_responses=True)
# random.choice needs a sequence, so materialize the set into a list
users = list(conn.execute_command("SMEMBERS", "V_tz"))


def worker():
    while True:
        username = random.choice(users)
        requests.get("http://127.0.0.1/" + username)
        time.sleep(random.randint(1, 5))


if __name__ == "__main__":
    for _ in range(20):
        Thread(target=worker).start()
def run_test(self):
    # Never use the original log template directly
    self.plugin_run_log = copy.copy(self.plugin_base_run_log)
    # Common log fields
    self.plugin_run_log["id"] = self.plugin_id
    self.plugin_run_log["oid"] = self.plugin_oid
    self.plugin_run_log["wid"] = self.worker_info_id
    self.plugin_run_log["uid"] = self.vuser_index
    self.plugin_run_log["st"] = round(time.time() * 1000)
    # Populate data before the run
    run_init_result, run_init_log = self.init_before_run()
    if run_init_result:
        self.plugin_run_log["rr_rb"] = self.request_command
        # Fetch the database engine from the parameter store
        redis_pool = self.run_parameter_controller.get(self.request_pool_name)
        if redis_pool:
            try:
                db_connect = StrictRedis(connection_pool=redis_pool)
                db_result = db_connect.execute_command(self.request_command)
            except Exception as e:
                # Record the timing first
                self.plugin_run_log['et'] = round(time.time() * 1000)
                self.plugin_run_log['t'] = round(
                    self.plugin_run_log['et'] - self.plugin_run_log['st'])
                self.plugin_run_log['s'] = False
                # Extract the error code and message from the exception
                self.response_code = -1
                self.plugin_run_log["c"] = self.response_code
                self.plugin_run_log['f'] = 'Request error: %s;' % e
            else:
                # Record the timing first
                self.plugin_run_log['et'] = round(time.time() * 1000)
                self.plugin_run_log['t'] = round(
                    self.plugin_run_log['et'] - self.plugin_run_log['st'])
                self.plugin_run_log['s'] = True
                """
                Common redis return values:
                b'OK'                           : bytes (utf-8 decodable)
                ...                             : int
                b'...'                          : bytes (not utf-8 decodable)
                [b'...']                        : list (utf-8 decodable)
                    b'...'                      : bytes (utf-8 decodable)
                    None
                [b'...', None]                  : list of bytes (utf-8 decodable) / None
                [b'...1', b'...2']              : (fixed length of 2)
                [b'subscribe', b'redisChat', 1] : list (utf-8 decodable)
                    [b'...']                    : list (utf-8 decodable) (only depth 2 seen so far)
                exception
                """
                if self.request_var != "":
                    # Store the data in the shared parameter store as
                    # {param name: {vuser index 1: value 1, vuser index 2: value 2}}
                    if type(self.run_parameter_controller.get(
                            self.request_var)) is VuserDataBottle:
                        self.run_parameter_controller.get(
                            self.request_var).update({self.vuser_index: db_result})
                    else:
                        v = VuserDataBottle()
                        v.update({self.vuser_index: db_result})
                        self.run_parameter_controller.update({self.request_var: v})
            finally:
                # Shared trailing steps
                # 5. Run assertions against our own result
                for pa in self.plugins_assertion:
                    pa.run_test()
                # 6. Run post-processor plugins
                for ppp in self.plugins_postprocessor:
                    ppp.run_test()
        else:
            # Report an error when the connection-pool parameter variable
            # does not exist; record the timing first
            self.plugin_run_log['et'] = round(time.time() * 1000)
            self.plugin_run_log['t'] = round(
                self.plugin_run_log['et'] - self.plugin_run_log['st'])
            self.plugin_run_log['s'] = False
            self.response_code = -1
            self.plugin_run_log["c"] = self.response_code
            self.plugin_run_log['f'] = 'Request error: connection pool is not defined;'
    else:
        # Record the timing first
        self.plugin_run_log['et'] = round(time.time() * 1000)
        self.plugin_run_log['t'] = round(
            self.plugin_run_log['et'] - self.plugin_run_log['st'])
        self.plugin_run_log['s'] = False
        self.response_code = -1
        self.plugin_run_log["c"] = self.response_code
        self.plugin_run_log['f'] = 'Request error: %s;' % run_init_log
    # Stash the run log in the log controller
    self.run_log_controller.set(self.plugin_run_log)
print("dirpath=" + dirpath) for file in filenames: # print("file=" + file) if ("json" in file): shortname = file.replace(".json", "") print("shortname is" + shortname) openname = dirpath + "/" + file print("openname is " + openname) data = json.loads(open(openname, "r").readline()) print("data is ") print(data['data']) print("members is") print(data['data']['members']) for member in data['data']['members']: print("the id is", member['id']) memberIdInt = member['id'] memberId = str(memberIdInt) memberIdFloat = float(memberIdInt) keyname = "member:" + memberId idxKeyName = "memberIndex:" + memberId zkeyname = "identifier:" # memberStr = json.dumps(member) memberStr = '{"id": 2048306}' jsonCommand = "json.set " + keyname + " . '" + memberStr + "'" # jsonCommand = "json.set " + keyname + " $ '" + memberStr + "' INDEX ID" print("jsonCommand is " + jsonCommand) # can't use jsonset command because index not available but otherwise, this worked # rj.jsonset(keyname, Path.rootPath(), member) rj.execute_command(jsonCommand) # rj.execute_command("json.index add ID $.identifiers.type")
'''
MODULE DOCSTRING
'''
# imports std lib
# imports 3rd party libs
# imports sprayer
from itertools import izip

from redis import StrictRedis

s = StrictRedis(port=55511)
#s = StrictRedis(port=33322)
print s.ping()
print 'second:', s.execute_command('PING')


def get_masters():
    masters_as_list = s.execute_command('SENTINEL', 'MASTERS')
    masters = {}
    for master_l in masters_as_list:
        # convert the list representing a master to a dictionary
        i = iter(master_l)
        master_d = dict(izip(i, i))
        masters[master_d['name']] = master_d
    return masters

print get_masters()


def promote_original_master(name):
class Outputer:
    def __init__(self, conf, processor):
        config_conf = conf['processor'][processor]['config']
        self.nodename = config_conf["nodename"]
        self.eqpt_no = config_conf["eqpt_no"]
        self.status_set = conf['processor'][processor]['status']
        self.status_mapping = {'run': 1, 'stop': 0}
        self.redis = StrictRedis(host='localhost', port=6379, db=1,
                                 socket_timeout=3)

    def message_process(self, msgline, task, measurement):
        """Take one log line, parse it, extract the relevant information,
        build the dict that influxdb needs, and return it.

        :param msgline: the log line
        :param task: the task name of this experiment run
        :param measurement: the measurement (table) name for this experiment
        :return: a dict in influxdb line-protocol form
        """
        not_valid = Outputer.check_valid(msgline)
        if not_valid:
            influx_json = {
                "fields": {
                    'msg': msgline
                },
                "time": 1000000 * int(time.time()),
                "measurement": 'new_issueline'
            }
            return 1, 'wrong format.', influx_json

        log_list = msgline.split(' ', 5)
        # separate message and time; named log_time so it doesn't shadow the
        # `time` module (shadowing made the early-return branch above raise
        # UnboundLocalError in the original)
        time_str = ''.join(log_list[:4])
        log_time = pendulum.from_format(time_str, '%H:%M:%S%b%d%Y',
                                        'Asia/Shanghai').int_timestamp
        message = log_list[-1]
        status = self.get_status(message)
        script_name = Outputer.get_script_name(message)
        # load task.
        task = self.redis.execute_command('hget task name')
        influx_json = self.construct_json(log_time, message, measurement,
                                          status, script_name, task=task)
        # log.debug(influx_json)
        return 0, 'process successful', influx_json

    def construct_json(self, *args, **kwargs):
        """Build the influxdb point dict from positional
        (time, message, measurement, status, script_name) plus a task tag.

        :param args: the positional values listed above
        :param kwargs: must contain 'task'
        :return: the influxdb point dict
        """
        time, message, measurement, status, script_name = args
        task = kwargs['task']
        fields = {"Msg": message, "status": status, 'script_name': script_name}
        tags = {
            "node": self.nodename,
            "task": task,
            "eqpt_no": self.eqpt_no,
        }
        influx_json = {
            'tags': tags,
            'fields': fields,
            'time': time,
            "measurement": measurement
        }
        return influx_json

    def get_status(self, log_msg):
        status_set = self.status_set
        for one_set in status_set:
            if log_msg.startswith(tuple(status_set[one_set])):
                status = one_set
                return self.status_mapping[status]
        return None

    @staticmethod
    def get_script_name(log_msg):
        if 'script' in log_msg:
            script_name = log_msg.split(' ', 5)[-1].split('/')[-1].split('.')[0]
            return script_name

    @staticmethod
    def check_valid(msgline):
        if msgline[0].isalpha():
            log.info('unexpected msg_line, pass.')
            return True
        return False
def execute_command(self, *args, **options):
    packed_args = self.pack_args(*args)
    return StrictRedis.execute_command(self, *packed_args, **options)
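# Context sketch: a method like execute_command above would live on a
# StrictRedis subclass that massages arguments before sending them. The class
# name and the pack_args body below are assumptions for illustration only:
class ArgPackingRedis(StrictRedis):
    def pack_args(self, *args):
        # hypothetical packing step: stringify anything redis-py can't encode
        return [a if isinstance(a, (bytes, str, int, float)) else str(a)
                for a in args]

    def execute_command(self, *args, **options):
        packed_args = self.pack_args(*args)
        return StrictRedis.execute_command(self, *packed_args, **options)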
    'buys': [
        {'who': "Jim", 'required': 20},
        {'who': "Amy", 'required': 17}
    ]
}]

for next_event in events:
    create_event(next_event['event'], next_event['qty'], next_event['price'])
    for buy in next_event['buys']:
        reserve_with_pending(buy['who'], next_event['event'], buy['required'])
    post_purchases(next_event['event'])

for next_event in events:
    print "=== Event: {}".format(next_event['event'])
    print "Details: {}".format(redis.hgetall("events:" + next_event['event']))
    print "Sales: {}".format(redis.smembers("sales:" + next_event['event']))
    for buy in next_event['buys']:
        print "Invoices for {}: {}".format(
            buy['who'], redis.smembers("invoices:" + buy['who']))

print "=== Orders"
for i in redis.scan_iter(match="purchase_orders:*"):
    print redis.get(i)

print "=== Sales Summary \n{}".format(redis.hgetall("sales_summary"))

print "=== Sales Summary - hour of sale histogram"
hist = redis.get("sales_histogram:time_of_day")
for i in range(0, 24):
    vals = ["GET", "u8", (i + 1) * 8]
    total_sales = int(redis.execute_command(
        "BITFIELD", "sales_histogram:time_of_day", *vals)[0])
    print " {} = {}".format(i, total_sales)
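# Companion sketch (an assumption, not shown above): the write side of the
# histogram would bump the matching unsigned 8-bit counter, reusing the
# snippet's redis connection and the same (hour + 1) * 8 bit-offset
# convention the GET loop reads back.
hour_of_sale = 13
redis.execute_command("BITFIELD", "sales_histogram:time_of_day",
                      "INCRBY", "u8", (hour_of_sale + 1) * 8, 1)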
class Feedback(object):
    def __init__(self, config=None):
        if config is None:
            config = {}
        self.redis = StrictRedis(**config['redis'])

    def get_allocation_key(self, site_id: str, experiment: str,
                           recommender_id: str,
                           allocation_time: datetime = None) -> str:
        if allocation_time is None:
            allocation_time = datetime.utcnow()
        long_key = '/'.join([site_id, experiment, recommender_id])
        short_key = '{:08x}'.format(mmh3.hash(long_key) & INT_MAX)
        timestamp = allocation_time.strftime(HOUR_FORMAT)
        return short_key + ':' + timestamp

    def insert(self, allocation_key: str, event_type: str, rec_id: str,
               ttl=timedelta(days=60)):
        full_key = event_type + ':' + allocation_key
        key_exists = self.redis.exists(full_key)
        self.redis.execute_command('PFADD', full_key, rec_id)
        if not key_exists:
            timestamp = allocation_key.rsplit(':', 1)[-1]
            allocation_time = datetime.strptime(timestamp, HOUR_FORMAT)
            expire_time = allocation_time + ttl
            self.redis.expireat(full_key, expire_time)

    def count_distinct(self, allocation_key: str, event_type: str) -> int:
        return self.redis.execute_command('PFCOUNT',
                                          event_type + ':' + allocation_key)

    def increment_arm(self, event: Dict):
        # the stubbed events below carry 'rec_time' rather than
        # 'allocation_time', so fall back to it
        key = self.get_allocation_key(
            site_id=event['site_id'],
            experiment=event['experiment'],
            recommender_id=event['recommender_id'],
            allocation_time=event.get('allocation_time', event.get('rec_time')))
        rec_id = (event.get('recset', 'NA') + ':' + event.get('rec_group', 'NA')
                  + ':' + event.get('rec_position', 'NA'))
        self.insert(key, event['event_type'], rec_id)

    def flush_redis(self):
        self.redis.flushall()

    def event_listener(self):
        '''TODO: integrate with kafka -- currently stubbed'''
        event_stream = iter([
            dict(site_id='666', event_type='rec_viewed',
                 rec_time=datetime(2016, 11, 1, 12), recset='abc',
                 rec_group='a', rec_position='1', recommender_id='fmv1',
                 experiment='default'),
            dict(site_id='666', event_type='rec_viewed',
                 rec_time=datetime(2016, 11, 1, 12), recset='abc',
                 rec_group='a', rec_position='1', recommender_id='fmv1',
                 experiment='default'),
            dict(site_id='666', event_type='rec_viewed',
                 rec_time=datetime(2016, 11, 1, 12), recset='abc',
                 rec_group='a', rec_position='1', recommender_id='fmv1',
                 experiment='default'),
            dict(site_id='666', event_type='rec_clicked',
                 rec_time=datetime(2016, 11, 1, 12), recset='abc',
                 rec_group='a', rec_position='1', recommender_id='fmv1',
                 experiment='default'),
            dict(site_id='666', event_type='rec_clicked',
                 rec_time=datetime(2016, 11, 1, 12), recset='abc',
                 rec_group='a', rec_position='1', recommender_id='fmv1',
                 experiment='default'),
            dict(site_id='666', event_type='rec_clicked',
                 rec_time=datetime(2016, 11, 1, 12), recset='abc',
                 rec_group='a', rec_position='1', recommender_id='fmv1',
                 experiment='default'),
            dict(site_id='238', event_type='rec_viewed',
                 rec_time=datetime(2016, 11, 2, 8), recset='abc',
                 rec_group='a', rec_position='1', recommender_id='collb',
                 experiment='default'),
            dict(site_id='238', event_type='rec_clicked',
                 rec_time=datetime(2016, 11, 2, 8), recset='abc',
                 rec_group='a', rec_position='1', recommender_id='collb',
                 experiment='default')
        ])
        for event in event_stream:
            if event['event_type'] in ['rec_viewed', 'rec_clicked']:
                self.increment_arm(event)

    def perdelta(self, start: datetime, end: datetime,
                 delta: timedelta) -> List[datetime]:
        output = []
        curr = start
        while curr < end:
            curr = curr + delta
            output.append(curr)
        return output

    def get_recent_hours(self, days_ago: int = 30) -> List[datetime]:
        end_hour = datetime.now().replace(minute=0, second=0, microsecond=0)
        start_hour = end_hour - timedelta(days=days_ago)
        return self.perdelta(start_hour, end_hour, timedelta(hours=1))

    def enumerate_keys(self, site_ids: List[str], experiments: List[str],
                       recommender_ids: List[str]) -> pd.DataFrame:
        header = ['site_id', 'experiment', 'recommender_id', 'date_hour',
                  'trials', 'successes']
        rows = []
        for site_id in site_ids:
            for experiment in experiments:
                for recommender_id in recommender_ids:
                    for date_hour in self.get_recent_hours():
                        key = self.get_allocation_key(site_id, experiment,
                                                      recommender_id, date_hour)
                        view_count = self.count_distinct(key, 'rec_viewed')
                        click_count = self.count_distinct(key, 'rec_clicked')
                        rows.append([site_id, experiment, recommender_id,
                                     date_hour, view_count, click_count])
        df = pd.DataFrame(data=rows, columns=header)
        # TODO: date filter
        return df.groupby(['site_id', 'experiment',
                           'recommender_id']).sum().reset_index()