def test_connection_exceptions(self, srs):
    # Default: losing connection raises connection error
    strm_raise_conn_loss = srs.streams(streams=streams, block=10)
    srs.xadd("S3_" + long_suffix, index=4000)
    _ = strm_raise_conn_loss.next()
    strm_raise_conn_loss.connection = StrictRedis(host=long_suffix)
    with pytest.raises(redis.exceptions.ConnectionError):
        _ = strm_raise_conn_loss.next()

    # Returning the connection error rather than raising it
    strm_ret_conn_loss = srs.streams(streams=streams, block=10,
                                     raise_connection_exceptions=False)
    srs.xadd("S3_" + long_suffix, index=4000)
    _ = strm_ret_conn_loss.next()
    real_connection = strm_ret_conn_loss.connection
    # simulate lost connection
    strm_ret_conn_loss.connection = StrictRedis(host=long_suffix)
    msg = strm_ret_conn_loss.next()
    assert isinstance(msg, ConnectionError)
    # simulate restored connection
    strm_ret_conn_loss.connection = real_connection
    msg = strm_ret_conn_loss.next()
    assert msg is None
def test_is_rate_limited_script():
    now = int(time.time())
    client = StrictRedis(db=9)

    # The item should not be rate limited by either key.
    assert map(bool, is_rate_limited(client, ('foo', 'bar'),
                                     (1, now + 60, 2, now + 120))) == [False, False]

    # The item should be rate limited by the first key (1).
    assert map(bool, is_rate_limited(client, ('foo', 'bar'),
                                     (1, now + 60, 2, now + 120))) == [True, False]

    # The item should still be rate limited by the first key (1), but *not*
    # rate limited by the second key (2) even though this is the third time
    # we've checked the quotas. This ensures items that are rejected by a lower
    # quota don't affect unrelated items that share a parent quota.
    assert map(bool, is_rate_limited(client, ('foo', 'bar'),
                                     (1, now + 60, 2, now + 120))) == [True, False]

    assert client.get('foo') == '1'
    assert 59 <= client.ttl('foo') <= 60
    assert client.get('bar') == '1'
    assert 119 <= client.ttl('bar') <= 120
def __init__(self):
    self._queue = Queue(
        connection=StrictRedis(host=REDIS_HOST, port=REDIS_PORT))
    self._rq_redis_storage = RQRedisDataStorage.instance()
    self._redis_subscriber = RedisSubscriber.instance()
    self._subscribed_callbacks = {}
    self._listen_for_results()
def _initialize_redis_cluster() -> RedisClientType:
    if settings.USE_REDIS_CLUSTER:
        startup_nodes = settings.REDIS_CLUSTER_STARTUP_NODES
        if startup_nodes is None:
            startup_nodes = [{"host": settings.REDIS_HOST, "port": settings.REDIS_PORT}]
        startup_cluster_nodes = [ClusterNode(n["host"], n["port"]) for n in startup_nodes]
        return RetryingStrictRedisCluster(
            startup_nodes=startup_cluster_nodes,
            socket_keepalive=True,
            password=settings.REDIS_PASSWORD,
            max_connections_per_node=True,
        )
    else:
        return StrictRedis(
            host=settings.REDIS_HOST,
            port=settings.REDIS_PORT,
            password=settings.REDIS_PASSWORD,
            db=settings.REDIS_DB,
            socket_keepalive=True,
        )
def test_truncate_timeline_script(self):
    client = StrictRedis(db=9)
    timeline = 'timeline'

    # Preload some fake records (the contents don't matter).
    records = list(itertools.islice(self.records, 10))
    for record in records:
        client.zadd(timeline, record.timestamp, record.key)
        client.set(make_record_key(timeline, record.key), 'data')

    with self.assertChanges(lambda: client.zcard(timeline), before=10, after=5):
        truncate_timeline((timeline,), (5,), client)

    # Ensure the early records don't exist.
    for record in records[:5]:
        assert not client.zscore(timeline, record.key)
        assert not client.exists(make_record_key(timeline, record.key))

    # Ensure the later records do exist.
    for record in records[-5:]:
        assert client.zscore(timeline, record.key) == float(record.timestamp)
        assert client.exists(make_record_key(timeline, record.key))
def __init__(self, size=5, test_url="http://www.baidu.com"):
    self.test_url = test_url
    self.size = size
    self.redis_db = StrictRedis()
    # Queue of proxies
    self.proxy = Queue()
    self.headers = {"User-Agent": UserAgent().chrome}
def setUp(self):
    self.testContext.startMock()
    self.redisClient = StrictRedis('127.0.0.1', 6379, 0)
    daobase.executeMixCmd = self.runRedisCmd
    daobase._executePayDataCmd = self.runRedisCmd
    daobase.executeUserCmd = self.runUserRedisCmd
    self.testContext.configure.setJson('game:9999:map.clientid', clientIdMap, 0)
    self.testContext.configure.setJson('game:9999:item', item_conf, 0)
    self.testContext.configure.setJson('game:9999:products', products_conf, 0)
    self.testContext.configure.setJson('game:9999:store', store_template_conf, 0)
    self.testContext.configure.setJson('game:9999:store', store_default_conf,
                                       clientIdMap[self.clientId])
    self.testContext.configure.setJson('game:9999:vip', vip_conf, 0)
    self.testContext.configure.setJson('game:9999:benefits', benefits_conf, 0)
    self.testContext.configure.setJson('game:9999:share', share_conf, 0)
    self.timestamp = pktimestamp.getCurrentTimestamp()
    self.pktimestampPatcher = patch('poker.util.timestamp.getCurrentTimestamp',
                                    self.getCurrentTimestamp)
    self.pktimestampPatcher.start()
    hallitem._initialize()
    hallvip._initialize()
    hallbenefits._initialize()
    hallshare._initialize()
    daobase.executeUserCmd(10001, 'del', 'share.status:9999:10001')
def __init__(self):
    self.url = 'http://wapi.http.linkudp.com/index/index/get_free_ip'
    self.redis_client = StrictRedis(host='127.0.0.1', port=6379)
    self.headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
    }
    self.session = requests.Session()
def __init__(self):
    # Load the GeoIP databases into class attributes since they each need 20+ MB in memory
    if not self.__class__._geoip4:
        self.__class__._geoip4 = GeoIP(Config.GEOIP_PATH_V4, MEMORY_CACHE)
    if not self.__class__._geoip6:
        self.__class__._geoip6 = GeoIP(Config.GEOIP_PATH_V6, MEMORY_CACHE)
    self.redis = StrictRedis(Config.REDIS['HOST'], Config.REDIS['PORT'],
                             Config.REDIS['DB'])
def cluster_factory(): if config.get("is_redis_cluster", False): return RetryingStrictRedisCluster( startup_nodes=hosts, decode_responses=True, skip_full_coverage_check=True ) else: host = hosts[0].copy() host["decode_responses"] = True return StrictRedis(**host)
def testRedis():
    redisClient = StrictRedis('127.0.0.1', 6379, 0)
    print redisClient.execute_command("hgetall", 'msignin5:6:6666:1010:66661000')
    print redisClient.hgetall('msignin5:6:6666:1010:66661000')
    print redisClient.execute_command("HGETALL", 'msignin5:6:6666:1010:66661000')
    print "*" * 5
    print redisClient.execute_command("hgetall", 'mstatus2:6')
    print redisClient.hgetall('mstatus2:6')
    print redisClient.execute_command("HGETALL", 'mstatus2:6')
def initDao():
    global redisClient
    redisClient = StrictRedis('127.0.0.1', 6379, 0)
    DaoRoomInfo._ftredis = MockRedis()
    DaoMatchStatus._ftredis = MockRedis()
    DaoMatchSigninRecord._ftredis = MockRedis()
    DaoUserSigninRecord._ftredis = MockRedis()
    DaoMatchPlayerInfo._ftredis = MockRedis()
    DaoUserMatchHistory._ftredis = MockRedis()
def __init_rd(self, master=False):
    if self.rd is None:
        if master:
            self.rd = StrictRedis(host=_redis_servers[0][0],
                                  port=_redis_servers[0][1],
                                  db=_redis_servers[0][2])
            self._master_rd = True
        else:
            server = random.choice(_redis_servers)
            self.rd = StrictRedis(host=server[0], port=server[1], db=server[2])
            self._master_rd = False
    elif master and not self._master_rd:
        self.rd = StrictRedis(host=_redis_servers[0][0],
                              port=_redis_servers[0][1],
                              db=_redis_servers[0][2])
        self._master_rd = True
def __init__(self, db_host, db_port, db_num, db_pw):
    self.pool = ConnectionPool(max_connections=2, db=db_num, host=db_host,
                               port=db_port, password=db_pw,
                               decode_responses=True)
    self.redis = StrictRedis(connection_pool=self.pool)
    self.redis.ping()
    self._object_map = WeakValueDictionary()
def __init__(self, config):
    if self._validateConfig(config):
        self._r = StrictRedis(
            host=config[REDIS_DATASOURCE_CONFIG][REDIS_DATASOURCE_CONFIG_HOST],
            port=config[REDIS_DATASOURCE_CONFIG][REDIS_DATASOURCE_CONFIG_PORT],
            db=config[REDIS_DATASOURCE_CONFIG][REDIS_DATASOURCE_CONFIG_DB])
        logger.debug("Obtained internal redis handler " + str(self._r))
    else:
        raise BaseException("Error validating config")
def __init__(self): """初始化对象""" self.base_url = 'http://www.neihanpa.com/article' self.start_index = int(raw_input('请输入开始页:')) self.end_index = int(raw_input('请输入结束页:')) self.headers = HEADERS_USER # 创建队列存储页面 self.queue = Queue(int(self.end_index - self.start_index)) # 创建匹配规则获取urls self.xpath_urls = '//a[@class="title"and @title]/@href ' # 创建Redis链接 self.redis_cli = StrictRedis('127.0.0.1')
def __init__(self, sentinels, password=None, socket_timeout=None,
             min_other_sentinels=0):
    self.sentinels = [
        StrictRedis(hostname, port, password=password,
                    socket_timeout=socket_timeout)
        for hostname, port in sentinels
    ]
    self.min_other_sentinels = min_other_sentinels
def test_ensure_timeline_scheduled_script(self):
    client = StrictRedis(db=9)

    timeline = 'timeline'
    timestamp = 100.0

    waiting_set_size = functools.partial(client.zcard, 'waiting')
    ready_set_size = functools.partial(client.zcard, 'ready')
    timeline_score_in_waiting_set = functools.partial(client.zscore, 'waiting', timeline)
    timeline_score_in_ready_set = functools.partial(client.zscore, 'ready', timeline)

    keys = ('waiting', 'ready', 'last-processed')

    # The first addition should cause the timeline to be added to the ready set.
    with self.assertChanges(ready_set_size, before=0, after=1), \
            self.assertChanges(timeline_score_in_ready_set, before=None, after=timestamp):
        assert ensure_timeline_scheduled(keys, (timeline, timestamp, 1, 10), client) == 1

    # Adding it again with a timestamp in the future should not change the schedule time.
    with self.assertDoesNotChange(waiting_set_size), \
            self.assertDoesNotChange(ready_set_size), \
            self.assertDoesNotChange(timeline_score_in_ready_set):
        assert ensure_timeline_scheduled(keys, (timeline, timestamp + 50, 1, 10), client) is None

    # Move the timeline from the ready set to the waiting set.
    client.zrem('ready', timeline)
    client.zadd('waiting', timestamp, timeline)
    client.set('last-processed', timestamp)

    increment = 1
    with self.assertDoesNotChange(waiting_set_size), \
            self.assertChanges(timeline_score_in_waiting_set,
                               before=timestamp, after=timestamp + increment):
        assert ensure_timeline_scheduled(keys, (timeline, timestamp, increment, 10), client) is None

    # Make sure the schedule respects the maximum value.
    with self.assertDoesNotChange(waiting_set_size), \
            self.assertChanges(timeline_score_in_waiting_set,
                               before=timestamp + 1, after=timestamp):
        assert ensure_timeline_scheduled(keys, (timeline, timestamp, increment, 0), client) is None

    # Test to ensure a missing last processed timestamp can be handled
    # correctly (chooses minimum of schedule value and record timestamp).
    client.zadd('waiting', timestamp, timeline)
    client.delete('last-processed')
    with self.assertDoesNotChange(waiting_set_size), \
            self.assertDoesNotChange(timeline_score_in_waiting_set):
        assert ensure_timeline_scheduled(keys, (timeline, timestamp + 100, increment, 10), client) is None

    with self.assertDoesNotChange(waiting_set_size), \
            self.assertChanges(timeline_score_in_waiting_set,
                               before=timestamp, after=timestamp - 100):
        assert ensure_timeline_scheduled(keys, (timeline, timestamp - 100, increment, 10), client) is None
def __init__(self, start_url=None):
    super().__init__(start_url)
    # Initialize the Redis database
    self.redis_key = 'CarQaFilter'
    self.redis_db = StrictRedis(host='127.0.0.1', port=6379, db=1)
    # Initialize the MongoDB database
    self.mongo_client = MongoClient()
    self.collection = self.mongo_client['autocar']['qa']
    # Queue of detail pages
    self.detail = Queue()
    # Precompiled regex
    self.clean_detail = re.compile(r'\s|\n')
def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None,
             **connection_kwargs):
    # if sentinel_kwargs isn't defined, use the socket_* options from
    # connection_kwargs
    if sentinel_kwargs is None:
        sentinel_kwargs = dict([(k, v)
                                for k, v in iteritems(connection_kwargs)
                                if k.startswith('socket_')])
    self.sentinel_kwargs = sentinel_kwargs

    self.sentinels = [StrictRedis(hostname, port, **self.sentinel_kwargs)
                      for hostname, port in sentinels]
    self.min_other_sentinels = min_other_sentinels
    self.connection_kwargs = connection_kwargs
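# A standalone sketch of the socket_* filtering rule used in the __init__
# above, written with a plain dict comprehension instead of iteritems();
# the sample connection_kwargs values are illustrative assumptions.
connection_kwargs = {'socket_timeout': 0.5, 'socket_keepalive': True, 'db': 2}
sentinel_kwargs = {k: v for k, v in connection_kwargs.items()
                   if k.startswith('socket_')}
# Only the socket_* options carry over to the sentinel clients.
assert sentinel_kwargs == {'socket_timeout': 0.5, 'socket_keepalive': True}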
def run_once():
    for port in REDIS_PORT.split(','):
        if ',' in REDIS_PORT:
            statsd_prefix = STATSD_PREFIX + '-{}'.format(port)
        else:
            statsd_prefix = STATSD_PREFIX
        redis = StrictRedis(REDIS_HOST, port)
        stats = redis.info()
        stats['keyspaces'] = {}
        # Snapshot the keys so the dict can be mutated while iterating.
        for key in list(stats.keys()):
            if key.startswith('db'):
                stats['keyspaces'][key] = stats[key]
                del stats[key]
        out_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        for g in GAUGES:
            if g in stats:
                send_metric(out_sock, '{}.{}'.format(statsd_prefix, g), 'g',
                            float(stats[g]))
        for c in COUNTERS:
            if c in stats:
                send_metric(out_sock, '{}.{}'.format(statsd_prefix, c), 'c',
                            float(stats[c]))
        for ks in stats['keyspaces']:
            for kc in KEYSPACE_COUNTERS:
                if kc in stats['keyspaces'][ks]:
                    send_metric(out_sock,
                                '{}.keyspace.{}'.format(statsd_prefix, kc),
                                'c', float(stats['keyspaces'][ks][kc]))
            for kg in KEYSPACE_GAUGES:
                if kg in stats['keyspaces'][ks]:
                    send_metric(out_sock,
                                '{}.keyspace.{}'.format(statsd_prefix, kg),
                                'g', float(stats['keyspaces'][ks][kg]))
        out_sock.close()
    time.sleep(PERIOD)
def __init__(self, shards=[('localhost', 6379)], duration=None, hashfn=None,
             db=0, password=None, socket_timeout=None, charset='utf-8',
             errors='strict'):
    shards = [
        StrictRedis(host=x[0], port=x[1], db=db, password=password,
                    socket_timeout=socket_timeout, charset=charset,
                    errors=errors)
        for x in shards
    ]
    super(ShardedRedis, self).__init__(shards, duration, hashfn)
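# A minimal usage sketch for the ShardedRedis __init__ above; the shard
# addresses and duration are illustrative assumptions. Each (host, port)
# pair becomes its own StrictRedis client, and the parent class spreads
# keys across them (via hashfn, when one is supplied).
cache = ShardedRedis(shards=[('localhost', 6379), ('localhost', 6380)],
                     duration=300, db=0)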
def cluster_factory(): if config.get("is_redis_cluster", False): return RetryingRedisCluster( # Intentionally copy hosts here because redis-cluster-py # mutates the inner dicts and this closure can be run # concurrently, as SimpleLazyObject is not threadsafe. This # is likely triggered by RetryingRedisCluster running # reset() after startup # # https://github.com/Grokzen/redis-py-cluster/blob/73f27edf7ceb4a408b3008ef7d82dac570ab9c6a/rediscluster/nodemanager.py#L385 startup_nodes=deepcopy(hosts), decode_responses=True, skip_full_coverage_check=True, max_connections=16, max_connections_per_node=True, ) else: host = hosts[0].copy() host["decode_responses"] = True return StrictRedis(**host)
def get_redis_conn(redisdef):
    if isinstance(redisdef, dict):
        host = redisdef['host']
        port = int(redisdef['port'])
        dbid = int(redisdef['dbid'])
    elif isinstance(redisdef, (list, tuple)):
        host = redisdef[0]
        port = int(redisdef[1])
        dbid = int(redisdef[2])
    else:
        datas = redisdef.split(':')
        host = datas[0]
        port = int(datas[1])
        dbid = int(datas[2])
    for x in (6, 5, 4, 3, 2, 1):
        try:
            rconn = StrictRedis(host=host, port=port, db=dbid)
            return rconn
        except Exception, e:
            if x == 1:
                raise e
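# A minimal sketch of the three input shapes get_redis_conn accepts;
# the addresses are illustrative. All three calls resolve to the same
# host/port/db triple.
conn_a = get_redis_conn({'host': '127.0.0.1', 'port': 6379, 'dbid': 0})
conn_b = get_redis_conn(('127.0.0.1', 6379, 0))
conn_c = get_redis_conn('127.0.0.1:6379:0')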
def __init__(self):
    super().__init__()
    self.setWindowTitle("Redis Explorer")
    self.resize(QtWidgets.QDesktopWidget().availableGeometry(self).size() * 0.5)

    self.tree = QtWidgets.QTreeWidget()
    self.label = QtWidgets.QTextEdit()
    font = self.label.font()
    font.setPointSize(12)
    self.label.setFont(font)

    self.tree.setColumnCount(2)
    self.tree.header().setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)
    self.tree.setHeaderHidden(True)

    splitter = QtWidgets.QSplitter()
    splitter.addWidget(self.tree)
    scroll_area = QtWidgets.QScrollArea()
    scroll_area.setWidgetResizable(True)
    scroll_area.setWidget(self.label)
    splitter.addWidget(scroll_area)
    splitter.setSizes([1, 1])
    self.setCentralWidget(splitter)

    toolbar = self.addToolBar("")
    toolbar.setMovable(False)
    toolbar.setIconSize(QtCore.QSize(32, 32))
    toolbar.addAction(QtGui.QIcon("resources/list-add.png"), "").triggered.connect(
        lambda: self.plus_font(1))
    toolbar.addAction(QtGui.QIcon("resources/list-remove.png"), "").triggered.connect(
        lambda: self.plus_font(-1))
    toolbar.addAction(QtGui.QIcon("resources/view-refresh.png"), "").triggered.connect(
        lambda: self.refresh())

    self.redis = StrictRedis()

    def item_clicked(item: QtWidgets.QTreeWidgetItem):
        if item.parent() is None:
            return
        value = self.redis.get(item.text(0))
        value = value.decode()
        text = json.dumps(json.loads(value), ensure_ascii=False, indent=4)
        # Preserve indentation in the rendered HTML (the entity was likely
        # decoded to a plain space when this snippet was extracted).
        text = text.replace(" ", "&nbsp;").replace("\n", "<br/>").replace(
            ":", "<font color=red>:</font>")
        text = re.sub(r'"(.*?)(?<!\\)"', r'<font color=green>"\g<1>"</font>', text)
        self.label.setHtml(text)

    self.tree.itemClicked.connect(item_clicked)
def setUp(self):
    self.testContext.startMock()
    self.redisClient = StrictRedis('127.0.0.1', 6379, 0)
    daobase.executeMixCmd = self.runRedisCmd
    daobase._executePayDataCmd = self.runRedisCmd
    daobase.executeUserCmd = self.runUserRedisCmd
    daobase.sendUserCmd = self.runUserRedisCmd
    daobase.executeUserCmd(self.userId, 'del', 'yyb.gifts:%s' % (self.userId))
    self.testContext.configure.setJson('poker:global', {'config.game.ids': [6, 9999]}, None)
    self.testContext.configure.setJson('game:9999:map.clientid', clientIdMap, 0)
    self.testContext.configure.setJson('poker:map.clientid', clientIdMap, None)
    self.testContext.configure.setJson('game:9999:item', item_conf, 0)
    self.testContext.configure.setJson('game:9999:yyb.gifts', conf, 0)
    self.timestamp = pktimestamp.getCurrentTimestamp()
    self.pktimestampPatcher = patch('poker.util.timestamp.getCurrentTimestamp',
                                    self.getCurrentTimestamp)
    self.pktimestampPatcher.start()
    userdata.setAttr(self.userId, 'lastAuthorTime', '2017-10-27 00:00:00.000')
    userdata.setAttr(self.userId, 'authorTime', '2017-11-28 00:00:00.000')
    hallitem._initialize()
    hall_yyb_gifts._initialize()
def test_ensure_timeline_scheduled_script(self):
    client = StrictRedis(db=9)

    timeline = 'timeline'
    timestamp = 100.0

    waiting_set_size = functools.partial(client.zcard, 'waiting')
    ready_set_size = functools.partial(client.zcard, 'ready')
    timeline_score_in_waiting_set = functools.partial(client.zscore, 'waiting', timeline)
    timeline_score_in_ready_set = functools.partial(client.zscore, 'ready', timeline)

    # The first addition should cause the timeline to be added to the waiting set.
    with self.assertChanges(waiting_set_size, before=0, after=1), \
            self.assertChanges(timeline_score_in_waiting_set, before=None, after=timestamp):
        ensure_timeline_scheduled(('waiting', 'ready'), (timeline, timestamp), client)

    # Adding it again with a timestamp in the future should not change the schedule time.
    with self.assertDoesNotChange(waiting_set_size), \
            self.assertDoesNotChange(timeline_score_in_waiting_set):
        ensure_timeline_scheduled(('waiting', 'ready'), (timeline, timestamp + 50), client)

    # If we see a record with a timestamp earlier than the schedule time,
    # we should change the schedule.
    with self.assertDoesNotChange(waiting_set_size), \
            self.assertChanges(timeline_score_in_waiting_set,
                               before=timestamp, after=timestamp - 50):
        ensure_timeline_scheduled(('waiting', 'ready'), (timeline, timestamp - 50), client)

    # Move the timeline from the waiting set to the ready set.
    client.zrem('waiting', timeline)
    client.zadd('ready', timestamp, timeline)

    # Nothing should change.
    with self.assertDoesNotChange(waiting_set_size), \
            self.assertDoesNotChange(ready_set_size), \
            self.assertDoesNotChange(timeline_score_in_ready_set):
        ensure_timeline_scheduled(('waiting', 'ready'), (timeline, timestamp - 50), client)
from flask import Flask, request, send_from_directory
from flask_socketio import SocketIO, join_room, leave_room, send, emit
from redis.client import StrictRedis

app = Flask(__name__)
socketio = SocketIO(app)
r = StrictRedis(host='localhost', port=6379, db=0)


@app.route("/")
def hello():
    return send_from_directory('./', 'index.html')


@app.route("/main.js")
def main_js():
    return send_from_directory('./', 'main.js')


@app.route("/signaling.js")
def signaling_js():
    return send_from_directory('./', 'signaling.js')


@socketio.on('broadcast')
def handle_message(json):
    room = json['room']
    send(json, room=room)


@socketio.on('new-offer')
def __init__(self, host, port, level):
    DataStorage.__init__(self, level)
    self._storage = StrictRedis(host=host, port=port)
from config import REDIS_HOST, REDIS_PORT
from rq_gevent_worker import GeventWorker
from redis.client import StrictRedis
from rq import Connection, Queue
from logger import worker_logger

if __name__ == '__main__':
    # Tell rq what Redis connection to use
    with Connection(connection=StrictRedis(host=REDIS_HOST, port=REDIS_PORT)):
        q = Queue(connection=StrictRedis(host=REDIS_HOST, port=REDIS_PORT))
        waiting_jobs = q.get_jobs()
        for job in waiting_jobs:
            q.remove(job)
        gevent_worker = GeventWorker(q)
        gevent_worker.log = worker_logger
        gevent_worker.work()