def test():
    c1 = yield from Connection.create(port=PORT, poolsize=10, db=1)
    c2 = yield from Connection.create(port=PORT, poolsize=10, db=2)
    c3 = yield from Connection.create(port=PORT, poolsize=10, db=1)
    c4 = yield from Connection.create(port=PORT, poolsize=10, db=2)

    yield from c1.set('key', 'A')
    yield from c2.set('key', 'B')

    r1 = yield from c3.get('key')
    r2 = yield from c4.get('key')

    self.assertEqual(r1, 'A')
    self.assertEqual(r2, 'B')
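# A minimal sketch (an assumption, not from the original source) of how these nested
# "def test():" coroutines are typically driven: they close over `self` from an
# enclosing unittest method and are run on the test case's event loop. Class and
# method names below are hypothetical.
import asyncio
import unittest


class RedisConnectionTest(unittest.TestCase):
    def setUp(self):
        self.loop = asyncio.new_event_loop()

    def test_example(self):
        @asyncio.coroutine
        def test():
            # The body of one of the snippets above would go here.
            yield from asyncio.sleep(0)
            self.assertTrue(True)

        self.loop.run_until_complete(test())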
def setUp(self):
    self.loop = asyncio.new_event_loop()
    asyncio.set_event_loop(None)
    self.redis = self.loop.run_until_complete(
        Connection.create(db=0, encoder=BytesEncoder(), loop=self.loop))
def test():
    # Create connection
    connection = yield from Connection.create(port=PORT, poolsize=3)

    t1 = yield from connection.multi()
    t2 = yield from connection.multi()
    t3 = yield from connection.multi()

    # Fourth transaction should fail. (Pool is full)
    with self.assertRaises(RedisException) as e:
        yield from connection.multi()
    self.assertEqual(e.exception.args[0],
                     'All connection in the pool are in use. Please increase the poolsize.')

    # Run commands in transaction
    f1 = yield from t1.set(u'key', u'value')
    f2 = yield from t2.set(u'key2', u'value2')

    # Commit.
    yield from t1.exec()
    yield from t2.exec()

    # Check
    result1 = yield from connection.get(u'key')
    result2 = yield from connection.get(u'key2')

    self.assertEqual(result1, u'value')
    self.assertEqual(result2, u'value2')
def test():
    # Create connection
    connection = yield from Connection.create(port=PORT, poolsize=2)
    yield from connection.delete(['my-list'])

    results = []

    # Sink: receive items using blocking pop
    @asyncio.coroutine
    def sink():
        for i in range(0, 5):
            reply = yield from connection.blpop(['my-list'])
            self.assertIsInstance(reply, BlockingPopReply)
            results.append(reply.value)

    # Source: Push items on the queue
    @asyncio.coroutine
    def source():
        for i in range(0, 5):
            result = yield from connection.rpush('my-list', [str(i)])
            yield from asyncio.sleep(.5)

    # Run both coroutines.
    f1 = asyncio.Task(source())
    f2 = asyncio.Task(sink())
    yield from gather(f1, f2)

    # Test results.
    self.assertEqual(results, [str(i) for i in range(0, 5)])
def _get_subscription(redis_host, redis_port, redis_key):
    connection = yield from RedisConnection.create(
        host=redis_host, port=redis_port)
    subscriber = yield from connection.start_subscribe()
    yield from subscriber.subscribe([redis_key])
    return subscriber
def middleware(request):
    tag = request.cookies.get('chatterbox', None)
    request.tag = tag or ''.join(
        random.choice(string.ascii_letters) for x in range(16))
    request.conn = yield from Connection.create(host='localhost', port=6379)

    # Rate limit
    key = make_key(request.tag, 'rated')
    now = time.time()
    yield from request.conn.zadd(key, {str(int(now)): now})
    yield from request.conn.expireat(key, int(now) + RATE_LIMIT_DURATION)
    yield from request.conn.zremrangebyscore(
        key,
        ZScoreBoundary('-inf'),
        ZScoreBoundary(now - RATE_LIMIT_DURATION))
    size = yield from request.conn.zcard(key)

    if size > RATE_LIMIT:
        response = web.Response(body=b'', status=429)
    else:
        # Call handler
        response = yield from handler(request)

    # Set cookie
    if tag is None:
        response.set_cookie('chatterbox', request.tag)

    return response
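# A hedged sketch (an assumption) of the surrounding pieces the middleware above
# relies on: the old-style aiohttp middleware factory that supplies `handler`, plus
# the `make_key` helper and the rate-limit constants. Names and values here are
# illustrative only and do not come from the original source.
import asyncio

RATE_LIMIT = 10               # max requests per window (hypothetical value)
RATE_LIMIT_DURATION = 60      # window length in seconds (hypothetical value)


def make_key(tag, suffix):
    # Hypothetical key helper: one namespaced sorted-set key per client tag.
    return 'chatterbox:{}:{}'.format(tag, suffix)


@asyncio.coroutine
def rate_limit_middleware_factory(app, handler):
    # Old-style aiohttp middleware factory: returns the per-request coroutine,
    # which closes over `handler` exactly as the snippet above assumes.
    @asyncio.coroutine
    def middleware(request):
        # The rate-limiting body from the snippet above would go here.
        return (yield from handler(request))
    return middleware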
def testConnectionReconnect(self):
    current = self.open_fd()
    for _ in range(20):
        with self.assertRaises(asyncio.futures.TimeoutError):
            yield from asyncio.wait_for(
                Connection.create(auto_reconnect=True),
                timeout=self.conn_wait)
        self.assertEqual(self.open_fd(), current)
def testConnectionReconnect(self):
    current = self.open_fd()
    for i in range(10):
        task = Connection.create(host=self.host, auto_reconnect=True)
        with self.assertRaises(asyncio.futures.TimeoutError):
            yield from asyncio.wait_for(task, timeout=self.conn_wait)
        self.assertEqual(self.open_fd(), current + i + 1)
        print(self.open_fd())
        yield from asyncio.sleep(self.sleep)
def testConnectionStable(self):
    current = self.open_fd()
    for _ in range(10):
        c = yield from Connection.create(auto_reconnect=False)
        self.assertEqual(self.open_fd(), current + 1)
        with self.assertRaises(asyncio.futures.TimeoutError):
            yield from asyncio.wait_for(c.get("key1"), timeout=self.cmd_wait)
        c.transport.close()
        yield from asyncio.sleep(self.conn_wait)
def test():
    # Create connection
    connection = yield from Connection.create(port=PORT)

    # Test get/set
    yield from connection.set('key', 'value')
    result = yield from connection.get('key')
    self.assertEqual(result, 'value')

    # Test default poolsize
    self.assertEqual(connection.poolsize, 1)
def testConnectionReconnect(self):
    c = yield from Connection.create(auto_reconnect=True)
    current = self.open_fd()
    for _ in range(10):
        self.assertEqual(self.open_fd(), current)
        # This transport.close() has to be scheduled explicitly; it would never
        # be reached otherwise, because c.get() runs forever.
        asyncio.get_event_loop().call_later(self.cmd_wait, c.transport.close)
        with self.assertRaises(asyncio_redis.ConnectionLostError):
            yield from c.get("key1")
        yield from asyncio.sleep(self.sleep)
def testConnectionReconnect(self): """ Проверяет что при включенном реконнекте число файловых дескрипторов не растет.""" c = yield from Connection.create(auto_reconnect=True) current = self.open_fd() for _ in range(10): yield from c.get("key1") c.transport.close() # в close выполняется loop.call_soon(_reconnect) # который выполняется достаточно долго yield from asyncio.sleep(self.conn_wait) self.assertEqual(self.open_fd(), current) print(self.open_fd())
def testConnectionClose(self):
    """ Check that with auto-reconnect disabled, explicitly closing the connection
    releases the file descriptor held by asyncio_redis.Connection."""
    current = self.open_fd()
    for _ in range(10):
        c = yield from Connection.create(auto_reconnect=False)
        self.assertEqual(self.open_fd(), current + 1)
        yield from c.get("key1")
        c.transport.close()
        # close() only schedules the cleanup via loop.call_soon().
        yield from asyncio.sleep(self.conn_wait)
        self.assertEqual(self.open_fd(), current)
        print(self.open_fd())
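# The fd-counting tests above rely on an open_fd() helper on the test case. A minimal
# sketch (an assumption, Linux-specific, not from the original source) that counts the
# process's open file descriptors; the mixin name is hypothetical.
import os


class FDCountMixin:
    def open_fd(self):
        # Each entry in /proc/self/fd corresponds to one open file descriptor.
        return len(os.listdir('/proc/self/fd'))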
def test():
    # Create connection
    connection = yield from Connection.create(port=PORT)
    self.assertEqual(connection.connections_in_use, 0)

    # Wait for ever. (This blocking pop doesn't return.)
    yield from connection.delete(['unknown-key'])
    f = asyncio.Task(connection.blpop(['unknown-key']))
    yield from asyncio.sleep(.1)  # Sleep to make sure that the above coroutine started executing.

    # Run command in other thread.
    with self.assertRaises(RedisException) as e:
        yield from connection.set('key', 'value')
    self.assertEqual(e.exception.args[0],
                     'All connection in the pool are in use. Please increase the poolsize.')
    self.assertEqual(connection.connections_in_use, 1)
def test():
    # Setup
    connection = yield from Connection.create(port=PORT, poolsize=3)
    yield from connection.set(u'key', u'0')
    yield from connection.set(u'other_key', u'0')

    # Test
    t = yield from connection.multi(watch=['other_key'])
    yield from connection.set('other_key', 'other_value')
    yield from t.set(u'other_key', u'value')

    with self.assertRaises(TransactionError):
        yield from t.exec()

    # Check
    result = yield from connection.get(u'other_key')
    self.assertEqual(result, u'other_value')
def test():
    # Setup
    connection = yield from Connection.create(port=PORT, poolsize=3)
    yield from connection.set(u'key', u'0')
    yield from connection.set(u'other_key', u'0')

    # Test
    t = yield from connection.multi(watch=['other_key'])
    f = yield from t.set(u'key', u'value')
    f2 = yield from t.set(u'other_key', u'my_value')
    yield from t.exec()

    # Check
    result = yield from connection.get(u'key')
    self.assertEqual(result, u'value')
    result = yield from connection.get(u'other_key')
    self.assertEqual(result, u'my_value')
def _main_loop(consumer_function, consumer_function_kwargs,
               redis_host=None, redis_port=6379, redis_key=DEFAULT_REDIS_KEY):
    LOG.info('Connecting to {host} on port {port}'.format(
        host=redis_host, port=redis_port))
    connection = yield from RedisConnection.create(
        host=redis_host, port=redis_port)

    LOG.info('Subscribing to key "{key}"'.format(key=redis_key))
    subscriber = yield from _get_subscription(
        redis_host=redis_host, redis_port=redis_port, redis_key=redis_key)

    timeout = _INITIAL_TIMEOUT
    consumer_future = None

    while True:
        try:
            LOG.debug('Waiting for a published message with timeout of '
                      '{}'.format(timeout))
            message = yield from asyncio.wait_for(
                subscriber.next_published(), timeout)
            LOG.debug('Notified of new message: {}'.format(message))
        except asyncio.TimeoutError:
            LOG.debug('Timed out after {} seconds'.format(timeout))

        # Cancel the currently running consumer as soon as possible
        if consumer_future is not None:
            LOG.debug('Cancelling future')
            consumer_future.cancel()

        now = time.time()
        queue_updates = yield from _get_queue_updates(
            connection, now, redis_key)
        yield from _remove_expire_packages(
            connection=connection,
            packages=queue_updates.expired_packages,
            redis_key=redis_key)

        highest_priority_package = queue_updates.highest_priority_package
        timeout = _time_until_package_expires(highest_priority_package, now)

        if asyncio.iscoroutinefunction(consumer_function):
            consumer_future = asyncio.async(consumer_function(
                highest_priority_package, **consumer_function_kwargs))
        else:
            consumer_function(
                highest_priority_package, **consumer_function_kwargs)
            consumer_future = None
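# A hedged sketch (an assumption) of how _main_loop could be started; the helpers it
# calls (_get_queue_updates, _remove_expire_packages, _time_until_package_expires) are
# defined elsewhere in the original module and are not reproduced here. The function
# name and the host value are hypothetical.
import asyncio


def run(consumer_function, **consumer_function_kwargs):
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(_main_loop(
            consumer_function,
            consumer_function_kwargs,
            redis_host='localhost'))
    except KeyboardInterrupt:
        pass
    finally:
        loop.close()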
def test():
    # Create connection
    connection = yield from Connection.create(port=PORT, poolsize=10)

    for i in range(0, 10):
        yield from connection.delete(['my-list-%i' % i])

    @asyncio.coroutine
    def sink(i):
        the_list, result = yield from connection.blpop(['my-list-%i' % i])

    for i in range(0, 10):
        self.assertEqual(connection.connections_in_use, i)
        asyncio.Task(sink(i))
        yield from asyncio.sleep(.1)  # Sleep to make sure that the above coroutine started executing.

    # One more blocking call should fail.
    with self.assertRaises(RedisException) as e:
        yield from connection.delete(['my-list-one-more'])
        yield from connection.blpop(['my-list-one-more'])
    self.assertEqual(e.exception.args[0],
                     'All connection in the pool are in use. Please increase the poolsize.')
def main():
    loop = asyncio.get_event_loop()
    handler = Handler()
    redis = loop.run_until_complete(
        Connection.create(db=0, encoder=BytesEncoder(), loop=loop))
    session_factory = RedisSessionFactory(redis, secret_key=b'secret',
                                          cookie_name='test_cookie',
                                          loop=loop)
    server = aiorest.RESTServer(hostname='127.0.0.1', keep_alive=75,
                                session_factory=session_factory, loop=loop)
    server.add_url('GET', '/count', handler.counter)
    srv = loop.run_until_complete(loop.create_server(
        server.make_handler, '127.0.0.1', 8080))

    @asyncio.coroutine
    def query():
        connector = aiohttp.TCPConnector(share_cookies=True, loop=loop)
        for _ in range(6):
            resp = yield from aiohttp.request(
                'GET', 'http://127.0.0.1:8080/count',
                connector=connector, loop=loop)
            data = yield from resp.json()
            print('Count is', data)

    loop.run_until_complete(query())
    srv.close()
    loop.run_until_complete(srv.wait_closed())
    loop.close()
def _create_client(self, **kwargs):
    return (yield from Connection.create(**kwargs))
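# Hedged usage sketch (an assumption): _create_client simply forwards keyword
# arguments to Connection.create, so a caller on the same object might use it like
# this. The method name and connection parameters below are hypothetical.
@asyncio.coroutine
def example(self):
    client = yield from self._create_client(host='localhost', port=6379, poolsize=5)
    yield from client.set('key', 'value')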