def testPoolReconnect(self):
    """Repeatedly let pool creation (auto_reconnect=True) time out and
    verify that the number of open file descriptors never changes."""
    fd_before = self.open_fd()
    for _attempt in range(20):
        with self.assertRaises(asyncio.futures.TimeoutError):
            yield from asyncio.wait_for(
                Pool.create(auto_reconnect=True, poolsize=self.poolsize),
                timeout=self.conn_wait)
        # descriptor count must be back to where it started
        self.assertEqual(self.open_fd(), fd_before)
def testPoolStable(self):
    """With auto-reconnect disabled, closing the transport after each timed-out
    command must release exactly one file descriptor per iteration."""
    pool = yield from Pool.create(auto_reconnect=False, poolsize=self.poolsize)
    fd_start = self.open_fd()
    for closed_so_far in range(self.poolsize):
        print(self.open_fd())
        with self.assertRaises(asyncio.futures.TimeoutError):
            yield from asyncio.wait_for(pool.get("key1"),
                                        timeout=self.cmd_wait)
        pool.transport.close()
        yield from asyncio.sleep(self.sleep)
        # one more descriptor gone after each explicit close
        self.assertEqual(self.open_fd(), fd_start - closed_so_far - 1)
def testPoolReconnect(self):
    """With auto-reconnect enabled, each timed-out Pool.create leaks one
    file descriptor per iteration (the reconnect keeps the socket open)."""
    fd_start = self.open_fd()
    for step in range(20):
        print(self.open_fd())
        creation = Pool.create(host=self.host, auto_reconnect=True,
                               poolsize=self.poolsize)
        with self.assertRaises(asyncio.futures.TimeoutError):
            yield from asyncio.wait_for(creation, timeout=self.conn_wait)
        yield from asyncio.sleep(self.sleep)
        # Reconnect in action — unfortunately, task.cancel() will not close
        # the socket that was opened somewhere deep inside asyncio.
        self.assertEqual(self.open_fd(), fd_start + step + 1)
def testPoolStable(self):
    """With auto-reconnect disabled, each timed-out connect/command cycle
    still leaves one extra file descriptor open per iteration."""
    fd_start = self.open_fd()
    for step in range(self.poolsize):
        print(self.open_fd())
        with self.assertRaises(asyncio.futures.TimeoutError):
            creation = Pool.create(host=self.host, auto_reconnect=False,
                                   poolsize=self.poolsize)
            conn = yield from asyncio.wait_for(creation,
                                               timeout=self.conn_wait)
            yield from asyncio.wait_for(conn.get("key1"),
                                        timeout=self.cmd_wait)
        yield from asyncio.sleep(self.sleep)
        self.assertEqual(self.open_fd(), fd_start + step + 1)
def run(self):
    """Start the HTTP server, create the shared Redis pool, and schedule a
    periodic update task for every route of every operator line.

    Coroutine (``yield from`` style); sets ``self.srv``, ``self.redis`` and
    ``self.mivb`` as side effects.
    """
    self.srv = yield from self.loop.create_server(
        self.srv_handler, self.address, self.port)
    # One shared connection pool for all models.
    self.redis = yield from Pool.create(poolsize=100)
    Model.redis = self.redis
    self.mivb = Operator()
    for line in (yield from self.mivb):
        for route in (yield from line):
            # TODO: Gracefully cancel tasks when killing the server
            # asyncio.async() was deprecated in 3.4.4 and removed in 3.10;
            # ensure_future() is the drop-in replacement.
            asyncio.ensure_future(utils.run_periodically(route.update, 5, 5))
    print('Running server on http://%s:%s' % (self.address, self.port))
def testPoolReconnect(self):
    """Verify that with reconnect enabled the number of open file
    descriptors does not grow."""
    pool = yield from Pool.create(auto_reconnect=True,
                                  poolsize=self.poolsize)
    fd_baseline = self.open_fd()
    for _ in range(20):
        yield from pool.get("key1")
        # Close a random active connection. close() runs
        # loop.call_soon(_reconnect), which takes quite a while to
        # complete — hence the sleep below.
        pool.transport.close()
        yield from asyncio.sleep(self.conn_wait)
        self.assertEqual(self.open_fd(), fd_baseline)
def testPoolClose(self):
    """Verify that with reconnect disabled, explicitly closing a connection
    releases its file descriptor in asyncio_redis.Connection."""
    pool = yield from Pool.create(auto_reconnect=False,
                                  poolsize=self.poolsize)
    fd_baseline = self.open_fd()
    for closed in range(self.poolsize):
        yield from pool.get("key1")
        # Close a random active connection; close() goes through
        # loop.call_soon(), so give the loop time to run it.
        pool.transport.close()
        yield from asyncio.sleep(self.conn_wait)
        self.assertEqual(self.open_fd(), fd_baseline - closed - 1)
        print(self.open_fd())
def testPoolReconnect(self):
    """Force a ConnectionLostError on an in-flight command 20 times and
    verify that auto-reconnect keeps the file-descriptor count stable.

    NOTE(review): relies on CPython generator internals
    (``task.gi_frame.f_locals``) to reach the protocol object — fragile
    against asyncio_redis internal renames; confirm 'protocol_self' still
    exists when upgrading.
    """
    c = yield from Pool.create(auto_reconnect=True, poolsize=self.poolsize)
    current = self.open_fd()
    for _ in range(20):
        print(self.open_fd())
        # Again: set_exception cannot be called on a Task in the CANCELLED
        # state, which is what happens inside c.transport.close — so instead
        # of cancelling, we abort the command by closing the connection on
        # a timer.
        task = c.get("key1")
        protocol = task.gi_frame.f_locals['protocol_self']
        asyncio.get_event_loop().call_later(self.cmd_wait,
                                            protocol.transport.close)
        with self.assertRaises(asyncio_redis.ConnectionLostError):
            yield from task
        yield from asyncio.sleep(self.sleep)
        # reconnect in action
        self.assertEqual(self.open_fd(), current)
async def db(self):
    """Return the shared Redis connection pool, creating it on first use.

    Concurrent callers that arrive while another coroutine is creating the
    pool wait for that attempt to finish rather than starting their own.
    Returns ``None`` if pool creation failed (original contract preserved:
    errors are printed, not raised).
    """
    if not self.ready.is_set():
        self.init()
    await self.ready.wait()  # block until the module has loaded
    if self.pool is None:
        if self.create_pool_task is not None:
            # Another coroutine is creating the pool: wait for that attempt
            # to finish (task reset to None), then return its outcome.
            # BUGFIX: the original looped on `... or self.pool is None`,
            # which spun forever whenever creation failed.
            while self.create_pool_task is not None:
                await asyncio.sleep(1)
            return self.pool
        self.create_pool_task = asyncio.get_event_loop().create_task(
            Pool.create(host=self.host,
                        port=int(self.port),
                        poolsize=int(self.pool_size),
                        db=int(self.default_db)))
        try:
            # BUGFIX: wait_for was outside the try in the original, so a
            # TimeoutError left create_pool_task set forever, deadlocking
            # every subsequent caller in the wait loop above.
            await asyncio.wait_for(self.create_pool_task, 10)
            self.pool = self.create_pool_task.result()
        except Exception as ex:
            # Best-effort, as before: report and fall through with pool=None.
            print(ex)
        finally:
            self.create_pool_task = None
    return self.pool
def redis(self):
    """Build a Redis connection pool from the environment's
    'galerka.redis-args' setting (a (kwargs, poolsize, prefix) triple)."""
    pool_kwargs, size, _prefix = self.environ['galerka.redis-args']
    pool = yield from Pool.create(poolsize=size, **pool_kwargs)
    return pool