def post_teardown(self):
    if (not IOLoop.initialized() or
            self.io_loop is not IOLoop.instance()):
        # Try to clean up any file descriptors left open in the ioloop.
        # This avoids leaks, especially when tests are run repeatedly
        # in the same process with autoreload (because curl does not
        # set FD_CLOEXEC on its file descriptors)
        self.io_loop.close(all_fds=True)

def fork_slaves(slave_cnt):
    from tornado.process import _pipe_cloexec, PipeIOStream

    is_child, lst = False, []
    for i in range(slave_cnt):
        #r, w = os.pipe()
        r, w = _pipe_cloexec()

        # forking is only allowed before an ioloop is created
        # (any ioloop, in fact), see tornado.process.fork_processes()
        assert not IOLoop.initialized()
        pid = os.fork()

        is_child = pid == 0
        fd = r if is_child else w
        to_close = w if is_child else r
        os.close(to_close)

        if is_child:
            res = True, (i, PipeIOStream(fd))

            # :KLUDGE: is there a cleaner way to do this?
            for w_fd in lst:
                os.close(w_fd)

            break
        else:
            lst.append(fd)

    if not is_child:
        res = False, [PipeIOStream(fd) for fd in lst]
    return res

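# Hedged usage sketch for fork_slaves() above (this driver is not from the
# original source; it assumes Tornado 4.x, where IOStream.write() and
# read_until() return Futures and IOLoop.initialized() still exists).
# The master ends up with one write-side PipeIOStream per child; each child
# gets its read side, and only after the fork may either process create an
# IOLoop.
def demo_fork_slaves():
    import os
    from tornado.ioloop import IOLoop

    is_child, payload = fork_slaves(2)
    if is_child:
        slave_idx, stream = payload
        # child: creating an IOLoop is safe now, after the fork
        IOLoop.current().run_sync(lambda: stream.read_until(b'\n'))
        os._exit(0)
    else:
        for stream in payload:
            # flush each greeting through the master's loop
            IOLoop.current().run_sync(lambda s=stream: s.write(b'hello\n'))
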
def run():
    app = Application(pages, **config)
    port = random.randrange(options.min_port, options.max_port)
    app.listen(port, address='0.0.0.0')
    signal.signal(signal.SIGCHLD, handle_sigchld)
    args = ['ab']
    args.extend(['-n', str(options.n)])
    concurrency_level = min(options.c, options.n)
    args.extend(['-c', str(concurrency_level)])
    if options.post_file is not None:
        args.extend(['-p', options.post_file])
        args.extend(['-T', 'application/json'])
    if options.email is not None:
        args.extend(['-H', 'Email:{}'.format(options.email)])
    if options.token is not None:
        args.extend(['-H', 'Token:{}'.format(options.token)])
    if options.keepalive:
        args.append('-k')
    if options.quiet:
        # just stops the progress messages printed to stderr
        args.append('-q')
    args.append('http://127.0.0.1:{}{}'.format(port, options.path))
    subprocess.Popen(args)
    IOLoop.instance().start()
    IOLoop.instance().close()
    del IOLoop._instance
    assert not IOLoop.initialized()

def tearDown(self):
    self.http_server.stop()
    self.io_loop.run_sync(self.http_server.close_all_connections)
    if (not IOLoop.initialized() or
            self.http_client.io_loop is not IOLoop.instance()):
        self.http_client.close()
    super(AsyncHTTPTestCase, self).tearDown()

def install_asyncio() -> None:
    """Install tornado's loop to asyncio."""
    try:
        from tornado.ioloop import IOLoop
        from tornado.platform.asyncio import AsyncIOMainLoop

        if not IOLoop.initialized():
            AsyncIOMainLoop().install()
    except ImportError:
        pass

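# A minimal usage sketch (not from the original source): install_asyncio()
# only has an effect while no IOLoop exists yet, so it has to be one of the
# first calls in the process, before anything touches IOLoop.instance().
def main():
    install_asyncio()

    import asyncio
    from tornado.web import Application

    Application([]).listen(8888)
    asyncio.get_event_loop().run_forever()
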
def close(self):
    """Close http_server and io_loop in sequence, to ensure the
    environment is cleaned up and `setup` can run successfully in the
    next test function.

    It is suggested that this be called in `TestCase.tearDown`.
    """
    self.http_server.stop()
    if (not IOLoop.initialized() or
            self.http_client.io_loop is not IOLoop.instance()):
        self.http_client.close()
    if (not IOLoop.initialized() or
            self.io_loop is not IOLoop.instance()):
        # Try to clean up any file descriptors left open in the ioloop.
        # This avoids leaks, especially when tests are run repeatedly
        # in the same process with autoreload (because curl does not
        # set FD_CLOEXEC on its file descriptors)
        self.io_loop.close(all_fds=True)

def tearDown(self):
    # Clean up Subprocess, so it can be used again with a new ioloop.
    Subprocess.uninitialize()
    self.loop.clear_current()
    if (not IOLoop.initialized() or
            self.loop is not IOLoop.instance()):
        # Try to clean up any file descriptors left open in the ioloop.
        # This avoids leaks, especially when tests are run repeatedly
        # in the same process with autoreload (because curl does not
        # set FD_CLOEXEC on its file descriptors)
        self.loop.close(all_fds=True)
    super(TornadoAPITest, self).tearDown()

def close_io_loop(self):
    if not IOLoop.initialized() or self.io_loop is not IOLoop.instance():
        # Try to clean up any file descriptors left open in the ioloop.
        # This avoids leaks, especially when tests are run repeatedly
        # in the same process with autoreload (because curl does not
        # set FD_CLOEXEC on its file descriptors)
        # self.io_loop.close(all_fds=True)
        # Closing all fds leads to errors. I think the client is somehow
        # expecting its fd to still be open?
        self.io_loop.close()

def start(self):
    """Start tile server."""
    is_running = IOLoop.initialized()
    self.server = HTTPServer(self.app)
    self.server.listen(self.port)

    # NOTE: Check whether a server loop is already in place;
    # otherwise start a new one. When using rio-glui.server.TileServer
    # inside a Jupyter notebook, the IOLoop is already initialized.
    if not is_running:
        IOLoop.current().start()

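# A self-contained sketch of the same start() pattern; TileServer and
# PingHandler here are hypothetical stand-ins for the surrounding class,
# which is not shown (assumes Tornado < 5, where IOLoop.initialized() exists).
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler

class PingHandler(RequestHandler):
    def get(self):
        self.write('pong')

class TileServer(object):
    def __init__(self, port=8080):
        self.port = port
        self.app = Application([('/ping', PingHandler)])
        self.server = None

    def start(self):
        # inside Jupyter the kernel has already created the singleton loop
        is_running = IOLoop.initialized()
        self.server = HTTPServer(self.app)
        self.server.listen(self.port)
        if not is_running:
            # blocks in a plain script; skipped in a notebook, where the
            # server is simply scheduled on the kernel's running loop
            IOLoop.current().start()
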
def wrapper(*args, **kwargs):
    cofunc = coroutine(func)
    io_loop = IOLoop.current()
    try:
        result = io_loop.run_sync(functools.partial(cofunc, *args, **kwargs))
        return result
    finally:
        io_loop.clear_current()
        if not IOLoop.initialized() or io_loop is not IOLoop.instance():
            io_loop.close(all_fds=True)

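# For context, a sketch of the decorator such a wrapper typically lives in
# (the name `sync_coroutine` is hypothetical, not from the original source):
# it turns a generator-style coroutine into a blocking call by running it to
# completion on the current IOLoop and then tearing that loop down the same
# way AsyncTestCase does.
import functools
from tornado.gen import coroutine
from tornado.ioloop import IOLoop

def sync_coroutine(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        cofunc = coroutine(func)
        io_loop = IOLoop.current()
        try:
            return io_loop.run_sync(functools.partial(cofunc, *args, **kwargs))
        finally:
            io_loop.clear_current()
            if not IOLoop.initialized() or io_loop is not IOLoop.instance():
                io_loop.close(all_fds=True)
    return wrapper
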
def close_io_loop(self):
    if (not IOLoop.initialized() or
            self.io_loop is not IOLoop.instance()):
        # Try to clean up any file descriptors left open in the ioloop.
        # This avoids leaks, especially when tests are run repeatedly
        # in the same process with autoreload (because curl does not
        # set FD_CLOEXEC on its file descriptors)
        #self.io_loop.close(all_fds=True)
        # Closing all fds leads to errors. I think the client is somehow
        # expecting its fd to still be open?
        self.io_loop.close()

def install_loop():
    """Install and return the global ZMQEventLoop.

    Registers the loop with asyncio.set_event_loop.
    """
    # check if tornado's IOLoop is already initialized to something other
    # than the pyzmq IOLoop instance:
    assert (not IOLoop.initialized()) or \
        IOLoop.instance() is AsyncIOMainLoop.instance(), \
        "tornado IOLoop already initialized"
    # First, set asyncio to use ZMQEventLoop (ZMQSelector) as its loop
    asyncio.set_event_loop_policy(ZMQPolicy())
    # Next have tornado work on top of the current asyncio loop
    AsyncIOMainLoop().install()

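# Hedged usage sketch (not from the original source): install_loop() must be
# the first event-loop call in the process, before any asyncio or tornado
# code has had a chance to create a loop, or its assertion fires.
def main():
    install_loop()

    from tornado.ioloop import IOLoop

    # from here on, IOLoop.current() runs on top of the ZMQ-backed asyncio loop
    IOLoop.current().start()
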
def tearDown(self):
    self.io_loop.clear_current()
    if not IOLoop.initialized() or self.io_loop is not IOLoop.instance():
        # Try to clean up any file descriptors left open in the ioloop.
        # This avoids leaks, especially when tests are run repeatedly
        # in the same process with autoreload (because curl does not
        # set FD_CLOEXEC on its file descriptors)
        self.io_loop.close(all_fds=True)
    super(AsyncTestCase, self).tearDown()
    # In case an exception escaped or the StackContext caught an exception
    # when there wasn't a wait() to re-raise it, do so here.
    # This is our last chance to raise an exception in a way that the
    # unittest machinery understands.
    self.__rethrow()

def tearDown(self):
    self.io_loop.clear_current()
    if (not IOLoop.initialized() or
            self.io_loop is not IOLoop.instance()):
        # Try to clean up any file descriptors left open in the ioloop.
        # This avoids leaks, especially when tests are run repeatedly
        # in the same process with autoreload (because curl does not
        # set FD_CLOEXEC on its file descriptors)
        self.io_loop.close(all_fds=True)
    super(AsyncTestCase, self).tearDown()
    # In case an exception escaped or the StackContext caught an exception
    # when there wasn't a wait() to re-raise it, do so here.
    # This is our last chance to raise an exception in a way that the
    # unittest machinery understands.
    self.__rethrow()

def _publish_msg(self, msg_type, data=None, metadata=None, buffers=None, **keys):
    """Helper for sending a comm message on IOPub"""
    if threading.current_thread().name != 'MainThread' and IOLoop.initialized():
        # make sure we never send on a zmq socket outside the main IOLoop thread
        IOLoop.instance().add_callback(
            lambda: self._publish_msg(msg_type, data, metadata, buffers, **keys))
        return
    data = {} if data is None else data
    metadata = {} if metadata is None else metadata
    content = json_clean(dict(data=data, comm_id=self.comm_id, **keys))
    self.kernel.session.send(self.kernel.iopub_socket, msg_type,
                             content,
                             metadata=json_clean(metadata),
                             parent=self.kernel._parent_header,
                             ident=self.topic,
                             buffers=buffers,
                             )

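# The pattern above generalizes: add_callback() is the one IOLoop method
# documented as thread-safe, so any cross-thread send should trampoline onto
# the loop's thread. A standalone sketch; send_from_any_thread and do_send
# are hypothetical names (assumes Python 3 for threading.main_thread()).
import threading
from tornado.ioloop import IOLoop

def send_from_any_thread(payload):
    if threading.current_thread() is not threading.main_thread():
        # re-enter this function on the IOLoop's (main) thread
        IOLoop.instance().add_callback(send_from_any_thread, payload)
        return
    do_send(payload)  # hypothetical: the actual ZMQ send, main thread only
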
def test_multi_process(self):
    self.assertFalse(IOLoop.initialized())
    port = get_unused_port()

    def get_url(path):
        return "http://127.0.0.1:%d%s" % (port, path)
    sockets = bind_sockets(port, "127.0.0.1")
    # ensure that none of these processes live too long
    signal.alarm(5)  # master process
    try:
        id = fork_processes(3, max_restarts=3)
    except SystemExit as e:
        # if we exit cleanly from fork_processes, all the child processes
        # finished with status 0
        self.assertEqual(e.code, 0)
        self.assertTrue(task_id() is None)
        for sock in sockets:
            sock.close()
        signal.alarm(0)
        return

def run(): app = Application([("/", RootHandler)]) port = random.randrange(options.min_port, options.max_port) app.listen(port, address='127.0.0.1') signal.signal(signal.SIGCHLD, handle_sigchld) args = ["ab"] args.extend(["-n", str(options.n)]) args.extend(["-c", str(options.c)]) if options.keepalive: args.append("-k") if options.quiet: # just stops the progress messages printed to stderr args.append("-q") args.append("http://127.0.0.1:%d/" % port) subprocess.Popen(args) IOLoop.instance().start() IOLoop.instance().close() del IOLoop._instance assert not IOLoop.initialized()
def install_asyncio():
    if IOLoop.initialized():
        logger.debug('Asyncio cannot be installed')
        return
    import asyncio
    from tornado.platform.asyncio import AsyncIOMainLoop
    try:
        # use uvloop if available
        import uvloop
    except ImportError:
        pass
    else:
        logger.debug('Enabled uvloop')
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    AsyncIOMainLoop().install()
    logger.debug('Enabled asyncio')

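# Ordering note as a runnable sketch (not from the original source): the
# uvloop policy only affects loops created after it is set, which is why this
# install_asyncio() bails out once IOLoop.initialized() is already true.
def main():
    install_asyncio()  # picks uvloop when importable, plain asyncio otherwise

    from tornado.ioloop import IOLoop

    IOLoop.current().start()
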
def run():
    app = Application([("/", RootHandler)])
    port = random.randrange(options.min_port, options.max_port)
    app.listen(port, address='127.0.0.1')
    signal.signal(signal.SIGCHLD, handle_sigchld)
    args = ["ab"]
    args.extend(["-n", str(options.n)])
    args.extend(["-c", str(options.c)])
    if options.keepalive:
        args.append("-k")
    if options.quiet:
        # just stops the progress messages printed to stderr
        args.append("-q")
    args.append("http://127.0.0.1:%d/" % port)
    subprocess.Popen(args)
    IOLoop.instance().start()
    IOLoop.instance().close(all_fds=True)
    del IOLoop._instance
    assert not IOLoop.initialized()

def create_server(port=8080, base_url=None, max_buffer_size=10 * 1024 * 1024,
                  debug=False):
    """
    Run the main event loop.

    Examples
    --------
    .. code-block:: python

        import asyncio
        from quack import (create_server, route, aspect, Handler,
                           Http401Unauthorized)

        @route('/test')
        class TestHandler(Handler):

            @aspect('basic_auth_headers')
            async def _get(self, basic_auth_headers=None):
                if basic_auth_headers is None:
                    raise Http401Unauthorized({
                        'result': 'failure',
                        'reason': 'No basic authentication headers.'
                    })
                return {'user': basic_auth_headers[0]}

        create_server()
        asyncio.get_event_loop().run_forever()
    """
    if not IOLoop.initialized():
        logger.debug('Installing the Tornado IOLoop.')
        AsyncIOMainLoop().install()
    app = tornado.web.Application(get_routes(base_url), debug=debug)
    server = tornado.httpserver.HTTPServer(app, max_buffer_size=max_buffer_size)
    server.listen(port)
    return server

def tearDown(self):
    if (not IOLoop.initialized() or
            self.io_loop is not IOLoop.instance()):
        self.io_loop.close(all_fds=True)
    super(AsyncTestCase, self).tearDown()

def test_multi_process(self):
    self.assertFalse(IOLoop.initialized())
    port = get_unused_port()

    def get_url(path):
        return "http://127.0.0.1:%d%s" % (port, path)
    sockets = bind_sockets(port, "127.0.0.1")
    # ensure that none of these processes live too long
    signal.alarm(5)  # master process
    id = fork_processes(3, max_restarts=3)
    if id is None:
        # back in the master process; everything worked!
        self.assertTrue(task_id() is None)
        for sock in sockets:
            sock.close()
        signal.alarm(0)
        return
    signal.alarm(5)  # child process
    try:
        if id in (0, 1):
            signal.alarm(5)
            self.assertEqual(id, task_id())
            server = HTTPServer(self.get_app())
            server.add_sockets(sockets)
            IOLoop.instance().start()
        elif id == 2:
            signal.alarm(5)
            self.assertEqual(id, task_id())
            for sock in sockets:
                sock.close()
            client = HTTPClient()

            def fetch(url, fail_ok=False):
                try:
                    return client.fetch(get_url(url))
                except HTTPError as e:
                    if not (fail_ok and e.code == 599):
                        raise

            # Make two processes exit abnormally
            fetch("/?exit=2", fail_ok=True)
            fetch("/?exit=3", fail_ok=True)
            # They've been restarted, so a new fetch will work
            int(fetch("/").body)

            # Now the same with signals
            # Disabled because on the mac a process dying with a signal
            # can trigger an "Application exited abnormally; send error
            # report to Apple?" prompt.
            #fetch("/?signal=%d" % signal.SIGTERM, fail_ok=True)
            #fetch("/?signal=%d" % signal.SIGABRT, fail_ok=True)
            #int(fetch("/").body)

            # Now kill them normally so they won't be restarted
            fetch("/?exit=0", fail_ok=True)
            # One process left; watch its pid change
            pid = int(fetch("/").body)
            fetch("/?exit=4", fail_ok=True)
            pid2 = int(fetch("/").body)
            self.assertNotEqual(pid, pid2)

            # Kill the last one so we shut down cleanly
            fetch("/?exit=0", fail_ok=True)

            os._exit(0)
    except Exception:
        logging.error("exception in child process %d", id, exc_info=True)
        raise

def test_multi_process(self):
    # This test can't work on twisted because we use the global reactor
    # and have no way to get it back into a sane state after the fork.
    skip_if_twisted()
    with ExpectLog(gen_log, "(Starting .* processes|child .* exited|uncaught exception)"):
        self.assertFalse(IOLoop.initialized())
        sock, port = bind_unused_port()

        def get_url(path):
            return "http://127.0.0.1:%d%s" % (port, path)
        # ensure that none of these processes live too long
        signal.alarm(5)  # master process
        try:
            id = fork_processes(3, max_restarts=3)
            self.assertTrue(id is not None)
            signal.alarm(5)  # child processes
        except SystemExit as e:
            # if we exit cleanly from fork_processes, all the child processes
            # finished with status 0
            self.assertEqual(e.code, 0)
            self.assertTrue(task_id() is None)
            sock.close()
            return
        try:
            if id in (0, 1):
                self.assertEqual(id, task_id())
                server = HTTPServer(self.get_app())
                server.add_sockets([sock])
                IOLoop.current().start()
            elif id == 2:
                self.assertEqual(id, task_id())
                sock.close()
                # Always use SimpleAsyncHTTPClient here; the curl
                # version appears to get confused sometimes if the
                # connection gets closed before it's had a chance to
                # switch from writing mode to reading mode.
                client = HTTPClient(SimpleAsyncHTTPClient)

                def fetch(url, fail_ok=False):
                    try:
                        return client.fetch(get_url(url))
                    except HTTPError as e:
                        if not (fail_ok and e.code == 599):
                            raise

                # Make two processes exit abnormally
                fetch("/?exit=2", fail_ok=True)
                fetch("/?exit=3", fail_ok=True)
                # They've been restarted, so a new fetch will work
                int(fetch("/").body)

                # Now the same with signals
                # Disabled because on the mac a process dying with a signal
                # can trigger an "Application exited abnormally; send error
                # report to Apple?" prompt.
                # fetch("/?signal=%d" % signal.SIGTERM, fail_ok=True)
                # fetch("/?signal=%d" % signal.SIGABRT, fail_ok=True)
                # int(fetch("/").body)

                # Now kill them normally so they won't be restarted
                fetch("/?exit=0", fail_ok=True)
                # One process left; watch its pid change
                pid = int(fetch("/").body)
                fetch("/?exit=4", fail_ok=True)
                pid2 = int(fetch("/").body)
                self.assertNotEqual(pid, pid2)

                # Kill the last one so we shut down cleanly
                fetch("/?exit=0", fail_ok=True)

                os._exit(0)
        except Exception:
            logging.error("exception in child process %d", id, exc_info=True)
            raise

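# For reference, the production-side shape of the API this test exercises:
# the standard tornado multi-process serving pattern (a sketch, assuming
# Tornado < 5; `make_app` is a hypothetical application factory).
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.netutil import bind_sockets
from tornado.process import fork_processes

def serve_forked(make_app, port, n_processes=0):
    sockets = bind_sockets(port)
    fork_processes(n_processes)   # fork before any IOLoop is created
    server = HTTPServer(make_app())
    server.add_sockets(sockets)
    IOLoop.current().start()
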
def test_multi_process(self):
    self.assertFalse(IOLoop.initialized())
    port = get_unused_port()

    def get_url(path):
        return "http://127.0.0.1:%d%s" % (port, path)
    sockets = bind_sockets(port, "127.0.0.1")
    # ensure that none of these processes live too long
    signal.alarm(5)  # master process
    id = fork_processes(3, max_restarts=3)
    if id is None:
        # back in the master process; everything worked!
        self.assertTrue(task_id() is None)
        for sock in sockets:
            sock.close()
        signal.alarm(0)
        return
    signal.alarm(5)  # child process
    try:
        if id in (0, 1):
            signal.alarm(5)
            self.assertEqual(id, task_id())
            server = HTTPServer(self.get_app())
            server.add_sockets(sockets)
            IOLoop.instance().start()
        elif id == 2:
            signal.alarm(5)
            self.assertEqual(id, task_id())
            for sock in sockets:
                sock.close()
            client = HTTPClient()

            def fetch(url, fail_ok=False):
                try:
                    return client.fetch(get_url(url))
                except HTTPError as e:
                    if not (fail_ok and e.code == 599):
                        raise

            # Make two processes exit abnormally
            fetch("/?exit=2", fail_ok=True)
            fetch("/?exit=3", fail_ok=True)
            # They've been restarted, so a new fetch will work
            int(fetch("/").body)

            # Now the same with signals
            # Disabled because on the mac a process dying with a signal
            # can trigger an "Application exited abnormally; send error
            # report to Apple?" prompt.
            #fetch("/?signal=%d" % signal.SIGTERM, fail_ok=True)
            #fetch("/?signal=%d" % signal.SIGABRT, fail_ok=True)
            #int(fetch("/").body)

            # Now kill them normally so they won't be restarted
            fetch("/?exit=0", fail_ok=True)
            # One process left; watch its pid change
            pid = int(fetch("/").body)
            fetch("/?exit=4", fail_ok=True)
            pid2 = int(fetch("/").body)
            self.assertNotEqual(pid, pid2)

            # Kill the last one so we shut down cleanly
            fetch("/?exit=0", fail_ok=True)

            os._exit(0)
    except Exception:
        logging.error("exception in child process %d", id, exc_info=True)
        raise

def test_multi_process(self):
    # This test can't work on twisted because we use the global reactor
    # and have no way to get it back into a sane state after the fork.
    skip_if_twisted()
    with ExpectLog(gen_log, "(Starting .* processes|child .* exited|uncaught exception)"):
        self.assertFalse(IOLoop.initialized())
        sock, port = bind_unused_port()

        def get_url(path):
            return "http://127.0.0.1:%d%s" % (port, path)
        # ensure that none of these processes live too long
        signal.alarm(5)  # master process
        try:
            id = fork_processes(3, max_restarts=3)
            self.assertTrue(id is not None)
            signal.alarm(5)  # child processes
        except SystemExit as e:
            # if we exit cleanly from fork_processes, all the child processes
            # finished with status 0
            self.assertEqual(e.code, 0)
            self.assertTrue(task_id() is None)
            sock.close()
            return
        try:
            if id in (0, 1):
                self.assertEqual(id, task_id())
                server = HTTPServer(self.get_app())
                server.add_sockets([sock])
                IOLoop.instance().start()
            elif id == 2:
                self.assertEqual(id, task_id())
                sock.close()
                # Always use SimpleAsyncHTTPClient here; the curl
                # version appears to get confused sometimes if the
                # connection gets closed before it's had a chance to
                # switch from writing mode to reading mode.
                client = HTTPClient(SimpleAsyncHTTPClient)

                def fetch(url, fail_ok=False):
                    try:
                        return client.fetch(get_url(url))
                    except HTTPError as e:
                        if not (fail_ok and e.code == 599):
                            raise

                # Make two processes exit abnormally
                fetch("/?exit=2", fail_ok=True)
                fetch("/?exit=3", fail_ok=True)
                # They've been restarted, so a new fetch will work
                int(fetch("/").body)

                # Now the same with signals
                # Disabled because on the mac a process dying with a signal
                # can trigger an "Application exited abnormally; send error
                # report to Apple?" prompt.
                #fetch("/?signal=%d" % signal.SIGTERM, fail_ok=True)
                #fetch("/?signal=%d" % signal.SIGABRT, fail_ok=True)
                #int(fetch("/").body)

                # Now kill them normally so they won't be restarted
                fetch("/?exit=0", fail_ok=True)
                # One process left; watch its pid change
                pid = int(fetch("/").body)
                fetch("/?exit=4", fail_ok=True)
                pid2 = int(fetch("/").body)
                self.assertNotEqual(pid, pid2)

                # Kill the last one so we shut down cleanly
                fetch("/?exit=0", fail_ok=True)

                os._exit(0)
        except Exception:
            logging.error("exception in child process %d", id, exc_info=True)
            raise

def install_asyncio() -> None:
    """Ensure that asyncio's io-loop is installed to tornado."""
    from tornado.ioloop import IOLoop
    from tornado.platform.asyncio import AsyncIOMainLoop

    if not IOLoop.initialized():
        AsyncIOMainLoop().install()

def setUp(self):
    self.reverters = []
    if IOLoop.initialized():
        del IOLoop._instance

def test_instance_methods_ensures_singleton(self):
    io_loop = IOLoop.instance()
    same_io_loop = IOLoop.instance()

    self.assertEqual(io_loop, same_io_loop)
    self.assertTrue(IOLoop.initialized())

def install_asyncio():
    '''Ensure that asyncio's io-loop is installed to tornado.'''
    from tornado.ioloop import IOLoop
    from tornado.platform.asyncio import AsyncIOMainLoop

    if not IOLoop.initialized():
        AsyncIOMainLoop().install()

def test_ioloop_creation_without_instance_method_does_not_enforce_singleton(self):
    io_loop = IOLoop()

    self.assertFalse(IOLoop.initialized())
    self.assertNotEqual(io_loop, IOLoop.instance())

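# A compact restatement of the invariant the two singleton tests above pin
# down (a sketch, valid for Tornado < 5, where IOLoop.initialized() and the
# _instance slot exist; run in a fresh process so the singleton is unset):
# IOLoop() builds a private loop without touching the singleton, while
# IOLoop.instance() lazily creates and memoizes it.
from tornado.ioloop import IOLoop

private_loop = IOLoop()                  # does not register as the singleton
assert not IOLoop.initialized()

global_loop = IOLoop.instance()          # creates and memoizes the singleton
assert IOLoop.initialized()
assert global_loop is IOLoop.instance()  # same object on every call
assert private_loop is not global_loop
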
def tearDown(self):
    self.http_server.stop()
    if not IOLoop.initialized() or self.http_client.io_loop is not IOLoop.instance():
        self.http_client.close()
    super(AsyncHTTPTestCase, self).tearDown()