def _kill(self, stream=None, timeout=5):
    """ Kill the local worker process

    Blocks until both the process is down and the scheduler is
    properly informed
    """
    while not self.worker_port:
        yield gen.sleep(0.1)

    if self.process is not None:
        try:
            # Ask worker to close
            worker = rpc(ip='127.0.0.1', port=self.worker_port)
            result = yield gen.with_timeout(
                timedelta(seconds=min(1, timeout)),
                worker.terminate(report=False),
                io_loop=self.loop)
        except gen.TimeoutError:
            logger.info("Worker non-responsive. Terminating.")
        except StreamClosedError:
            pass
        except Exception as e:
            logger.exception(e)

        try:
            # Tell scheduler that worker is gone
            result = yield gen.with_timeout(
                timedelta(seconds=timeout),
                self.scheduler.unregister(address=self.worker_address),
                io_loop=self.loop)
            if result not in ('OK', 'already-removed'):
                logger.critical("Unable to unregister with scheduler %s. "
                                "Nanny: %s, Worker: %s", result,
                                self.address_tuple, self.worker_address)
            else:
                logger.info("Unregister worker %s:%d from scheduler",
                            self.ip, self.worker_port)
        except gen.TimeoutError:
            logger.info("Nanny %s:%d failed to unregister worker %s:%d",
                        self.ip, self.port, self.ip, self.worker_port,
                        exc_info=True)
        except StreamClosedError:
            pass
        except Exception as e:
            logger.exception(e)

        if self.process:
            with ignoring(OSError):
                self.process.terminate()
            if self.process in processes_to_close:
                processes_to_close.remove(self.process)
            start = time()
            while isalive(self.process) and time() < start + timeout:
                sleep(0.01)
            self.process = None
            self.cleanup()
            logger.info("Nanny %s:%d kills worker process %s:%d",
                        self.ip, self.port, self.ip, self.worker_port)

    raise gen.Return('OK')
def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None,
            max_buffer_size=None, source_ip=None, source_port=None,
            timeout=None):
    """Connect to the given host and port.

    Asynchronously returns an `.IOStream` (or `.SSLIOStream` if
    ``ssl_options`` is not None).

    Using the ``source_ip`` kwarg, one can specify the source IP address
    to use when establishing the connection. In case the user needs to
    resolve and use a specific interface, it has to be handled outside
    of Tornado as this depends very much on the platform.

    Raises `TimeoutError` if the input future does not complete before
    ``timeout``, which may be specified in any form allowed by
    `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
    relative to `.IOLoop.time`).

    Similarly, when the user requires a certain source port, it can be
    specified using the ``source_port`` arg.

    .. versionchanged:: 4.5
       Added the ``source_ip`` and ``source_port`` arguments.

    .. versionchanged:: 5.0
       Added the ``timeout`` argument.
    """
    if timeout is not None:
        if isinstance(timeout, numbers.Real):
            timeout = IOLoop.current().time() + timeout
        elif isinstance(timeout, datetime.timedelta):
            timeout = IOLoop.current().time() + timedelta_to_seconds(timeout)
        else:
            raise TypeError("Unsupported timeout %r" % timeout)
    if timeout is not None:
        addrinfo = yield gen.with_timeout(
            timeout, self.resolver.resolve(host, port, af))
    else:
        addrinfo = yield self.resolver.resolve(host, port, af)
    connector = _Connector(
        addrinfo,
        functools.partial(self._create_stream, max_buffer_size,
                          source_ip=source_ip, source_port=source_port)
    )
    af, addr, stream = yield connector.start(connect_timeout=timeout)
    # TODO: For better performance we could cache the (af, addr)
    # information here and re-use it on subsequent connections to
    # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2)
    if ssl_options is not None:
        if timeout is not None:
            stream = yield gen.with_timeout(timeout, stream.start_tls(
                False, ssl_options=ssl_options, server_hostname=host))
        else:
            stream = yield stream.start_tls(False, ssl_options=ssl_options,
                                            server_hostname=host)
    raise gen.Return(stream)
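# Usage sketch for the connect() method above -- a minimal example, assuming
# Tornado >= 5.0 (where the ``timeout`` argument exists); the host, port and
# coroutine name are illustrative, not from the original source.
from tornado import gen
from tornado.tcpclient import TCPClient


@gen.coroutine
def fetch_greeting():
    client = TCPClient()
    # A plain number is taken as seconds relative to IOLoop.current().time();
    # a datetime.timedelta works as well, per the docstring above.
    stream = yield client.connect('127.0.0.1', 8888, timeout=5)
    try:
        line = yield stream.read_until(b'\n')
    finally:
        stream.close()
    raise gen.Return(line)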
def locator_health_check(self, period=5):
    wait_timeout = datetime.timedelta(seconds=period)
    while True:
        try:
            self.logger.debug("check health status of locator via cluster method")
            channel = yield gen.with_timeout(wait_timeout, self.locator.cluster())
            cluster = yield gen.with_timeout(wait_timeout, channel.rx.get())
            self.locator_status = True
            self.logger.debug("dumped cluster %s", cluster)
            yield gen.sleep(period)
        except Exception as err:
            self.logger.error("health status check failed: %s", err)
            self.locator_status = False
            yield gen.sleep(1)
def _connect(self, conn_type, session, force_close, force_release, pool):
    future = self._future_class()
    request = self._connector(self._url)
    if self._timeout:
        future_conn = with_timeout(self._timeout, websocket_connect(request))
    else:
        future_conn = websocket_connect(request)

    def get_conn(f):
        try:
            conn = f.result()
        except socket.error:
            future.set_exception(
                RuntimeError("Could not connect to server."))
        except socket.gaierror:
            future.set_exception(
                RuntimeError("Could not connect to server."))
        except HTTPError as e:
            future.set_exception(e)
        except Exception as e:
            future.set_exception(e)
        else:
            resp = Response(conn, self._future_class, self._loop)
            gc = conn_type(resp, self._future_class, self._timeout,
                           self._username, self._password, self._loop,
                           force_close, pool, force_release, session)
            future.set_result(gc)

    future_conn.add_done_callback(get_conn)
    return future
def acquire(self, stream=None, name=None, id=None, timeout=None):
    with log_errors():
        if isinstance(name, list):
            name = tuple(name)
        if name not in self.ids:
            result = True
        else:
            while name in self.ids:
                event = tornado.locks.Event()
                self.events[name].append(event)
                future = event.wait()
                if timeout is not None:
                    future = gen.with_timeout(timedelta(seconds=timeout), future)
                try:
                    yield future
                except gen.TimeoutError:
                    result = False
                    break
                else:
                    result = True
                finally:
                    event2 = self.events[name].popleft()
                    assert event is event2

        if result:
            assert name not in self.ids
            self.ids[name] = id

        raise gen.Return(result)
def connect(self):
    try:
        if not proxy:
            host, port = self.request.uri.split(':')
        else:
            host, port = proxy[0], proxy[1]
        # timeout(check_host(host,port))
        remote = yield gen.with_timeout(
            tornado.ioloop.IOLoop.current().time() + 5,
            tornado.tcpclient.TCPClient().connect(host, int(port)))
    except:
        # raise tornado.web.HTTPError(504)
        self.request.connection.close()
        return
    self._auto_finish = False
    client = self.request.connection.detach()
    yield client.write(b'HTTP/1.0 200 Connection established\r\n\r\n')
    try:
        fw = remote.set_close_callback(gen.Callback(remote))
        client.read_until_close(lambda x: x,
                                streaming_callback=lambda x: remote.write(x))
        remote.read_until_close(lambda x: x,
                                streaming_callback=lambda x: client.write(x))
    except:
        print("https closed")
        client.close()
        remote.close()
    yield [
        gen.Task(client.set_close_callback),
        gen.Task(remote.set_close_callback),
    ]
def test_completes_before_timeout(self):
    future = Future()
    self.io_loop.add_timeout(datetime.timedelta(seconds=0.1),
                             lambda: future.set_result('asdf'))
    result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
                                    future, io_loop=self.io_loop)
    self.assertEqual(result, 'asdf')
def test_no_ref(self):
    # In this usage, there is no direct hard reference to the
    # WaitIterator itself, only the Future it returns. Since
    # WaitIterator uses weak references internally to improve GC
    # performance, this used to cause problems.
    yield gen.with_timeout(datetime.timedelta(seconds=0.1),
                           gen.WaitIterator(gen.sleep(0)).next())
def stop_single_user(self, user):
    if user.stop_pending:
        raise RuntimeError("Stop already pending for: %s" % user.name)
    tic = IOLoop.current().time()
    yield self.proxy.delete_user(user)
    f = user.stop()

    @gen.coroutine
    def finish_stop(f=None):
        """Finish the stop action by noticing that the user is stopped.

        If the spawner is slow to stop, this is passed as an async callback,
        otherwise it is called immediately.
        """
        if f and f.exception() is not None:
            # failed, don't do anything
            return
        toc = IOLoop.current().time()
        self.log.info("User %s server took %.3f seconds to stop",
                      user.name, toc - tic)

    try:
        yield gen.with_timeout(timedelta(seconds=self.slow_stop_timeout), f)
    except gen.TimeoutError:
        if user.stop_pending:
            # hit timeout, but stop is still pending
            self.log.warning("User %s server is slow to stop", user.name)
            # schedule finish for when the server finishes stopping
            IOLoop.current().add_future(f, finish_stop)
        else:
            raise
    else:
        yield finish_stop()
def wait(self, timeout: Union[float, datetime.timedelta] = None) -> "Future[None]":
    """Block until the internal flag is true.

    Returns a Future, which raises `tornado.util.TimeoutError` after a
    timeout.
    """
    fut = Future()  # type: Future[None]
    if self._value:
        fut.set_result(None)
        return fut
    self._waiters.add(fut)
    fut.add_done_callback(lambda fut: self._waiters.remove(fut))
    if timeout is None:
        return fut
    else:
        timeout_fut = gen.with_timeout(
            timeout, fut, quiet_exceptions=(CancelledError,)
        )
        # This is a slightly clumsy workaround for the fact that
        # gen.with_timeout doesn't cancel its futures. Cancelling
        # fut will remove it from the waiters list.
        timeout_fut.add_done_callback(
            lambda tf: fut.cancel() if not fut.done() else None
        )
        return timeout_fut
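# Usage sketch for Event.wait() above -- a minimal example, assuming
# Tornado >= 5, where gen.TimeoutError is an alias of tornado.util.TimeoutError
# (the exception named in the docstring); names are illustrative.
import datetime
from tornado import gen
from tornado.locks import Event

event = Event()


@gen.coroutine
def wait_for_flag():
    try:
        yield event.wait(datetime.timedelta(seconds=1))
        raise gen.Return('flag was set')
    except gen.TimeoutError:
        raise gen.Return('gave up after one second')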
def stop_single_user(self, user, name=''):
    if name not in user.spawners:
        raise KeyError("User %s has no such spawner %r" % (user.name, name))
    spawner = user.spawners[name]
    if spawner.pending:
        raise RuntimeError("%s pending %s" % (spawner._log_name, spawner.pending))
    # set user._stop_pending before doing anything async
    # to avoid races
    spawner._stop_pending = True

    @gen.coroutine
    def stop():
        """Stop the server

        1. remove it from the proxy
        2. stop the server
        3. notice that it stopped
        """
        tic = IOLoop.current().time()
        try:
            yield self.proxy.delete_user(user, name)
            yield user.stop(name)
        finally:
            spawner._stop_pending = False
        toc = IOLoop.current().time()
        self.log.info("User %s server took %.3f seconds to stop",
                      user.name, toc - tic)

    try:
        yield gen.with_timeout(timedelta(seconds=self.slow_stop_timeout), stop())
    except gen.TimeoutError:
        if spawner._stop_pending:
            # hit timeout, but stop is still pending
            self.log.warning("User %s:%s server is slow to stop",
                             user.name, name)
        else:
            raise
def on_message(self, message):
    self.node_key = message
    if len(message) != 32:
        self.write_message({"error": "invalid node sn"})
        self.connected = False
        self.close()
    self.cur_conn = self.find_node(message)
    if not self.cur_conn:
        self.node_offline()
        return
    # clear the events buffered before any websocket client connected
    self.cur_conn.event_happened = []
    while self.connected:
        self.future = self.wait_event_post()
        event = None
        try:
            event = yield gen.with_timeout(timedelta(seconds=5), self.future,
                                           io_loop=ioloop.IOLoop.current())
        except gen.TimeoutError:
            if not self.cur_conn or self.cur_conn.killed:
                gen_log.debug("node %s is offline" % message)
                self.cur_conn = self.find_node(message)
                if not self.cur_conn:
                    self.node_offline()
        if event:
            self.write_message(event)
        yield gen.moment
def maybeTimeout(stream, timeout, future):
    if timeout:
        return gen.with_timeout(
            stream.io_loop.time() + timeout, future, io_loop=stream.io_loop)
    return future
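# Usage sketch for maybeTimeout() above -- a minimal example; the stream and
# the 1024-byte read are illustrative. Passing timeout=None (or 0) yields the
# future unwrapped, so no deadline is applied.
from tornado import gen


@gen.coroutine
def read_with_optional_deadline(stream, timeout=None):
    future = stream.read_bytes(1024, partial=True)
    data = yield maybeTimeout(stream, timeout, future)
    raise gen.Return(data)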
def run_traffic_jam(nsends, nbytes):
    # This test eats `nsends * nbytes` bytes in RAM
    np = pytest.importorskip('numpy')
    from distributed.protocol import to_serialize
    data = bytes(np.random.randint(0, 255, size=(nbytes,)).astype('u1').data)
    with echo_server() as e:
        comm = yield connect(e.address)

        b = BatchedSend(interval=0.01)
        b.start(comm)

        msg = {'x': to_serialize(data)}
        for i in range(nsends):
            b.send(assoc(msg, 'i', i))
            if np.random.random() > 0.5:
                yield gen.sleep(0.001)

        results = []
        count = 0
        while len(results) < nsends:
            # If this times out then I think it's a backpressure issue
            # Somehow we're able to flood the socket so that the receiving end
            # loses some of our messages
            L = yield gen.with_timeout(timedelta(seconds=5), comm.read())
            count += 1
            results.extend(r['i'] for r in L)

        assert count == b.batch_count == e.count
        assert b.message_count == nsends

        assert results == list(range(nsends))

        comm.close()  # external closing
        yield b.close()
def test_serializers():
    with echo_server() as e:
        comm = yield connect(e.address)

        b = BatchedSend(interval='10ms', serializers=['msgpack'])
        b.start(comm)

        b.send({'x': to_serialize(123)})
        b.send({'x': to_serialize('hello')})
        yield gen.sleep(0.100)

        b.send({'x': to_serialize(lambda x: x + 1)})

        with captured_logger('distributed.protocol') as sio:
            yield gen.sleep(0.100)

        value = sio.getvalue()
        assert 'serialize' in value
        assert 'type' in value
        assert 'function' in value

        msg = yield comm.read()
        assert list(msg) == [{'x': 123}, {'x': 'hello'}]

        with pytest.raises(gen.TimeoutError):
            msg = yield gen.with_timeout(timedelta(milliseconds=100),
                                         comm.read())
def test_normal_concurrent_future(self):
    # A concurrent future that resolves while waiting for the timeout.
    with futures.ThreadPoolExecutor(1) as executor:
        yield gen.with_timeout(
            datetime.timedelta(seconds=3600),
            executor.submit(lambda: time.sleep(0.01)),
        )
def recv():
    while True:
        result = yield gen.with_timeout(timedelta(seconds=1), read(stream))
        print(result)
        L.extend(result)
        if result[-1] == 9999:
            break
def test_async_with_timeout(self):
    async def f1():
        return 42

    result = yield gen.with_timeout(datetime.timedelta(hours=1), f1())
    self.assertEqual(result, 42)
    self.finished = True
def test_dataframes(e, s, a, b):
    dfs = [pd.DataFrame({'x': np.random.random(100),
                         'y': np.random.random(100)},
                        index=list(range(i, i + 100)))
           for i in range(0, 100 * 10, 100)]

    remote_dfs = e.map(lambda x: x, dfs)
    rdf = yield _futures_to_dask_dataframe(remote_dfs, divisions=True)
    name = 'foo'
    ldf = dd.DataFrame({(name, i): df for i, df in enumerate(dfs)},
                       name, dfs[0].columns,
                       list(range(0, 1000, 100)) + [999])

    assert rdf.divisions == ldf.divisions

    remote = e.compute(rdf)
    result = yield remote._result()

    tm.assert_frame_equal(result, ldf.compute(get=dask.get))

    exprs = [lambda df: df.x.mean(),
             lambda df: df.y.std(),
             lambda df: df.assign(z=df.x + df.y).drop_duplicates(),
             lambda df: df.index,
             lambda df: df.x,
             lambda df: df.x.cumsum(),
             lambda df: df.groupby(['x', 'y']).count(),
             lambda df: df.loc[50:75]]
    for f in exprs:
        local = f(ldf).compute(get=dask.get)
        remote = e.compute(f(rdf))
        remote = yield gen.with_timeout(timedelta(seconds=5),
                                        remote._result())
        assert_equal(local, remote)
def _kill(self, stream=None, timeout=5):
    """ Kill the local worker process

    Blocks until both the process is down and the center is properly
    informed
    """
    while not self.worker_port:
        yield gen.sleep(0.1)

    if self.process is not None:
        try:
            result = yield gen.with_timeout(
                timedelta(seconds=timeout),
                self.center.unregister(address=self.worker_address))
            if result not in ('OK', 'already-removed'):
                logger.critical("Unable to unregister with center %s. "
                                "Nanny: %s, Worker: %s", result,
                                self.address_tuple, self.worker_address)
            else:
                logger.info("Unregister worker %s:%d from center",
                            self.ip, self.worker_port)
        except gen.TimeoutError:
            logger.info("Nanny %s:%d failed to unregister worker %s:%d",
                        self.ip, self.port, self.ip, self.worker_port,
                        exc_info=True)
        self.process.terminate()
        self.process.join(timeout=timeout)
        self.process = None
        self.cleanup()
        logger.info("Nanny %s:%d kills worker process %s:%d",
                    self.ip, self.port, self.ip, self.worker_port)
    raise gen.Return('OK')
def test_completed_concurrent_future(self):
    # A concurrent future that is resolved before we even submit it
    # to with_timeout.
    with futures.ThreadPoolExecutor(1) as executor:
        f = executor.submit(lambda: None)
        f.result()  # wait for completion
        yield gen.with_timeout(datetime.timedelta(seconds=3600), f)
def test_timeout_concurrent_future(self):
    # A concurrent future that does not resolve before the timeout.
    with futures.ThreadPoolExecutor(1) as executor:
        with self.assertRaises(gen.TimeoutError):
            yield gen.with_timeout(
                self.io_loop.time(),
                executor.submit(time.sleep, 0.1)
            )
def f():
    scheduler = rpc(ip=nannies[0].scheduler.ip,
                    port=nannies[0].scheduler.port)
    if not no_nanny:
        yield gen.with_timeout(
            timedelta(seconds=2),
            All([scheduler.unregister(address=n.worker_address, close=True)
                 for n in nannies if n.process and n.worker_port]),
            io_loop=loop2)
def test_stress_creation_and_deletion(c, s):
    # Assertions are handled by the validate mechanism in the scheduler
    s.allowed_failures = 100000
    da = pytest.importorskip('dask.array')

    x = da.random.random(size=(2000, 2000), chunks=(100, 100))
    y = (x + 1).T + (x * 2) - x.mean(axis=1)

    z = c.persist(y)

    @gen.coroutine
    def create_and_destroy_worker(delay):
        start = time()
        while time() < start + 5:
            n = Nanny(s.address, ncores=2, loop=s.loop)
            n.start(0)

            yield gen.sleep(delay)

            yield n._close()
            print("Killed nanny")

    yield gen.with_timeout(timedelta(minutes=1),
                           All([create_and_destroy_worker(0.1 * i)
                                for i in range(20)]))
def test_completes_before_timeout(self):
    future = Future()  # type: Future[str]
    self.io_loop.add_timeout(
        datetime.timedelta(seconds=0.1), lambda: future.set_result("asdf")
    )
    result = yield gen.with_timeout(datetime.timedelta(seconds=3600), future)
    self.assertEqual(result, "asdf")
def enter(self, timeout=None):
    log.debug("Entering double barrier %s", self.base_path)
    time_limit = None
    if timeout is not None:
        time_limit = time.time() + timeout

    barrier_lifted = self.client.wait_for_event(
        WatchEvent.CREATED, self.sentinel_path
    )
    if time_limit:
        # with_timeout takes the deadline first, then the future
        barrier_lifted = gen.with_timeout(time_limit, barrier_lifted)

    exists = yield self.client.exists(path=self.sentinel_path, watch=True)

    yield self.create_unique_znode("worker")

    _, participants = yield self.analyze_siblings()

    if exists:
        return
    elif len(participants) >= self.min_participants:
        yield self.create_znode(self.sentinel_path)
        return

    try:
        yield barrier_lifted
    except gen.TimeoutError:
        raise exc.TimeoutError
def start(self, image=None):
    """start the single-user server in a docker container"""
    tmp_dir = mkdtemp(suffix='everware')
    yield self.git('clone', self.repo_url, tmp_dir)  # is this blocking?
    # use the username, git repo URL and HEAD commit sha to derive
    # the image name
    repo = git.Repo(tmp_dir)
    self.repo_sha = repo.rev_parse("HEAD")
    image_name = "everware/{}-{}-{}".format(self.user.name,
                                            self.escaped_repo_url,
                                            self.repo_sha)
    self.log.debug("Building image {}".format(image_name))
    # NB: a bare positional argument to timedelta is days, so this
    # deadline is 30 days
    build_log = yield gen.with_timeout(
        timedelta(30),
        self.docker('build', path=tmp_dir, tag=image_name, rm=True)
    )
    self.log.debug("".join(str(line) for line in build_log))
    self.log.info("Built docker image {}".format(image_name))
    images = yield self.docker('images', image_name)
    self.log.debug(images)
    yield super(CustomDockerSpawner, self).start(
        image=image_name
    )
def test_fails_before_timeout(self):
    future = Future()
    self.io_loop.add_timeout(
        datetime.timedelta(seconds=0.1),
        lambda: future.set_exception(ZeroDivisionError))
    with self.assertRaises(ZeroDivisionError):
        yield gen.with_timeout(datetime.timedelta(seconds=3600), future)
def spawn_single_user(self, user, options=None):
    if user.spawn_pending:
        raise RuntimeError("Spawn already pending for: %s" % user.name)
    tic = IOLoop.current().time()

    f = user.spawn(options)

    @gen.coroutine
    def finish_user_spawn(f=None):
        """Finish the user spawn by registering listeners and notifying the proxy.

        If the spawner is slow to start, this is passed as an async callback,
        otherwise it is called immediately.
        """
        if f and f.exception() is not None:
            # failed, don't add to the proxy
            return
        toc = IOLoop.current().time()
        self.log.info("User %s server took %.3f seconds to start",
                      user.name, toc - tic)
        yield self.proxy.add_user(user)
        user.spawner.add_poll_callback(self.user_stopped, user)

    try:
        yield gen.with_timeout(timedelta(seconds=self.slow_spawn_timeout), f)
    except gen.TimeoutError:
        if user.spawn_pending:
            # hit timeout, but spawn is still pending
            self.log.warn("User %s server is slow to start", user.name)
            # schedule finish for when the user finishes spawning
            IOLoop.current().add_future(f, finish_user_spawn)
        else:
            raise
    else:
        yield finish_user_spawn()
def enquire(self, user, rider):
    yield self.sender({
        'chat_id': user['chat_id'],
        'text': 'request for a ride from:'
    })
    yield self.sender({
        'chat_id': user['chat_id'],
        'location': rider['current_location']
    })
    yield self.sender({
        'chat_id': user['chat_id'],
        'text': 'to:'
    })
    yield self.sender({
        'chat_id': user['chat_id'],
        'location': rider['target_location']
    })
    yield self.sender({
        'chat_id': user['chat_id'],
        'text': 'how much do you charge for it? (example answer: 25)'
    })
    DriverQueue.futures[user['chat_id']] = gen.with_timeout(
        timedelta(seconds=self.enquire_timeout), Future())
    try:
        bid = yield DriverQueue.futures[user['chat_id']]
        return (bid, user)
    except:
        pass
    finally:
        DriverQueue.futures.pop(user['chat_id'])
def instantiate(self, comm=None):
    """ Start a local worker process

    Blocks until the process is up and the scheduler is properly informed
    """
    if self._listen_address:
        start_arg = self._listen_address
    else:
        host = self.listener.bound_address[0]
        start_arg = self.listener.prefix + unparse_host_port(
            host, self._given_worker_port)

    if self.process is None:
        self.process = WorkerProcess(
            worker_args=(self.scheduler_addr,),
            worker_kwargs=dict(ncores=self.ncores,
                               local_dir=self.local_dir,
                               services=self.services,
                               service_ports={'nanny': self.port},
                               name=self.name,
                               memory_limit=self.memory_limit,
                               reconnect=self.reconnect,
                               resources=self.resources,
                               validate=self.validate,
                               silence_logs=self.silence_logs,
                               death_timeout=self.death_timeout,
                               preload=self.preload,
                               preload_argv=self.preload_argv,
                               security=self.security,
                               contact_address=self.contact_address),
            worker_start_args=(start_arg,),
            silence_logs=self.silence_logs,
            on_exit=self._on_exit,
        )

    self.auto_restart = True
    if self.death_timeout:
        try:
            result = yield gen.with_timeout(
                timedelta(seconds=self.death_timeout),
                self.process.start()
            )
        except gen.TimeoutError:
            yield self._close(timeout=self.death_timeout)
            raise gen.Return('timed out')
    else:
        result = yield self.process.start()
    raise gen.Return(result)
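# Usage sketch for instantiate() above -- a minimal example, assuming a Nanny
# object as in distributed; the 'timed out' check mirrors the sentinel string
# returned by the code above, and boot_worker is an illustrative name.
from tornado import gen


@gen.coroutine
def boot_worker(nanny):
    result = yield nanny.instantiate()
    if result == 'timed out':
        raise RuntimeError("worker did not start within death_timeout")
    raise gen.Return(result)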
def test_stress_scatter_death(c, s, *workers):
    import random
    s.allowed_failures = 1000
    np = pytest.importorskip('numpy')
    L = yield c._scatter([np.random.random(10000) for i in range(len(workers))])
    yield c._replicate(L, n=2)

    adds = [delayed(slowadd, pure=True)(random.choice(L),
                                        random.choice(L),
                                        delay=0.05)
            for i in range(50)]

    adds = [delayed(slowadd, pure=True)(a, b, delay=0.02)
            for a, b in sliding_window(2, adds)]

    futures = c.compute(adds)

    alive = list(workers)

    from distributed.scheduler import logger

    for i in range(7):
        yield gen.sleep(0.1)
        try:
            s.validate_state()
        except Exception as c:
            logger.exception(c)
            if config.get('log-on-err'):
                import pdb; pdb.set_trace()
            else:
                raise
        w = random.choice(alive)
        yield w._close()
        alive.remove(w)

    try:
        yield gen.with_timeout(timedelta(seconds=20), c._gather(futures))
    except gen.TimeoutError:
        ws = {w.address: w for w in workers if w.status != 'closed'}
        print(s.processing)
        print(ws)
        print(futures)
        if config.get('log-on-err'):
            import pdb; pdb.set_trace()
        else:
            raise
    except CancelledError:
        pass
def connect(addr, timeout=None, deserialize=True, connection_args=None):
    """
    Connect to the given address (a URI such as ``tcp://127.0.0.1:1234``)
    and yield a ``Comm`` object.  If the connection attempt fails, it is
    retried until the *timeout* is expired.
    """
    if timeout is None:
        timeout = dask.config.get('distributed.comm.timeouts.connect')
    timeout = parse_timedelta(timeout, default='seconds')

    scheme, loc = parse_address(addr)
    backend = registry.get_backend(scheme)
    connector = backend.get_connector()

    start = time()
    deadline = start + timeout
    error = None

    def _raise(error):
        error = error or "connect() didn't finish in time"
        msg = ("Timed out trying to connect to %r after %s s: %s"
               % (addr, timeout, error))
        raise IOError(msg)

    # This starts a thread
    while True:
        try:
            future = connector.connect(loc, deserialize=deserialize,
                                       **(connection_args or {}))
            comm = yield gen.with_timeout(
                timedelta(seconds=deadline - time()),
                future,
                quiet_exceptions=EnvironmentError)
        except FatalCommClosedError:
            raise
        except EnvironmentError as e:
            error = str(e)
            if time() < deadline:
                yield gen.sleep(0.01)
                logger.debug("sleeping on connect")
            else:
                _raise(error)
        except gen.TimeoutError:
            _raise(error)
        else:
            break

    raise gen.Return(comm)
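# Usage sketch for connect() above -- a minimal example, assuming the
# distributed comm API (Comm.write/Comm.read/Comm.close); the address and
# message contents are illustrative, not from the original source.
from tornado import gen


@gen.coroutine
def ping(addr='tcp://127.0.0.1:8786'):
    comm = yield connect(addr, timeout=5)
    try:
        yield comm.write({'op': 'identity', 'reply': True})
        reply = yield comm.read()
    finally:
        yield comm.close()
    raise gen.Return(reply)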
def timeout(self, f, *args, **kwargs):
    """Wraps a Coroutine method in a timeout.

    Used to wrap the self.execute() method in a timeout that will raise an
    ActorTimedOut exception if an actor takes too long to execute.

    *Note, Tornado 4+ does not allow you to actually kill a task on the
    IOLoop.* This means that all we are doing here is notifying the caller
    (through the raised exception) that a problem has happened.

    Fairly simple Actors should actually 'stop executing' when this
    exception is raised. Complex actors with very unique behaviors though
    (like the rightscale.server_array.Execute actor) have the ability to
    continue to execute in the background until the Kingpin application
    quits. It is not the job of this method to try to kill these actors,
    but just to let the user know that a failure has happened.
    """
    # Get our timeout setting, or fallback to the default
    self.log.debug('%s.%s() deadline: %s(s)' %
                   (self._type, f.__name__, self._timeout))

    # Get our Future object but don't yield on it yet. This starts the
    # execution, but allows us to wrap it below with the
    # 'gen.with_timeout' function.
    fut = f(*args, **kwargs)

    # If no timeout is set (None, or 0), then we just yield the Future and
    # return its results.
    if not self._timeout:
        ret = yield fut
        raise gen.Return(ret)

    # Generate a timestamp in the future at which point we will raise
    # an alarm if the actor is still executing
    deadline = time.time() + float(self._timeout)

    # Now we yield on the gen.with_timeout function
    try:
        ret = yield gen.with_timeout(
            deadline, fut, quiet_exceptions=(exceptions.ActorTimedOut,))
    except gen.TimeoutError:
        msg = ('%s.%s() execution exceeded deadline: %ss' %
               (self._type, f.__name__, self._timeout))
        self.log.error(msg)
        raise exceptions.ActorTimedOut(msg)

    raise gen.Return(ret)
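# Usage sketch for the timeout() wrapper above -- a minimal example; the
# actor object and its _execute() coroutine are illustrative stand-ins for
# however the surrounding class routes execution through the wrapper.
from tornado import gen


@gen.coroutine
def run_actor(actor):
    # Raises exceptions.ActorTimedOut if _execute() outlives self._timeout.
    result = yield actor.timeout(actor._execute)
    raise gen.Return(result)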
def post(self, *args, **kwargs):
    data = json.loads(self.request.body.decode("utf-8"))
    id_list = data.get('id_list', None)
    if not id_list:
        return self.write(dict(code=1, msg='Required parameter must not be empty'))

    try:
        # Return Timeout after 120s
        yield gen.with_timeout(datetime.timedelta(seconds=120),
                               [self.asset_update(id_list)],
                               quiet_exceptions=tornado.gen.TimeoutError)
    except gen.TimeoutError:
        return self.write(dict(code=-2, msg='TimeOut'))
    return self.write(dict(
        code=0,
        msg='Task finished. Note: a status of True means success and False '
            'means failure; for False, click through to the log to debug'))
def acquire(self, stream=None, name=None, id=None, timeout=None):
    with log_errors():
        if name in self.ids:
            event = tornado.locks.Event()
            self.events[name].append(event)
            future = event.wait()
            if timeout is not None:
                future = gen.with_timeout(timedelta(seconds=timeout), future)
            try:
                yield future
            finally:
                event2 = self.events[name].popleft()
                assert event is event2
        assert name not in self.ids
        self.ids[name] = id
def run_updates():
    def func():
        try:
            print('Checking notices...')
            update.check_notices()
        except:
            print("Unhandled error occurred:\n{}".format(
                traceback.format_exc()))
    try:
        with ThreadPoolExecutor(max_workers=1) as executor:
            yield gen.with_timeout(datetime.timedelta(UPDATE_PERIOD / 1000.0),
                                   executor.submit(func))
        print('run_updates done')
    except gen.TimeoutError:
        print('run_updates timed out')
def test_calculation_future_broadcasts(self):
    manager = get_manager()
    runner = manager.get_runner()
    process = test_processes.DummyProcess()

    # No polling
    future = processes.futures.CalculationFuture(
        pk=process.pid,
        poll_interval=None,
        communicator=manager.get_communicator())

    run(process)
    calc_node = runner.run_until_complete(
        gen.with_timeout(self.TIMEOUT, future))

    self.assertEqual(process.node.pk, calc_node.pk)
def _unregister(self, timeout=10):
    if self.process is None:
        return
    worker_address = self.process.worker_address
    if worker_address is None:
        return

    allowed_errors = (gen.TimeoutError, CommClosedError,
                      EnvironmentError, RPCClosed)
    try:
        yield gen.with_timeout(
            timedelta(seconds=timeout),
            self.scheduler.unregister(address=self.worker_address),
            quiet_exceptions=allowed_errors)
    except allowed_errors:
        pass
def test_calculation_future_polling(self):
    """Test calculation future polling."""
    runner = get_manager().get_runner()
    process = test_processes.DummyProcess()

    # No communicator
    future = processes.futures.CalculationFuture(
        pk=process.pid,
        loop=runner.loop,
        poll_interval=0)

    runner.run(process)
    calc_node = runner.run_until_complete(
        gen.with_timeout(self.TIMEOUT, future))

    self.assertEqual(process.node.pk, calc_node.pk)
def close(self, timeout):
    if self.closing:
        return
    self.closing = True

    pending_with_timeouts = []
    for pending in self.drain_all_pending():
        pending_with_timeouts.append(gen.with_timeout(timeout, pending))

    try:
        yield list(pending_with_timeouts)
    except gen.TimeoutError:
        yield self.abort(exception=exc.TimeoutError)
    finally:
        self.stream.close()
def connect(ip, port, timeout=3):
    client = TCPClient()
    start = time()
    while True:
        future = client.connect(ip, port, max_buffer_size=MAX_BUFFER_SIZE)
        try:
            stream = yield gen.with_timeout(timedelta(seconds=timeout), future)
            raise Return(stream)
        except StreamClosedError:
            if time() - start < timeout:
                yield gen.sleep(0.01)
                logger.debug("sleeping on connect")
            else:
                raise
        except gen.TimeoutError:
            raise IOError("Timed out while connecting to %s:%d" % (ip, port))
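# Usage sketch for connect() above -- a minimal example, assuming connect()
# is a Tornado coroutine as written; the address and payload are illustrative.
# A StreamClosedError is retried until the deadline, while a hung connection
# attempt surfaces as IOError.
from tornado import gen


@gen.coroutine
def open_and_ping(ip='127.0.0.1', port=8786):
    stream = yield connect(ip, port, timeout=3)
    yield stream.write(b'ping\n')
    raise gen.Return(stream)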
def restart(self, comm=None, timeout=2, executor_wait=True):
    start = time()

    @gen.coroutine
    def _():
        if self.process is not None:
            yield self.kill()
            yield self.instantiate()

    try:
        yield gen.with_timeout(timedelta(seconds=timeout), _())
    except gen.TimeoutError:
        logger.error("Restart timed out, returning before finished")
        raise gen.Return("timed out")
    else:
        raise gen.Return("OK")
def f():
    try:
        if main_tid == get_thread_identity():
            raise RuntimeError("sync() called from thread of running loop")
        yield gen.moment
        thread_state.asynchronous = True
        future = func(*args, **kwargs)
        if callback_timeout is not None:
            future = gen.with_timeout(timedelta(seconds=callback_timeout),
                                      future)
        result[0] = yield future
    except Exception as exc:
        error[0] = sys.exc_info()
    finally:
        thread_state.asynchronous = False
        e.set()
def _close(self, report=True, timeout=10):
    if report:
        yield gen.with_timeout(
            timedelta(seconds=timeout),
            self.center.unregister(address=(self.ip, self.port)),
            io_loop=self.loop)
    self.center.close_streams()
    self.stop()
    self.executor.shutdown()
    if os.path.exists(self.local_dir):
        shutil.rmtree(self.local_dir)

    for k, v in self.services.items():
        v.stop()
    self.status = 'closed'
    self.stop()
def read_stream_body(self, delegate, chunk_size=1, stream_callback=None):
    _delegate, delegate = self._parse_delegate(delegate)
    remain_content = False
    need_delegate_close = True

    if not _delegate.skip_body:
        try:
            body_future = self._read_stream_body(chunk_size, delegate)
            if body_future is not None:
                if self._body_timeout is None:
                    remain_content = yield body_future
                else:
                    try:
                        remain_content = yield gen.with_timeout(
                            self.stream.io_loop.time() + self._body_timeout,
                            body_future,
                            quiet_exceptions=StreamClosedError)
                    except gen.TimeoutError:
                        gen_log.info("Timeout reading body from %s",
                                     self.context)
                        self.stream.close()
                        remain_content = False

            need_delegate_close = False
            if not remain_content:
                self._read_finished = True
                if (not self._finish_future.done() and
                        self.stream is not None and
                        not self.stream.closed()):
                    self.stream.set_close_callback(self._on_connection_close)
                    yield self._finish_future
                if self._disconnect_on_finish:
                    self.close()
        except HTTPInputError as e:
            gen_log.info("Malformed HTTP message from %s: %s",
                         self.context, e)
            self.close()
            remain_content = False
        finally:
            if need_delegate_close:
                with _ExceptionLoggingContext(app_log):
                    delegate.on_connection_close(self.stream.error)
            if not remain_content:
                self._clear_callbacks()
    raise gen.Return(remain_content)
def wait_on_sibling(self, sibling, time_limit=None):
    log.debug("Waiting on sibling %s", sibling)

    path = self.sibling_path(sibling)

    unblocked = self.client.wait_for_event(WatchEvent.DELETED, path)
    if time_limit:
        unblocked = gen.with_timeout(time_limit, unblocked)

    exists = yield self.client.exists(path=path, watch=True)
    if not exists:
        unblocked.set_result(None)

    try:
        yield unblocked
    except gen.TimeoutError:
        raise exc.TimeoutError
def test_gc(self): """Runners shouldn't GC if future is alive""" # Create the weakref weakref_scope = [None] def callback(): gc.collect(2) weakref_scope[0]().set_result(123) @gen.coroutine def tester(): fut = Future() weakref_scope[0] = weakref.ref(fut) self.io_loop.add_callback(callback) yield fut yield gen.with_timeout(datetime.timedelta(seconds=0.2), tester())
def join(self, timeout=None):
    """
    Wait for the child process to exit.

    This method is a coroutine.
    """
    self._check_closed()
    assert self._state.pid is not None, 'can only join a started process'
    if self._state.exitcode is not None:
        return
    if timeout is None:
        yield self._exit_future
    else:
        try:
            yield gen.with_timeout(timedelta(seconds=timeout),
                                   self._exit_future)
        except gen.TimeoutError:
            pass
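# Usage sketch for join() above -- a minimal example. Note that join()
# swallows gen.TimeoutError, so callers must inspect the exit code themselves.
# This assumes the class exposes an exitcode property mirroring
# self._state.exitcode; the process object and 10 s deadline are illustrative.
from tornado import gen


@gen.coroutine
def wait_for_exit(proc):
    yield proc.join(timeout=10)
    # None means the child is still running after the deadline.
    raise gen.Return(proc.exitcode)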
def _call_subprocess(self, function_to_evaluate, arguments):
    restricted_tabpy = RestrictedTabPy(
        self.protocol, self.port, self.logger, self.eval_timeout
    )
    # Exec does not run the function, so it does not block.
    exec(function_to_evaluate, globals())

    # 'noqa' comments below tell flake8 to ignore undefined _user_script
    # name - the name is actually defined with user script being wrapped
    # in _user_script function (constructed as a string) and then executed
    # with exec() call above.
    future = self.executor.submit(_user_script,  # noqa: F821
                                  restricted_tabpy,
                                  **arguments if arguments is not None else {})

    ret = yield gen.with_timeout(timedelta(seconds=self.eval_timeout), future)
    raise gen.Return(ret)
def test_maxsize_release(self):
    pool = Pool("ws://localhost:8182/",
                maxsize=2,
                username="******",
                password="******")
    c1 = yield pool.acquire()
    c2 = yield pool.acquire()
    c3 = pool.acquire()
    self.assertIsInstance(c3, Future)
    with self.assertRaises(tornado.gen.TimeoutError):
        yield gen.with_timeout(timedelta(seconds=0.1), c3)
    yield pool.release(c2)
    c3 = yield c3
    self.assertEqual(c2, c3)
    c1.conn.close()
    c2.conn.close()
    c3.conn.close()
def _close(self, report=True, timeout=10):
    self.heartbeat_callback.stop()
    with ignoring(RPCClosed, StreamClosedError):
        if report:
            yield gen.with_timeout(
                timedelta(seconds=timeout),
                self.scheduler.unregister(address=(self.ip, self.port)),
                io_loop=self.loop)
    self.scheduler.close_rpc()
    self.stop()
    self.executor.shutdown()
    if os.path.exists(self.local_dir):
        shutil.rmtree(self.local_dir)

    for k, v in self.services.items():
        v.stop()
    self.status = 'closed'
    self.stop()
def post(self, *args, **kwargs):
    git_list = []
    with DBContext('r') as session:
        git_conf = session.query(GitConf).all()
        for msg in git_conf:
            data_dict = model_to_dict(msg)
            git_list.append(data_dict)

    try:
        # Return Timeout after 60s
        res = yield gen.with_timeout(datetime.timedelta(seconds=60),
                                     self.sync_git_info(git_list),
                                     quiet_exceptions=gen.TimeoutError)
        return self.write(dict(code=0, msg=res))
    except gen.TimeoutError:
        return self.write(dict(code=-1, msg='TimeOut'))
def send(self, request, for_game=None):
    """ Send a request.

    :param request: request object.
    :param for_game: (optional) NetworkGame object (required for game requests).
    :return: a Future that returns the response handler result of this request.
    """
    request_future = Future()
    request_context = RequestFutureContext(request=request,
                                           future=request_future,
                                           connection=self,
                                           game=for_game)

    self.write_request(request_context).add_done_callback(
        MessageWrittenCallback(request_context).callback)
    return gen.with_timeout(
        timedelta(seconds=constants.REQUEST_TIMEOUT_SECONDS), request_future)
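# Usage sketch for send() above -- a minimal example; the connection and
# request objects are illustrative. The returned future is already wrapped in
# gen.with_timeout, so a slow server surfaces as gen.TimeoutError here.
from tornado import gen


@gen.coroutine
def send_and_wait(connection, request):
    try:
        response = yield connection.send(request)
    except gen.TimeoutError:
        response = None  # no answer within REQUEST_TIMEOUT_SECONDS
    raise gen.Return(response)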
def test_gc(self):
    # Github issue 1769: Runner objects can get GCed unexpectedly
    # while their future is alive.
    weakref_scope = [None]  # type: List[Optional[weakref.ReferenceType]]

    def callback():
        gc.collect(2)
        weakref_scope[0]().set_result(123)  # type: ignore

    @gen.coroutine
    def tester():
        fut = Future()  # type: Future[int]
        weakref_scope[0] = weakref.ref(fut)
        self.io_loop.add_callback(callback)
        yield fut

    yield gen.with_timeout(datetime.timedelta(seconds=0.2), tester())
def test_rate_limit(self):
    # Get the first batch
    cursor = yield r.range().run(self.conn)
    cursor_initial_size = len(cursor.items)

    # Wait for the second (pre-fetched) batch to arrive
    yield cursor.new_response
    cursor_new_size = len(cursor.items)

    self.assertLess(cursor_initial_size, cursor_new_size)

    # Wait and observe that no third batch arrives
    yield self.asyncAssertRaises(
        gen.TimeoutError,
        gen.with_timeout(ioloop.IOLoop.current().time() + 2,
                         cursor.new_response))
    self.assertEqual(cursor_new_size, len(cursor.items))
def shutdown(self, sync=False):
    logging.info("Shutdown")

    with self.mutex:
        self.done_future = Future()
        if not sync:
            self.done_event = threading.Event()
        self.to_shutdown = True

    for _ in range(len(self.threads)):
        self.stop_one_worker()

    logging.info("Waiting for workers")
    if sync:
        self.done_event.wait(timeout=self.shutdown_timeout)
        return self.done_future
    else:
        return with_timeout(
            timeout=datetime.timedelta(seconds=self.shutdown_timeout),
            future=self.done_future
        )
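# Usage sketch for shutdown() above -- a minimal example of the asynchronous
# path (sync=False); the pool object is illustrative. The returned future is
# wrapped in with_timeout, so a stuck shutdown surfaces as a TimeoutError.
from tornado import gen


@gen.coroutine
def drain(pool):
    yield pool.shutdown()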
def _get_next_message(self, stream):
    # get the next message from the stream
    unpacker = msgpack.Unpacker()
    try:
        wire_bytes = yield with_timeout(
            datetime.timedelta(seconds=PING_TIMEOUT),
            stream.read_bytes(4096, partial=True))
    except StreamClosedError:
        LOGGER.warn(
            'Unable to get next message from {} - stream closed'.format(
                stream))
    else:
        unpacker.feed(wire_bytes)
        LOGGER.debug('Deserializing object from stream {}'.format(stream))
        message = unpacker.next()
        message.pop('type')
        raise Return(message)
def test_zip_timeout():
    a = Stream(asynchronous=True)
    b = Stream(asynchronous=True)
    c = sz.zip(a, b, maxsize=2)

    L = c.sink_to_list()

    a.emit(1)
    a.emit(2)

    future = a.emit(3)
    with pytest.raises(gen.TimeoutError):
        yield gen.with_timeout(timedelta(seconds=0.01), future)

    b.emit('a')
    yield future

    assert L == [(1, 'a')]
def _execution_future(self, msg_id, interrupt_timeout=None, idle_timeout=None,
                      raise_on_no_idle=False):
    request_fut = self._request_future(msg_id)
    if not (interrupt_timeout or idle_timeout):
        return (yield from request_fut)

    interrupt_cb = None
    if interrupt_timeout:
        interrupt_cb = self.ioloop.call_later(interrupt_timeout,
                                              self.interrupt)

    got_idle_fut = Future()

    def watch_for_idle(msg, _channel):
        if msg.header['msg_type'] == 'status' \
                and msg.parent_header.get('msg_id') == msg_id \
                and msg.content['execution_state'] == 'idle':
            got_idle_fut.set_result(msg)

    self.add_handler(watch_for_idle, 'iopub')

    try:
        reply = yield from request_fut

        if interrupt_cb is not None:
            self.ioloop.remove_timeout(interrupt_cb)

        if idle_timeout:
            # Wait for idle message -
            # this may resolve immediately if we already got it.
            try:
                yield from gen.with_timeout(
                    timedelta(seconds=idle_timeout), got_idle_fut)
            except ioloop.TimeoutError:
                print("Timed out waiting for idle")
                if raise_on_no_idle:
                    raise
    finally:
        self.remove_handler(watch_for_idle)

    return reply