def _ioloop_call(self, future, tornado_future, fn, args, kwargs):
    """Invoke *fn* inside the ioloop and mirror its outcome onto *future*.

    *tornado_future* is chained to the (thread-safe) *future* up front,
    so whatever ends up on *tornado_future* — the result of *fn*, or any
    exception it raises — is propagated to the caller's *future*.
    """
    # Link the tornado-side future to the thread-safe one first, so every
    # outcome set below reaches the caller.
    gen.chain_future(tornado_future, future)
    try:
        # fn may return a plain value or a future; normalise to a future.
        call_result = fn(*args, **kwargs)
        gen.chain_future(gen.maybe_future(call_result), tornado_future)
    except Exception:
        # A synchronous failure in fn() is delivered via tornado_future.
        tornado_future.set_exc_info(sys.exc_info())
def test_stop_cleanup(self):
    """Stopping the client must fail any outstanding requests."""
    self.client.wait_protocol(timeout=1)
    mid = 56
    reply_future = Future()

    def issue_request():
        request = Message.request('slow-command', 1, mid=mid)
        gen.chain_future(self.client.future_request(request), reply_future)

    self.client.ioloop.add_callback(issue_request)
    # Force a disconnect
    self.client.stop()
    reply, informs = reply_future.result(timeout=1)
    self.assertEqual(reply, Message.reply(
        'slow-command', 'fail',
        'Client stopped before reply was received', mid=mid))
def _manage(self, reg=REGISTRY):
    """Dispatch a command POST to the handler registered for it.

    Parameters
    ----------
    reg : dict
        Maps a command's first word to a handler callable taking
        ``(command_args, data)`` and returning a future.

    The resolved result future is handed to ``self._commit`` for output.
    """
    # Collect the POST parameters into a plain dict.
    data = dict((k, self.get_argument(k))
                for k in self.request.arguments.keys())
    # The first word of 'text' is the command name. Use .get() so a
    # request without a 'text' field falls through to the help message
    # instead of raising KeyError.
    command = data.get('text', '').strip().split()
    head = command.pop(0) if command else None
    # Look up the handler registered for this command.
    handler = reg.get(head, None) if head else None
    future = Future()
    if not handler:
        # No (or unknown) command: resolve immediately with the list of
        # supported commands.
        future.set_result(
            "Lista de comandos soportados: %s" % ", ".join(reg.keys())
        )
    else:
        # Run the handler and chain its result onto our own future.
        chain_future(handler(command, data), future)
    # Once the future resolves, write the result back to the client.
    IOLoop.current().add_future(future, self._commit)
def acquire(self, timeout=None):
    """Decrement the counter. Returns a Future.

    Block if the counter is zero and wait for a `.release`. The Future
    raises `.TimeoutError` after the deadline.

    Parameters
    ----------
    timeout : deadline accepted by `gen.with_timeout` or None
        ``None`` means wait indefinitely.
    """
    if self._value > 0:
        # Fast path: a slot is free, resolve immediately.
        self._value -= 1
        future = Future()
        future.set_result(_ReleasingContextManager(self))
    else:
        waiter = Future()
        self._waiters.append(waiter)
        # Explicit None check: a deadline of 0 (already in the past) must
        # be honoured rather than silently treated as "no timeout".
        if timeout is not None:
            future = gen.with_timeout(timeout, waiter,
                                      quiet_exceptions=gen.TimeoutError)
            # Set waiter's exception after the deadline, so a later
            # `.release` does not hand the slot to a timed-out waiter.
            gen.chain_future(future, waiter)
            # NOTE(review): the timed-out waiter stays in self._waiters —
            # confirm it is garbage-collected elsewhere (e.g. in release()).
        else:
            future = waiter
    return future
def test_stop_cleanup(self):
    """Check that stopping the client fails outstanding requests cleanly."""
    self.client.wait_protocol(timeout=1)
    mid = 56
    pending_reply = Future()
    request_msg = Message.request('slow-command', 1, mid=mid)
    self.client.ioloop.add_callback(
        lambda: gen.chain_future(
            self.client.future_request(request_msg), pending_reply))
    # Force a disconnect
    self.client.stop()
    reply, informs = pending_reply.result(timeout=1)
    expected = Message.reply(
        'slow-command', 'fail',
        'Client stopped before reply was received', mid=mid)
    self.assertEqual(reply, expected)
def stop(self, timeout=None, callback=None):
    """Stop ioloop (if managed) and call callback in ioloop before close.

    Parameters
    ----------
    timeout : float or None
        Seconds to wait for ioloop to have *started*.
    callback : callable or None
        Run in the ioloop (awaited if it returns a future) before the
        ioloop is stopped.

    Returns
    -------
    stopped : thread-safe Future
        Resolves when the callback() is done
    """
    if timeout:
        self._running.wait(timeout)
    stopped_future = Future()

    @gen.coroutine
    def _do_stop():
        if callback:
            try:
                yield gen.maybe_future(callback())
            except Exception:
                self._logger.exception(
                    'Unhandled exception calling stop callback')
        if self._ioloop_managed:
            self._logger.info('Stopping ioloop {0!r}'.format(self._ioloop))
            # Allow ioloop to run once before stopping so that callbacks
            # scheduled by callback() above get a chance to run.
            yield gen.moment
            self._ioloop.stop()
        self._running.clear()

    def _schedule_stop():
        gen.chain_future(_do_stop(), stopped_future)

    try:
        self._ioloop.add_callback(_schedule_stop)
    except AttributeError:
        # Probably we have been shut-down already
        pass
    return stopped_future
def stop(self, timeout=None, callback=None):
    """Stop ioloop (if managed) and call callback in ioloop before close.

    Parameters
    ----------
    timeout : float or None
        Seconds to wait for ioloop to have *started*.
    callback : callable or None
        Executed in the ioloop (awaited if it returns a future) before
        the ioloop itself is stopped.

    Returns
    -------
    stopped : thread-safe Future
        Resolves when the callback() is done
    """
    if timeout:
        self._running.wait(timeout)
    stopped_future = Future()

    @gen.coroutine
    def _shutdown():
        if callback:
            try:
                yield gen.maybe_future(callback())
            except Exception:
                self._logger.exception('Unhandled exception calling stop callback')
        if self._ioloop_managed:
            self._logger.info('Stopping ioloop {0!r}'.format(self._ioloop))
            # Give the ioloop one more turn so callbacks scheduled by
            # callback() above get a chance to run before it stops.
            yield gen.moment
            self._ioloop.stop()
        self._running.clear()

    try:
        self._ioloop.add_callback(
            lambda: gen.chain_future(_shutdown(), stopped_future))
    except AttributeError:
        # Probably we have been shut-down already
        pass
    return stopped_future
def test_timeout_nocb(self):
    """Test requests that timeout with no callbacks."""
    # Included to test https://katfs.kat.ac.za/mantis/view.php?id=1722
    # A race used to exist between the timeout handler and the receipt of
    # a reply: the reply could arrive after the timeout timer expired but
    # before the request was popped off the stack with
    # client._pop_async_request(). The normal request handler would then
    # pop the request first, handing the timeout handler a bunch of
    # None's, which it must tolerate gracefully.
    # Running the handler with a fake msg_id should behave the same as
    # running it after a real request has already been popped; the
    # expected result is simply that no assertions are raised.
    # NM 2014-09-26: likely no longer an issue with the tornado-based
    # client implementation, but the test is kept for good measure.
    done = Future()

    @gen.coroutine
    def run_handler():
        self.client._handle_timeout('fake_msg_id', time.time())

    self.client.ioloop.add_callback(
        lambda: gen.chain_future(run_handler(), done))
    done.result(timeout=1)
def test_timeout_nocb(self):
    """Test requests that timeout with no callbacks."""
    # Included to test https://katfs.kat.ac.za/mantis/view.php?id=1722
    # This guards against a race between the timeout handler and reply
    # receipt: a reply arriving after the timeout expired but before the
    # request was popped via client._pop_async_request() means the normal
    # handler pops the request first and the timeout handler receives
    # only None's. It should handle that gracefully — calling it with a
    # fake msg_id reproduces the same situation, and the expectation is
    # just that no assertion fires.
    # NM 2014-09-26: probably moot with the tornado-based client, but the
    # test is retained for good measure.
    completed = Future()

    @gen.coroutine
    def invoke_timeout_handler():
        self.client._handle_timeout('fake_msg_id', time.time())

    def schedule():
        gen.chain_future(invoke_timeout_handler(), completed)

    self.client.ioloop.add_callback(schedule)
    completed.result(timeout=1)