def run():
    try:
        result = func()
        if result is not None:
            from salt.ext.tornado.gen import convert_yielded
            result = convert_yielded(result)
    except Exception:
        future_cell[0] = TracebackFuture()
        future_cell[0].set_exc_info(sys.exc_info())
    else:
        if is_future(result):
            future_cell[0] = result
        else:
            future_cell[0] = TracebackFuture()
            future_cell[0].set_result(result)
    self.add_future(future_cell[0], lambda future: self.stop())
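# Hypothetical usage sketch (not part of the snippet above): run() is the inner
# helper of IOLoop.run_sync, which drives a callable or coroutine to completion
# and returns its result. The names below (compute, the 0.1s delay) are
# illustrative only; the calls used (gen.coroutine, gen.sleep, IOLoop.run_sync)
# are the vendored tornado 4.x API.
from salt.ext.tornado import gen
from salt.ext.tornado.ioloop import IOLoop


@gen.coroutine
def compute():
    # Pretend to do asynchronous work, then hand a value back to run_sync's caller.
    yield gen.sleep(0.1)
    raise gen.Return(42)


if __name__ == "__main__":
    print(IOLoop.current().run_sync(compute))  # prints 42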
def wrapper(*args, **kwargs):
    future = TracebackFuture()
    callback, args, kwargs = replacer.replace(future, args, kwargs)
    if callback is not None:
        future.add_done_callback(
            functools.partial(_auth_future_to_callback, callback))

    def handle_exception(typ, value, tb):
        if future.done():
            return False
        else:
            future.set_exc_info((typ, value, tb))
            return True

    with ExceptionStackContext(handle_exception):
        f(*args, **kwargs)
    return future
class WebSocketClientConnection(simple_httpclient._HTTPConnection):
    """WebSocket client connection.

    This class should not be instantiated directly; use the
    `websocket_connect` function instead.
    """
    def __init__(self, io_loop, request, on_message_callback=None,
                 compression_options=None, ping_interval=None,
                 ping_timeout=None, max_message_size=None):
        self.compression_options = compression_options
        self.connect_future = TracebackFuture()
        self.protocol = None
        self.read_future = None
        self.read_queue = collections.deque()
        self.key = base64.b64encode(os.urandom(16))
        self._on_message_callback = on_message_callback
        self.close_code = self.close_reason = None
        self.ping_interval = ping_interval
        self.ping_timeout = ping_timeout
        self.max_message_size = max_message_size

        scheme, sep, rest = request.url.partition(':')
        scheme = {'ws': 'http', 'wss': 'https'}[scheme]
        request.url = scheme + sep + rest
        request.headers.update({
            'Upgrade': 'websocket',
            'Connection': 'Upgrade',
            'Sec-WebSocket-Key': self.key,
            'Sec-WebSocket-Version': '13',
        })
        if self.compression_options is not None:
            # Always offer to let the server set our max_wbits (and even though
            # we don't offer it, we will accept a client_no_context_takeover
            # from the server).
            # TODO: set server parameters for deflate extension
            # if requested in self.compression_options.
            request.headers['Sec-WebSocket-Extensions'] = (
                'permessage-deflate; client_max_window_bits')

        self.tcp_client = TCPClient(io_loop=io_loop)
        super(WebSocketClientConnection, self).__init__(
            io_loop, None, request, lambda: None, self._on_http_response,
            104857600, self.tcp_client, 65536, 104857600)

    def close(self, code=None, reason=None):
        """Closes the websocket connection.

        ``code`` and ``reason`` are documented under
        `WebSocketHandler.close`.

        .. versionadded:: 3.2

        .. versionchanged:: 4.0
           Added the ``code`` and ``reason`` arguments.
        """
        if self.protocol is not None:
            self.protocol.close(code, reason)
            self.protocol = None

    def on_connection_close(self):
        if not self.connect_future.done():
            self.connect_future.set_exception(StreamClosedError())
        self.on_message(None)
        self.tcp_client.close()
        super(WebSocketClientConnection, self).on_connection_close()

    def _on_http_response(self, response):
        if not self.connect_future.done():
            if response.error:
                self.connect_future.set_exception(response.error)
            else:
                self.connect_future.set_exception(
                    WebSocketError("Non-websocket response"))

    def headers_received(self, start_line, headers):
        if start_line.code != 101:
            return super(WebSocketClientConnection, self).headers_received(
                start_line, headers)

        self.headers = headers
        self.protocol = self.get_websocket_protocol()
        self.protocol._process_server_headers(self.key, self.headers)
        self.protocol.start_pinging()
        self.protocol._receive_frame()

        if self._timeout is not None:
            self.io_loop.remove_timeout(self._timeout)
            self._timeout = None

        self.stream = self.connection.detach()
        self.stream.set_close_callback(self.on_connection_close)
        # Once we've taken over the connection, clear the final callback
        # we set on the http request. This deactivates the error handling
        # in simple_httpclient that would otherwise interfere with our
        # ability to see exceptions.
        self.final_callback = None

        self.connect_future.set_result(self)

    def write_message(self, message, binary=False):
        """Sends a message to the WebSocket server."""
        return self.protocol.write_message(message, binary)

    def read_message(self, callback=None):
        """Reads a message from the WebSocket server.

        If on_message_callback was specified at WebSocket
        initialization, this function will never return messages.

        Returns a future whose result is the message, or None
        if the connection is closed. If a callback argument
        is given it will be called with the future when it is
        ready.
        """
        assert self.read_future is None
        future = TracebackFuture()
        if self.read_queue:
            future.set_result(self.read_queue.popleft())
        else:
            self.read_future = future
        if callback is not None:
            self.io_loop.add_future(future, callback)
        return future

    def on_message(self, message):
        if self._on_message_callback:
            self._on_message_callback(message)
        elif self.read_future is not None:
            self.read_future.set_result(message)
            self.read_future = None
        else:
            self.read_queue.append(message)

    def on_pong(self, data):
        pass

    def on_ping(self, data):
        pass

    def get_websocket_protocol(self):
        return WebSocketProtocol13(
            self, mask_outgoing=True,
            compression_options=self.compression_options)
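# Hypothetical usage sketch (not part of the class above): connect with
# websocket_connect and read replies through the Future-based read_message()
# API shown above. The URL and payload are placeholders; a server must
# actually be listening there for this to run end to end.
from salt.ext.tornado import gen
from salt.ext.tornado.ioloop import IOLoop
from salt.ext.tornado.websocket import websocket_connect


@gen.coroutine
def echo_once():
    # websocket_connect resolves to a WebSocketClientConnection once the
    # HTTP upgrade handled in headers_received() completes.
    conn = yield websocket_connect("ws://localhost:8888/ws")
    conn.write_message("hello")
    reply = yield conn.read_message()  # None means the connection was closed
    conn.close()
    raise gen.Return(reply)


if __name__ == "__main__":
    print(IOLoop.current().run_sync(echo_once))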
def fetch(self, request, callback=None, raise_error=True, **kwargs):
    """Executes a request, asynchronously returning an `HTTPResponse`.

    The request may be either a string URL or an `HTTPRequest` object.
    If it is a string, we construct an `HTTPRequest` using any additional
    kwargs: ``HTTPRequest(request, **kwargs)``

    This method returns a `.Future` whose result is an
    `HTTPResponse`. By default, the ``Future`` will raise an
    `HTTPError` if the request returned a non-200 response code
    (other errors may also be raised if the server could not be
    contacted). Instead, if ``raise_error`` is set to False, the
    response will always be returned regardless of the response
    code.

    If a ``callback`` is given, it will be invoked with the `HTTPResponse`.
    In the callback interface, `HTTPError` is not automatically raised.
    Instead, you must check the response's ``error`` attribute or
    call its `~HTTPResponse.rethrow` method.
    """
    if self._closed:
        raise RuntimeError("fetch() called on closed AsyncHTTPClient")
    if not isinstance(request, HTTPRequest):
        request = HTTPRequest(url=request, **kwargs)
    else:
        if kwargs:
            raise ValueError(
                "kwargs can't be used if request is an HTTPRequest object")
    # We may modify this (to add Host, Accept-Encoding, etc),
    # so make sure we don't modify the caller's object. This is also
    # where normal dicts get converted to HTTPHeaders objects.
    request.headers = httputil.HTTPHeaders(request.headers)
    request = _RequestProxy(request, self.defaults)
    future = TracebackFuture()
    if callback is not None:
        callback = stack_context.wrap(callback)

        def handle_future(future):
            exc = future.exception()
            if isinstance(exc, HTTPError) and exc.response is not None:
                response = exc.response
            elif exc is not None:
                response = HTTPResponse(
                    request, 599, error=exc,
                    request_time=time.time() - request.start_time)
            else:
                response = future.result()
            self.io_loop.add_callback(callback, response)
        future.add_done_callback(handle_future)

    def handle_response(response):
        if raise_error and response.error:
            future.set_exception(response.error)
        else:
            future.set_result(response)
    self.fetch_impl(request, handle_response)
    return future
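# Hypothetical usage sketch for the fetch() Future interface documented above:
# by default a non-200 response raises HTTPError when the Future is yielded,
# while raise_error=False would hand back the HTTPResponse unconditionally.
# The URL is a placeholder.
from salt.ext.tornado import gen
from salt.ext.tornado.httpclient import AsyncHTTPClient, HTTPError
from salt.ext.tornado.ioloop import IOLoop


@gen.coroutine
def fetch_status(url):
    client = AsyncHTTPClient()
    try:
        response = yield client.fetch(url)  # raises HTTPError on non-200
    except HTTPError as exc:
        raise gen.Return(exc.code)
    raise gen.Return(response.code)


if __name__ == "__main__":
    print(IOLoop.current().run_sync(lambda: fetch_status("http://example.com/")))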
class Runner(object):
    """Internal implementation of `tornado.gen.engine`.

    Maintains information about pending callbacks and their results.

    The results of the generator are stored in ``result_future`` (a
    `.TracebackFuture`)
    """
    def __init__(self, gen, result_future, first_yielded):
        self.gen = gen
        self.result_future = result_future
        self.future = _null_future
        self.yield_point = None
        self.pending_callbacks = None
        self.results = None
        self.running = False
        self.finished = False
        self.had_exception = False
        self.io_loop = IOLoop.current()
        # For efficiency, we do not create a stack context until we
        # reach a YieldPoint (stack contexts are required for the historical
        # semantics of YieldPoints, but not for Futures). When we have
        # done so, this field will be set and must be called at the end
        # of the coroutine.
        self.stack_context_deactivate = None
        if self.handle_yield(first_yielded):
            gen = result_future = first_yielded = None
            self.run()

    def register_callback(self, key):
        """Adds ``key`` to the list of callbacks."""
        if self.pending_callbacks is None:
            # Lazily initialize the old-style YieldPoint data structures.
            self.pending_callbacks = set()
            self.results = {}
        if key in self.pending_callbacks:
            raise KeyReuseError("key %r is already pending" % (key,))
        self.pending_callbacks.add(key)

    def is_ready(self, key):
        """Returns true if a result is available for ``key``."""
        if self.pending_callbacks is None or key not in self.pending_callbacks:
            raise UnknownKeyError("key %r is not pending" % (key,))
        return key in self.results

    def set_result(self, key, result):
        """Sets the result for ``key`` and attempts to resume the generator."""
        self.results[key] = result
        if self.yield_point is not None and self.yield_point.is_ready():
            try:
                self.future.set_result(self.yield_point.get_result())
            except:
                self.future.set_exc_info(sys.exc_info())
            self.yield_point = None
            self.run()

    def pop_result(self, key):
        """Returns the result for ``key`` and unregisters it."""
        self.pending_callbacks.remove(key)
        return self.results.pop(key)

    def run(self):
        """Starts or resumes the generator, running until it reaches a
        yield point that is not ready.
        """
        if self.running or self.finished:
            return
        try:
            self.running = True
            while True:
                future = self.future
                if not future.done():
                    return
                self.future = None
                try:
                    orig_stack_contexts = stack_context._state.contexts
                    exc_info = None

                    try:
                        value = future.result()
                    except Exception:
                        self.had_exception = True
                        exc_info = sys.exc_info()
                    future = None

                    if exc_info is not None:
                        try:
                            yielded = self.gen.throw(*exc_info)
                        finally:
                            # Break up a reference to itself
                            # for faster GC on CPython.
                            exc_info = None
                    else:
                        yielded = self.gen.send(value)

                    if stack_context._state.contexts is not orig_stack_contexts:
                        self.gen.throw(
                            stack_context.StackContextInconsistentError(
                                'stack_context inconsistency (probably caused '
                                'by yield within a "with StackContext" block)'))
                except (StopIteration, Return) as e:
                    self.finished = True
                    self.future = _null_future
                    if self.pending_callbacks and not self.had_exception:
                        # If we ran cleanly without waiting on all callbacks
                        # raise an error (really more of a warning). If we
                        # had an exception then some callbacks may have been
                        # orphaned, so skip the check in that case.
                        raise LeakedCallbackError(
                            "finished without waiting for callbacks %r" %
                            self.pending_callbacks)
                    self.result_future.set_result(_value_from_stopiteration(e))
                    self.result_future = None
                    self._deactivate_stack_context()
                    return
                except Exception:
                    self.finished = True
                    self.future = _null_future
                    self.result_future.set_exc_info(sys.exc_info())
                    self.result_future = None
                    self._deactivate_stack_context()
                    return
                if not self.handle_yield(yielded):
                    return
                yielded = None
        finally:
            self.running = False

    def handle_yield(self, yielded):
        # Lists containing YieldPoints require stack contexts;
        # other lists are handled in convert_yielded.
        if _contains_yieldpoint(yielded):
            yielded = multi(yielded)

        if isinstance(yielded, YieldPoint):
            # YieldPoints are too closely coupled to the Runner to go
            # through the generic convert_yielded mechanism.
            self.future = TracebackFuture()

            def start_yield_point():
                try:
                    yielded.start(self)
                    if yielded.is_ready():
                        self.future.set_result(
                            yielded.get_result())
                    else:
                        self.yield_point = yielded
                except Exception:
                    self.future = TracebackFuture()
                    self.future.set_exc_info(sys.exc_info())

            if self.stack_context_deactivate is None:
                # Start a stack context if this is the first
                # YieldPoint we've seen.
                with stack_context.ExceptionStackContext(
                        self.handle_exception) as deactivate:
                    self.stack_context_deactivate = deactivate

                    def cb():
                        start_yield_point()
                        self.run()
                    self.io_loop.add_callback(cb)
                    return False
            else:
                start_yield_point()
        else:
            try:
                self.future = convert_yielded(yielded)
            except BadYieldError:
                self.future = TracebackFuture()
                self.future.set_exc_info(sys.exc_info())

        if not self.future.done() or self.future is moment:
            def inner(f):
                # Break a reference cycle to speed GC.
                f = None  # noqa
                self.run()
            self.io_loop.add_future(
                self.future, inner)
            return False
        return True

    def result_callback(self, key):
        return stack_context.wrap(_argument_adapter(
            functools.partial(self.set_result, key)))

    def handle_exception(self, typ, value, tb):
        if not self.running and not self.finished:
            self.future = TracebackFuture()
            self.future.set_exc_info((typ, value, tb))
            self.run()
            return True
        else:
            return False

    def _deactivate_stack_context(self):
        if self.stack_context_deactivate is not None:
            self.stack_context_deactivate()
            self.stack_context_deactivate = None
class WaitIterator(object):
    """Provides an iterator to yield the results of futures as they finish.

    Yielding a set of futures like this:

    ``results = yield [future1, future2]``

    pauses the coroutine until both ``future1`` and ``future2``
    return, and then restarts the coroutine with the results of both
    futures. If either future raises an exception, the expression will
    raise that exception and all the results will be lost.

    If you need to get the result of each future as soon as possible,
    or if you need the result of some futures even if others produce
    errors, you can use ``WaitIterator``::

      wait_iterator = gen.WaitIterator(future1, future2)
      while not wait_iterator.done():
          try:
              result = yield wait_iterator.next()
          except Exception as e:
              print("Error {} from {}".format(e, wait_iterator.current_future))
          else:
              print("Result {} received from {} at {}".format(
                  result, wait_iterator.current_future,
                  wait_iterator.current_index))

    Because results are returned as soon as they are available the
    output from the iterator *will not be in the same order as the
    input arguments*. If you need to know which future produced the
    current result, you can use the attributes
    ``WaitIterator.current_future`` or ``WaitIterator.current_index``
    to get the index of the future from the input list. (If keyword
    arguments were used in the construction of the `WaitIterator`,
    ``current_index`` will use the corresponding keyword.)

    On Python 3.5, `WaitIterator` implements the async iterator
    protocol, so it can be used with the ``async for`` statement (note
    that in this version the entire iteration is aborted if any value
    raises an exception, while the previous example can continue past
    individual errors)::

      async for result in gen.WaitIterator(future1, future2):
          print("Result {} received from {} at {}".format(
              result, wait_iterator.current_future,
              wait_iterator.current_index))

    .. versionadded:: 4.1

    .. versionchanged:: 4.3
       Added ``async for`` support in Python 3.5.

    """
    def __init__(self, *args, **kwargs):
        if args and kwargs:
            raise ValueError(
                "You must provide args or kwargs, not both")

        if kwargs:
            self._unfinished = dict((f, k) for (k, f) in kwargs.items())
            futures = list(kwargs.values())
        else:
            self._unfinished = dict((f, i) for (i, f) in enumerate(args))
            futures = args

        self._finished = collections.deque()
        self.current_index = self.current_future = None
        self._running_future = None

        for future in futures:
            future.add_done_callback(self._done_callback)

    def done(self):
        """Returns True if this iterator has no more results."""
        if self._finished or self._unfinished:
            return False
        # Clear the 'current' values when iteration is done.
        self.current_index = self.current_future = None
        return True

    def next(self):
        """Returns a `.Future` that will yield the next available result.

        Note that this `.Future` will not be the same object as any of
        the inputs.
        """
        self._running_future = TracebackFuture()

        if self._finished:
            self._return_result(self._finished.popleft())

        return self._running_future

    def _done_callback(self, done):
        if self._running_future and not self._running_future.done():
            self._return_result(done)
        else:
            self._finished.append(done)

    def _return_result(self, done):
        """Called to set the returned future's state to that of the future
        we yielded, and set the current future for the iterator.
        """
        chain_future(done, self._running_future)

        self.current_future = done
        self.current_index = self._unfinished.pop(done)

    def __aiter__(self):
        return self

    def __anext__(self):
        if self.done():
            # Lookup by name to silence pyflakes on older versions.
            raise getattr(builtins, 'StopAsyncIteration')()
        return self.next()
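# Hypothetical usage sketch mirroring the WaitIterator docstring above:
# results arrive in completion order, and current_index reports which input
# future produced each one. The delays and labels are illustrative only.
from salt.ext.tornado import gen
from salt.ext.tornado.ioloop import IOLoop


@gen.coroutine
def delayed(value, seconds):
    yield gen.sleep(seconds)
    raise gen.Return(value)


@gen.coroutine
def collect():
    wait_iterator = gen.WaitIterator(delayed("slow", 0.2), delayed("fast", 0.1))
    results = []
    while not wait_iterator.done():
        result = yield wait_iterator.next()
        results.append((wait_iterator.current_index, result))
    raise gen.Return(results)  # expected: [(1, 'fast'), (0, 'slow')]


if __name__ == "__main__":
    print(IOLoop.current().run_sync(collect))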
def wrapper(*args, **kwargs):
    future = TracebackFuture()

    if replace_callback and 'callback' in kwargs:
        callback = kwargs.pop('callback')
        IOLoop.current().add_future(
            future, lambda future: callback(future.result()))

    try:
        result = func(*args, **kwargs)
    except (Return, StopIteration) as e:
        result = _value_from_stopiteration(e)
    except Exception:
        future.set_exc_info(sys.exc_info())
        return future
    else:
        if isinstance(result, GeneratorType):
            # Inline the first iteration of Runner.run. This lets us
            # avoid the cost of creating a Runner when the coroutine
            # never actually yields, which in turn allows us to
            # use "optional" coroutines in critical path code without
            # performance penalty for the synchronous case.
            try:
                orig_stack_contexts = stack_context._state.contexts
                yielded = next(result)
                if stack_context._state.contexts is not orig_stack_contexts:
                    yielded = TracebackFuture()
                    yielded.set_exception(
                        stack_context.StackContextInconsistentError(
                            'stack_context inconsistency (probably caused '
                            'by yield within a "with StackContext" block)'))
            except (StopIteration, Return) as e:
                future.set_result(_value_from_stopiteration(e))
            except Exception:
                future.set_exc_info(sys.exc_info())
            else:
                _futures_to_runners[future] = Runner(result, future, yielded)
            yielded = None
            try:
                return future
            finally:
                # Subtle memory optimization: if next() raised an exception,
                # the future's exc_info contains a traceback which
                # includes this stack frame. This creates a cycle,
                # which will be collected at the next full GC but has
                # been shown to greatly increase memory usage of
                # benchmarks (relative to the refcount-based scheme
                # used in the absence of cycles). We can avoid the
                # cycle by clearing the local variable after we return it.
                future = None
    future.set_result(result)
    return future
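# Hypothetical usage sketch for the coroutine wrapper shown above: the
# decorator returns a Future immediately; a generator that yields is handed to
# a Runner, and raise gen.Return(...) (or a plain return on Python 3.3+)
# becomes the Future's result. The names below are illustrative only.
from salt.ext.tornado import gen
from salt.ext.tornado.ioloop import IOLoop


@gen.coroutine
def add_async(a, b):
    yield gen.moment  # yield once so the wrapper hands the generator to a Runner
    raise gen.Return(a + b)


@gen.coroutine
def main():
    total = yield add_async(1, 2)
    raise gen.Return(total)


if __name__ == "__main__":
    print(IOLoop.current().run_sync(main))  # prints 3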