def test_context_getset_5(self):
    c = contextvars.ContextVar('c', default=42)
    c.set([])

    def fun():
        c.set([])
        c.get().append(42)
        self.assertEqual(c.get(), [42])

    contextvars.copy_context().run(fun)
    self.assertEqual(c.get(), [])
def test_context_getset_1(self):
    c = contextvars.ContextVar('c')
    with self.assertRaises(LookupError):
        c.get()

    self.assertIsNone(c.get(None))

    t0 = c.set(42)
    self.assertEqual(c.get(), 42)
    self.assertEqual(c.get(None), 42)
    self.assertIs(t0.old_value, t0.MISSING)
    self.assertIs(t0.old_value, contextvars.Token.MISSING)
    self.assertIs(t0.var, c)

    t = c.set('spam')
    self.assertEqual(c.get(), 'spam')
    self.assertEqual(c.get(None), 'spam')
    self.assertEqual(t.old_value, 42)
    c.reset(t)
    self.assertEqual(c.get(), 42)
    self.assertEqual(c.get(None), 42)

    c.set('spam2')
    with self.assertRaisesRegex(RuntimeError, 'has already been used'):
        c.reset(t)
    self.assertEqual(c.get(), 'spam2')

    ctx1 = contextvars.copy_context()
    self.assertIn(c, ctx1)

    c.reset(t0)
    with self.assertRaisesRegex(RuntimeError, 'has already been used'):
        c.reset(t0)
    self.assertIsNone(c.get(None))

    self.assertIn(c, ctx1)
    self.assertEqual(ctx1[c], 'spam2')
    self.assertEqual(ctx1.get(c, 'aa'), 'spam2')
    self.assertEqual(len(ctx1), 1)
    self.assertEqual(list(ctx1.items()), [(c, 'spam2')])
    self.assertEqual(list(ctx1.values()), ['spam2'])
    self.assertEqual(list(ctx1.keys()), [c])
    self.assertEqual(list(ctx1), [c])

    ctx2 = contextvars.copy_context()
    self.assertNotIn(c, ctx2)
    with self.assertRaises(KeyError):
        ctx2[c]
    self.assertEqual(ctx2.get(c, 'aa'), 'aa')
    self.assertEqual(len(ctx2), 0)
    self.assertEqual(list(ctx2), [])
def __getitem__(self, key):
    var = self._vars.get(key)
    ctx = contextvars.copy_context()
    if var and var in ctx:
        return ctx[var]
    else:
        raise KeyError(key)
def __getattr__(self, name):
    var = self._vars.get(name)
    ctx = contextvars.copy_context()
    if var and var in ctx:
        return ctx[var]
    else:
        raise AttributeError(f"'{self.__class__.__name__}' has no attribute '{name}'")
async def run_application(self) -> None:
    """
    Run application.
    """

    def handle_incoming_data() -> None:
        data = self.conn.recv(1024)
        if data:
            self.feed(data)
        else:
            # Connection closed by client.
            logger.info('Connection closed by client. %r %r' % self.addr)
            self.close()

    async def run() -> None:
        # Add reader.
        loop = get_event_loop()
        loop.add_reader(self.conn, handle_incoming_data)

        try:
            await self.interact(self)
        except Exception as e:
            print('Got %s' % type(e).__name__, e)
            import traceback; traceback.print_exc()
            raise
        finally:
            self.close()

    with create_app_session(input=self.vt100_input, output=self.vt100_output):
        self.context = contextvars.copy_context()
        await run()
def func1():
    self.assertIsNone(var.get(None))
    var.set('spam')
    ctx2.run(func2)
    self.assertEqual(var.get(None), 'spam')

    cur = contextvars.copy_context()
    self.assertEqual(len(cur), 1)
    self.assertEqual(cur[var], 'spam')
    return cur
def add_done_callback(self, fn, *, context=None):
    """Add a callback to be run when the future becomes done.

    The callback is called with a single argument - the future object.
    If the future is already done when this is called, the callback is
    scheduled with call_soon.
    """
    if context is None:
        context = contextvars.copy_context()
    if self._state != _PENDING:
        self._loop.call_soon(fn, self, context=context)
    else:
        self._callbacks.append((fn, context))
def run_in_executor_with_context(
        func: Callable[..., _T], *args: Any,
        loop: Optional[AbstractEventLoop] = None) -> Awaitable[_T]:
    """
    Run a function in an executor, but make sure it uses the same contextvars.
    This is required so that the function will see the right application.

    See also: https://bugs.python.org/issue34014
    """
    loop = loop or get_event_loop()
    ctx: contextvars.Context = contextvars.copy_context()
    return loop.run_in_executor(None, ctx.run, func, *args)
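# A minimal usage sketch (not part of the snippet above). The ContextVar
# `current_user` and the blocking helper `query_db` are illustrative names,
# not from the original code; the point is that the worker thread sees the
# caller's context because the call is funnelled through `ctx.run`.
import contextvars

current_user: contextvars.ContextVar[str] = contextvars.ContextVar('current_user')

def query_db() -> str:
    # Runs in a worker thread, yet still sees the caller's context variables.
    return f"rows for {current_user.get()}"

async def handler() -> None:
    current_user.set('alice')
    result = await run_in_executor_with_context(query_db)
    assert result == 'rows for alice'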
def __init__(self, callback, args, loop, context=None):
    if context is None:
        context = contextvars.copy_context()
    self._context = context
    self._loop = loop
    self._callback = callback
    self._args = args
    self._cancelled = False
    self._repr = None
    if self._loop.get_debug():
        self._source_traceback = format_helpers.extract_stack(
            sys._getframe(1))
    else:
        self._source_traceback = None
def context_callback(context, callback):
    # Python 3.7 introduced the idea of context variables.
    # asyncio.call_{at,later,soon} need to run any callbacks
    # *inside* the context provided.
    if sys.version_info >= (3, 7):
        import contextvars

        if context is None:
            context = contextvars.copy_context()

        def _callback(*args):
            context.run(callback, *args)

        return _callback
    else:
        return callback
async def inner(self, x):
    self.assertEqual(ctx_var.get(), x)
    await self.gen_inner(x)
    self.assertEqual(ctx_var.get(), x)

    # IOLoop.run_in_executor doesn't automatically copy context
    ctx = contextvars.copy_context()
    await self.io_loop.run_in_executor(None, lambda: ctx.run(self.thread_inner, x))
    self.assertEqual(ctx_var.get(), x)

    # Neither does asyncio's run_in_executor.
    await asyncio.get_event_loop().run_in_executor(
        None, lambda: ctx.run(self.thread_inner, x)
    )
    self.assertEqual(ctx_var.get(), x)
def __init__(self, coro, *, loop=None):
    super().__init__(loop=loop)
    if self._source_traceback:
        del self._source_traceback[-1]
    if not coroutines.iscoroutine(coro):
        # raise after Future.__init__(), attrs are required for __del__
        # prevent logging for pending task in __del__
        self._log_destroy_pending = False
        raise TypeError("a coroutine was expected, got {!r}".format(coro))

    self._must_cancel = False
    self._fut_waiter = None
    self._coro = coro
    self._context = contextvars.copy_context()

    self._loop.call_soon(self.__step, context=self._context)
    _register_task(self)
async def _finish_with_xslt(self):
    self.log.debug('finishing with XSLT')

    if self.handler._headers.get('Content-Type') is None:
        self.handler.set_header('Content-Type', media_types.TEXT_HTML)

    def job():
        start_time = time.time()
        result = self.transform(
            copy.deepcopy(self.doc.to_etree_element()),
            profile_run=self.handler.debug_mode.profile_xslt
        )
        return start_time, str(result), result.xslt_profile

    def get_xsl_log():
        xsl_line = 'XSLT {0.level_name} in file "{0.filename}", line {0.line}, column {0.column}\n\t{0.message}'
        return '\n'.join(map(xsl_line.format, self.transform.error_log))

    try:
        ctx = contextvars.copy_context()
        xslt_result = await IOLoop.current().run_in_executor(self.executor, lambda: ctx.run(job))
        if self.handler.is_finished():
            return None

        start_time, xml_result, xslt_profile = xslt_result

        self.log.info('applied XSL %s in %.2fms', self.transform_filename, (time.time() - start_time) * 1000)

        if xslt_profile is not None:
            self.log.debug('XSLT profiling results', extra={'_xslt_profile': xslt_profile.getroot()})

        if len(self.transform.error_log):
            self.log.warning(get_xsl_log())

        self.handler.stages_logger.commit_stage('xsl')
        return xml_result

    except Exception as e:
        self.log.error('failed XSLT %s', self.transform_filename)
        self.log.error(get_xsl_log())
        raise e
def items(self):
    return [(var.name, value) for var, value in contextvars.copy_context().items()]
async def watch_and_shrink_cache(
    *,
    flowdb_connection: "Connection",
    pool: Executor,
    sleep_time: int = 86400,
    timeout: Optional[int] = 600,
    loop: bool = True,
    size_threshold: int = None,
    dry_run: bool = False,
    protected_period: Optional[int] = None,
) -> None:
    """
    Background task to periodically trigger a shrink of the cache.

    Parameters
    ----------
    flowdb_connection : Connection
        Flowdb connection to check dates on
    pool : Executor
        Executor to run the date check with
    sleep_time : int, default 86400
        Number of seconds to sleep for between checks
    timeout : int or None, default 600
        Seconds to wait for a cache shrink to complete before cancelling it
    loop : bool, default True
        Set to false to return after the first check
    size_threshold : int, default None
        Optionally override the maximum cache size set in flowdb.
    dry_run : bool, default False
        Set to true to just report the objects that would be removed and not remove them
    protected_period : int, default None
        Optionally specify a number of seconds within which cache entries are excluded.
        If None, the value stored in cache.cache_config will be used. Set to a negative
        number to ignore cache protection completely.

    Returns
    -------
    None
    """
    shrink_func = partial(
        shrink_below_size,
        connection=flowdb_connection,
        size_threshold=size_threshold,
        dry_run=dry_run,
        protected_period=protected_period,
    )
    while True:
        logger.debug("Checking if cache should be shrunk.")

        try:
            # Set the shrink function running with a copy of the current execution
            # context (db conn etc) in a background thread
            await asyncio.wait_for(
                asyncio.get_running_loop().run_in_executor(
                    pool, copy_context().run, shrink_func
                ),
                timeout=timeout,
            )
        except TimeoutError:
            logger.error(
                f"Failed to complete cache shrink within {timeout}s. Trying again in {sleep_time}s."
            )
        if not loop:
            break
        await asyncio.sleep(sleep_time)
async def execute(self, future: Awaitable[RetT]) -> RetT:
    loop = asyncio.get_event_loop()
    ctx = copy_context()
    return await loop.run_in_executor(
        self._executor, self._run_in_thread, future, ctx)
"""High-level support for working with threads in asyncio""" import functools import contextvars from asyncio import events async def toThread(func, /, *args, **kwargs): """Asynchronously run function *func* in a separate thread. Any *args and **kwargs supplied for this function are directly passed to *func*. Also, the current :class:`contextvars.Context` is propagated, allowing context variables from the main thread to be accessed in the separate thread. Return a coroutine that can be awaited to get the eventual result of *func*. """ loop = events.get_running_loop() ctx = contextvars.copy_context() func_call = functools.partial(ctx.run, func, *args, **kwargs) return await loop.run_in_executor(None, func_call)
def __init__(self, methodName='runTest'):
    super().__init__(methodName)
    self._asyncioRunner = None
    self._asyncioTestContext = contextvars.copy_context()
async def notify(self, event: DialogUpdateEvent) -> None:
    callback = lambda: asyncio.create_task(self._process_update(event))
    asyncio.get_running_loop().call_soon(callback, context=copy_context())
async def _run_async() -> _AppResult:
    " Coroutine. "
    loop = get_event_loop()
    f = loop.create_future()
    self.future = f  # XXX: make sure to set this before calling '_redraw'.
    self.loop = loop
    self.context = contextvars.copy_context()

    # Counter for cancelling 'flush' timeouts. Every time when a key is
    # pressed, we start a 'flush' timer for flushing our escape key. But
    # when any subsequent input is received, a new timer is started and
    # the current timer will be ignored.
    flush_task: Optional[asyncio.Task[None]] = None

    # Reset.
    self.reset()
    self._pre_run(pre_run)

    # Feed type ahead input first.
    self.key_processor.feed_multiple(get_typeahead(self.input))
    self.key_processor.process_keys()

    def read_from_input() -> None:
        nonlocal flush_task

        # Ignore when we aren't running anymore. This callback will be
        # removed from the loop next time. (It could be that it was
        # still in the 'tasks' list of the loop.)
        # Except: if we need to process incoming CPRs.
        if not self._is_running and not self.renderer.waiting_for_cpr:
            return

        # Get keys from the input object.
        keys = self.input.read_keys()

        # Feed to key processor.
        self.key_processor.feed_multiple(keys)
        self.key_processor.process_keys()

        # Quit when the input stream was closed.
        if self.input.closed:
            f.set_exception(EOFError)
        else:
            # Automatically flush keys.
            if flush_task:
                flush_task.cancel()
            flush_task = self.create_background_task(auto_flush_input())

    async def auto_flush_input() -> None:
        # Flush input after timeout.
        # (Used for flushing the enter key.)
        # This sleep can be cancelled, in that case we won't flush yet.
        await sleep(self.ttimeoutlen)
        flush_input()

    def flush_input() -> None:
        if not self.is_done:
            # Get keys, and feed to key processor.
            keys = self.input.flush_keys()
            self.key_processor.feed_multiple(keys)
            self.key_processor.process_keys()

            if self.input.closed:
                f.set_exception(EOFError)

    # Enter raw mode, attach input and attach WINCH event handler.
    with self.input.raw_mode(), self.input.attach(
        read_from_input
    ), attach_winch_signal_handler(self._on_resize):
        # Draw UI.
        self._request_absolute_cursor_position()
        self._redraw()
        self._start_auto_refresh_task()

        # Wait for UI to finish.
        try:
            result = await f
        finally:
            # In any case, when the application finishes.
            # (Successful, or because of an error.)
            try:
                self._redraw(render_as_done=True)
            finally:
                # _redraw has a good chance to fail if it calls widgets
                # with bad code. Make sure to reset the renderer
                # anyway.
                self.renderer.reset()

                # Unset `is_running`, this ensures that possibly
                # scheduled draws won't paint during the following
                # yield.
                self._is_running = False

                # Detach event handlers for invalidate events.
                # (Important when a UIControl is embedded in multiple
                # applications, like ptterm in pymux. An invalidate
                # should not trigger a repaint in terminated
                # applications.)
                for ev in self._invalidate_events:
                    ev -= self._invalidate_handler
                self._invalidate_events = []

                # Wait for CPR responses.
                if self.input.responds_to_cpr:
                    await self.renderer.wait_for_cpr_responses()

                # Wait for the run-in-terminals to terminate.
                previous_run_in_terminal_f = self._running_in_terminal_f

                if previous_run_in_terminal_f:
                    await previous_run_in_terminal_f

                # Store unprocessed input as typeahead for next time.
                store_typeahead(self.input, self.key_processor.empty_queue())

        return result
async def run_sync(func, *args, thread_pool=None):
    loop = asyncio.get_running_loop()
    thread_pool = thread_pool or THREAD_POOL
    return await loop.run_in_executor(thread_pool, copy_context().run, func, *args)
def __call__(self, *args, **kwargs):
    # You can't call AsyncToSync from a thread with a running event loop
    try:
        event_loop = get_running_loop()
    except RuntimeError:
        pass
    else:
        if event_loop.is_running():
            raise RuntimeError(
                "You cannot use AsyncToSync in the same thread as an async event loop - "
                "just await the async function directly.")

    if contextvars is not None:
        # Wrapping context in list so it can be reassigned from within
        # `main_wrap`.
        context = [contextvars.copy_context()]
    else:
        context = None

    # Make a future for the return information
    call_result = Future()
    # Get the source thread
    source_thread = threading.current_thread()
    # Make a CurrentThreadExecutor we'll use to idle in this thread - we
    # need one for every sync frame, even if there's one above us in the
    # same thread.
    if hasattr(self.executors, "current"):
        old_current_executor = self.executors.current
    else:
        old_current_executor = None
    current_executor = CurrentThreadExecutor()
    self.executors.current = current_executor
    # Use call_soon_threadsafe to schedule a synchronous callback on the
    # main event loop's thread if it's there, otherwise make a new loop
    # in this thread.
    try:
        awaitable = self.main_wrap(
            args, kwargs, call_result, source_thread, sys.exc_info(), context)

        if not (self.main_event_loop and self.main_event_loop.is_running()):
            # Make our own event loop - in a new thread - and run inside that.
            loop = asyncio.new_event_loop()
            loop_executor = ThreadPoolExecutor(max_workers=1)
            loop_future = loop_executor.submit(self._run_event_loop, loop, awaitable)
            if current_executor:
                # Run the CurrentThreadExecutor until the future is done
                current_executor.run_until_future(loop_future)
            # Wait for future and/or allow for exception propagation
            loop_future.result()
        else:
            # Call it inside the existing loop
            self.main_event_loop.call_soon_threadsafe(
                self.main_event_loop.create_task, awaitable)
            if current_executor:
                # Run the CurrentThreadExecutor until the future is done
                current_executor.run_until_future(call_result)
    finally:
        # Clean up any executor we were running
        if hasattr(self.executors, "current"):
            del self.executors.current
        if old_current_executor:
            self.executors.current = old_current_executor
        if contextvars is not None:
            _restore_context(context[0])

    # Wait for results from the future.
    return call_result.result()
async def to_thread(func: Callable[[], T], /) -> T:
    loop = asyncio.events.get_running_loop()
    ctx = contextvars.copy_context()
    func_call = cast(Callable[[], T], functools.partial(ctx.run, func))
    return await loop.run_in_executor(None, func_call)
def clear(self):
    ctx = contextvars.copy_context()
    for var in ctx.keys():
        var.set(None)
def main():
    logger = structlog.get_logger()

    # NOTE: using cache_structlog_logger=False JUST FOR DEMO to showcase style changes.
    uberlogging.configure(cache_structlog_loggers=False)
    logger.info("Plain text, autoconfigured with %s", "defaults", text="foo", i=1)
    logging.getLogger("STDLIB").warning("Stdlib logger coming %s", "through")
    logging.getLogger().debug(
        "You should not see this line since root log level is INFO by default")

    uberlogging.configure(style=uberlogging.Style.text_color, cache_structlog_loggers=False)
    logger.info("Plain text, colors (forced)", text="foo", i=1)

    uberlogging.configure(style=uberlogging.Style.text_no_color, cache_structlog_loggers=False)
    logger.info("Plain text, no colors", text="foo", i=1)

    uberlogging.configure(style=uberlogging.Style.json, cache_structlog_loggers=False)
    logger.info("Json, no colors", text="foo", i=1)

    dbgl = "dbg"
    logger_confs = {dbgl: {"level": "DEBUG"}}
    uberlogging.configure(cache_structlog_loggers=False, logger_confs=logger_confs)
    structlog.get_logger(dbgl).debug(
        "This particular logger is in debug level", text="foo", i=1)

    lname = "parent.child"
    logger_confs_list = [dict(
        name=lname,
        level="DEBUG",
    )]
    uberlogging.configure(cache_structlog_loggers=False, logger_confs_list=logger_confs_list)
    structlog.get_logger(lname).debug("Hierarchical logger config through list")

    for suff in ["", "_COLOR", "_NO_COLOR"]:
        env = "UBERLOGGING_FORCE_TEXT" + suff
        os.environ[env] = "1"
        uberlogging.configure(cache_structlog_loggers=False)
        logger.info(f"Autoconfigured with {env}", text="foo", i=1)
        del os.environ[env]

    os.environ["UBERLOGGING_MESSAGE_FORMAT"] = "{asctime} {levelname} -> {message} | context: {context}"
    uberlogging.configure(cache_structlog_loggers=False)
    logger.info("Format overridden through environment variable", text="foo", i=1)
    del os.environ["UBERLOGGING_MESSAGE_FORMAT"]

    uberlogging.configure(fmt="{asctime} {levelname} -- {message}", datefmt="%H:%M:%S",
                          cache_structlog_loggers=False)
    logger.info("Custom format and timestamp", text="foo", i=1)

    class MyStream():
        def write(self, s):
            sys.stderr.write("[CUSTOM STREAM] ")
            sys.stderr.write(s)

    uberlogging.configure(stream=MyStream(), style=uberlogging.Style.text_auto,
                          cache_structlog_loggers=False)
    structlog.get_logger().info("Logging with custom stream", text="foo", i=1)

    # Contextvars demo
    ctxvar: ContextVar[str] = ContextVar("request_id")
    uberlogging.configure(contextvars=(ctxvar, ), cache_structlog_loggers=False)
    logger.info("Main context - no contextvar value is set")

    def _process_request():
        ctxvar.set("CoqIqNGc3BW")
        logger.info("Child context handling request", payload="bar")
        logger.info("Child context finishing request")
        uberlogging.configure(contextvars=(ctxvar, ), style=uberlogging.Style.json,
                              cache_structlog_loggers=False)
        print("ctxvar value", ctxvar.get())
        logger.info("Child context finishing request (JSON mode)")

    ctx: Context = copy_context()
    ctx.run(_process_request)
    logger.info("Main context finished - no contextvar value is set")
def on(
    event_type: T.Union[T.Type[K], T.Type[object]],
    namespace: object,
    listener: T.Optional[ListenerCb[K]] = None,
    *,
    once: bool = False,
    loop: T.Optional[AbstractEventLoop] = None,
    scope: T.Union[str, T.Tuple[str, ...]] = "",
    raise_on_exc: bool = False,
) -> T.Union[
    ListenerCb[K], T.ContextManager[None], T.Callable[[ListenerCb[K]], ListenerCb[K]]
]:
    """Add a listener to event type.

    Context can't be specified when using this function in decorator mode.
    Context can't be specified when passing once=True.

    Args:
        event_type: Specify which event type or scope namespace will trigger this
            listener execution.
        namespace: Specify the namespace in which the listener will be attached.
        listener: Callable to be executed when there is an emission of the given event.
        once: Define whether the given listener is to be removed after its first execution.
        loop: Specify a loop to bind to the given listener and ensure it is always
            executed in the correct context. (Default: Current running loop for
            coroutine functions, None for any other callable)
        scope: Specify a scope for specializing this listener registration.
        raise_on_exc: Whether an untreated exception raised by this listener will make
            an event emission fail.

    Raises:
        TypeError: Failed to bind loop to listener.
        ValueError: event_type is not a type instance, or it is a builtin type, or it is
            BaseException, or listener is not callable.

    Returns:
        If listener isn't provided, this method returns a function that takes a Callable as a \
        single argument. As such it can be used as a decorator. In both the decorated and \
        undecorated forms this function returns the given event listener.

    """
    if listener is None:
        return lambda cb: on(
            event_type,
            namespace,
            cb,
            once=once,
            loop=loop,
            scope=scope,
            raise_on_exc=raise_on_exc,
        )

    if not callable(listener):
        raise ValueError("Listener must be callable")

    scope = parse_scope(scope)

    # Define listeners options
    opts = ListenerOpts.NOP
    if once:
        opts |= ListenerOpts.ONCE
    if raise_on_exc:
        opts |= ListenerOpts.RAISE

    if loop is None and iscoroutinefunction(listener):
        # Automatically set loop for coroutines to avoid problems with emission
        # from another thread
        with suppress(RuntimeError):
            loop = get_running_loop()

    if loop:
        listener = BoundLoopListenerWrapper(loop, listener)

    # Retrieve listeners
    listeners = retrieve_listeners_from_namespace(namespace)

    # Group listener's opts and context
    with (
        nullcontext(listeners.context)
        if listeners.context is None or listeners.context.active
        else listeners.context
    ):
        listener_info = (opts, copy_context())

    # Add the given listener to the correct queue
    if event_type is None:
        raise ValueError("Event type can't be NoneType")
    elif issubclass(event_type, type):
        # Event type must be a class. Reject metaclasses and the like.
        raise ValueError("Event type must be a concrete type")
    elif issubclass(event_type, BaseException) and not issubclass(event_type, Exception):
        raise ValueError("Event type can't be a BaseException")
    else:
        listeners.scope[scope][event_type][listener] = listener_info

    if event_type is not NewListener:
        emit(NewListener(event_type), namespace, sync=True, scope=scope)

    return listener
def wrapper(*args, **kwargs):
    # type: (*Any, **Any) -> Future[_T]
    # This function is type-annotated with a comment to work around
    # https://bitbucket.org/pypy/pypy/issues/2868/segfault-with-args-type-annotation-in
    future = _create_future()

    if contextvars is not None:
        ctx_run = contextvars.copy_context().run  # type: Callable
    else:
        ctx_run = _fake_ctx_run

    try:
        result = ctx_run(func, *args, **kwargs)
    except (Return, StopIteration) as e:
        result = _value_from_stopiteration(e)
    except Exception:
        future_set_exc_info(future, sys.exc_info())
        try:
            return future
        finally:
            # Avoid circular references
            future = None  # type: ignore
    else:
        if isinstance(result, Generator):
            # Inline the first iteration of Runner.run. This lets us
            # avoid the cost of creating a Runner when the coroutine
            # never actually yields, which in turn allows us to
            # use "optional" coroutines in critical path code without
            # performance penalty for the synchronous case.
            try:
                yielded = ctx_run(next, result)
            except (StopIteration, Return) as e:
                future_set_result_unless_cancelled(
                    future, _value_from_stopiteration(e)
                )
            except Exception:
                future_set_exc_info(future, sys.exc_info())
            else:
                # Provide strong references to Runner objects as long
                # as their result future objects also have strong
                # references (typically from the parent coroutine's
                # Runner). This keeps the coroutine's Runner alive.
                # We do this by exploiting the public API
                # add_done_callback() instead of putting a private
                # attribute on the Future.
                # (GitHub issues #1769, #2229).
                runner = Runner(ctx_run, result, future, yielded)
                future.add_done_callback(lambda _: runner)

            yielded = None
            try:
                return future
            finally:
                # Subtle memory optimization: if next() raised an exception,
                # the future's exc_info contains a traceback which
                # includes this stack frame. This creates a cycle,
                # which will be collected at the next full GC but has
                # been shown to greatly increase memory usage of
                # benchmarks (relative to the refcount-based scheme
                # used in the absence of cycles). We can avoid the
                # cycle by clearing the local variable after we return it.
                future = None  # type: ignore

    future_set_result_unless_cancelled(future, result)
    return future
def to_thread(callable: Callable[..., _T], *args, **kwargs) -> Awaitable[_T]:
    # asyncio.to_thread
    loop = asyncio.get_running_loop()
    ctx = contextvars.copy_context()
    partial = functools.partial(ctx.run, callable, *args, **kwargs)
    return loop.run_in_executor(None, partial)
async def __call__(self, *args, **kwargs):
    loop = get_running_loop()

    # Work out what thread to run the code in
    if self._thread_sensitive:
        if hasattr(AsyncToSync.executors, "current"):
            # If we have a parent sync thread above somewhere, use that
            executor = AsyncToSync.executors.current
        elif self.thread_sensitive_context and self.thread_sensitive_context.get(None):
            # If we have a way of retrieving the current context, attempt
            # to use a per-context thread pool executor
            thread_sensitive_context = self.thread_sensitive_context.get()
            if thread_sensitive_context in self.context_to_thread_executor:
                # Re-use thread executor in current context
                executor = self.context_to_thread_executor[thread_sensitive_context]
            else:
                # Create new thread executor in current context
                executor = ThreadPoolExecutor(max_workers=1)
                self.context_to_thread_executor[thread_sensitive_context] = executor
        elif self.deadlock_context and self.deadlock_context.get(False):
            raise RuntimeError(
                "Single thread executor already being used, would deadlock"
            )
        else:
            # Otherwise, we run it in a fixed single thread
            executor = self.single_thread_executor
            if self.deadlock_context:
                self.deadlock_context.set(True)
    else:
        # Use the passed in executor, or the loop's default if it is None
        executor = self._executor

    if contextvars is not None:
        context = contextvars.copy_context()
        child = functools.partial(self.func, *args, **kwargs)
        func = context.run
        args = (child,)
        kwargs = {}
    else:
        func = self.func

    # Run the code in the right thread
    future = loop.run_in_executor(
        executor,
        functools.partial(
            self.thread_handler,
            loop,
            self.get_current_task(),
            sys.exc_info(),
            func,
            *args,
            **kwargs,
        ),
    )
    ret = await asyncio.wait_for(future, timeout=None)

    if contextvars is not None:
        _restore_context(context)

    if self.deadlock_context:
        self.deadlock_context.set(False)

    return ret
def test_context_get_context_1(self):
    ctx = contextvars.copy_context()
    self.assertIsInstance(ctx, contextvars.Context)
def __enter__(self) -> "ProgressBar":
    # Create UI Application.
    title_toolbar = ConditionalContainer(
        Window(
            FormattedTextControl(lambda: self.title),
            height=1,
            style="class:progressbar,title",
        ),
        filter=Condition(lambda: self.title is not None),
    )

    bottom_toolbar = ConditionalContainer(
        Window(
            FormattedTextControl(
                lambda: self.bottom_toolbar, style="class:bottom-toolbar.text"
            ),
            style="class:bottom-toolbar",
            height=1,
        ),
        filter=~is_done
        & renderer_height_is_known
        & Condition(lambda: self.bottom_toolbar is not None),
    )

    def width_for_formatter(formatter: Formatter) -> AnyDimension:
        # Needs to be passed as callable (partial) to the 'width'
        # parameter, because we want to call it on every resize.
        return formatter.get_width(progress_bar=self)

    progress_controls = [
        Window(
            content=_ProgressControl(self, f),
            width=functools.partial(width_for_formatter, f),
        )
        for f in self.formatters
    ]

    self.app: Application[None] = Application(
        min_redraw_interval=0.05,
        layout=Layout(
            HSplit(
                [
                    title_toolbar,
                    VSplit(
                        progress_controls,
                        height=lambda: D(
                            preferred=len(self.counters), max=len(self.counters)
                        ),
                    ),
                    Window(),
                    bottom_toolbar,
                ]
            )
        ),
        style=self.style,
        key_bindings=self.key_bindings,
        refresh_interval=0.3,
        color_depth=self.color_depth,
        output=self.output,
        input=self.input,
    )

    # Run application in different thread.
    def run() -> None:
        set_event_loop(self._app_loop)
        try:
            self.app.run()
        except BaseException as e:
            traceback.print_exc()
            print(e)

    ctx: contextvars.Context = contextvars.copy_context()

    self._thread = threads_new.Thread(target=ctx.run, args=(run,))
    self._thread.start()

    # Attach WINCH signal handler in main thread.
    # (Interrupt that we receive during resize events.)
    self._has_sigwinch = hasattr(signal, "SIGWINCH") and in_main_thread()
    if self._has_sigwinch:
        self._previous_winch_handler = signal.getsignal(signal.SIGWINCH)
        self._loop.add_signal_handler(signal.SIGWINCH, self.invalidate)

    return self
def wrapping(*args, **kwargs):
    new_func = functools.partial(func, *args, **kwargs)
    ctx = contextvars.copy_context()
    ctx_func = functools.partial(ctx.run, new_func)
    return loop.run_in_executor(_executor, ctx_func)
def wrapped(*args, **kwargs):
    is_async_ = is_async()
    if is_async_ and not awaitable:
        raise AssertionError("Cannot call not awaitable from async context")
    elif awaitable and not is_async_:
        raise AssertionError("Cannot call awaitable from sync context")

    try:
        pending = pending_transaction_complete_operations.get()
    except LookupError:
        pending = PENDING_TRANSACTION_COMPLETE_OPERATIONS

    if deduplicate:
        dedup_id = deduplicate(*args, **kwargs)
        pending[dedup_id] = callable

    future = Future() if awaitable else None

    def call():
        if deduplicate:
            if pending.get(dedup_id) != callable:
                _logger.info(
                    "Deduplicated call with dedup_id %r to %s", dedup_id, callable)
                if awaitable:
                    future.set_result(None)
                elif callback:
                    callback(None)
                return
            try:
                del pending[dedup_id]
            except KeyError:
                pass
        try:
            result = callable(*args, **kwargs)
            if awaitable:
                future.set_result(result)
            elif callback:
                callback(result)
        except Exception as error:
            if awaitable:
                future.set_exception(error)
            elif error_callback:
                error_callback(error)
            else:
                _logger.exception(error, exc_info=True, stack_info=True)
                raise

    context = copy_context()
    if not transaction.get_connection().in_atomic_block:
        context.run(call)
    else:
        transaction.on_commit(lambda: context.run(call))

    return future
var = ContextVar('var')
var.set('spam')


def main():
    # 'var' was set to 'spam' before
    # calling 'copy_context()' and 'ctx.run(main)', so:
    # var.get() == ctx[var] == 'spam'

    var.set('ham')
    print('inside:', var.get())

    # Now, after setting 'var' to 'ham':
    # var.get() == ctx[var] == 'ham'


ctx = copy_context()

# Any changes that the 'main' function makes to 'var'
# will be contained in 'ctx'.
ctx.run(main)
print('outside:', var.get())


def main1():
    var.set('ham')
    print('inside2:', var.get())


main1()
print('outside2:', var.get())

# The 'main()' function was run in the 'ctx' context, so the change it made
# to 'var' is contained in 'ctx', while outside of 'ctx' (and in 'main1',
# which ran in the current context) 'var' behaves as printed above.
def context_partial(func, *args, **kwargs):
    context = contextvars.copy_context()
    return partial(context.run, func, *args, **kwargs)
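# A minimal usage sketch for context_partial above. The names `user` and
# `greet` are illustrative, not from the original code; the point is that the
# context is captured when the partial is built, so the callable later sees
# the value that was current at that moment, even if the variable changes.
from contextvars import ContextVar
from functools import partial

user: ContextVar[str] = ContextVar('user', default='anonymous')

def greet() -> str:
    return f"hello {user.get()}"

user.set('alice')
deferred = context_partial(greet)   # captures a context with user='alice'
user.set('bob')                     # later change is invisible to `deferred`
assert deferred() == 'hello alice'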
def typeshed_third_party_methods():
    copy_context(42)  # Noncompliant
def wrapper(*a, **kw):
    # Keep track of whether the next value to deliver to the generator is
    # a non-exception or an exception.
    ok = True

    # Keep track of the next value to deliver to the generator.
    value_in = None

    # Create the generator with a call to the generator function. This
    # happens with whatever Eliot action context happens to be active,
    # which is fine and correct and also irrelevant because no code in the
    # generator function can run until we call send or throw on it.
    gen = original(*a, **kw)

    # Initialize the per-generator context to a copy of the current context.
    context = copy_context()
    while True:
        try:
            # Whichever way we invoke the generator, we will do it
            # with the Eliot action context stack we've saved for it.
            # Then the context manager will re-save it and restore the
            # "outside" stack for us.
            #
            # Regarding the support of Twisted's inlineCallbacks-like
            # functionality (see eliot.twisted.inline_callbacks):
            #
            # The invocation may raise the inlineCallbacks internal
            # control flow exception _DefGen_Return. It is not wrong to
            # just let that propagate upwards here but inlineCallbacks
            # does think it is wrong. The behavior triggers a
            # DeprecationWarning to try to get us to fix our code. We
            # could explicitly handle and re-raise the _DefGen_Return but
            # only at the expense of depending on a private Twisted API.
            # For now, I'm opting to try to encourage Twisted to fix the
            # situation (or at least not worsen it):
            # https://twistedmatrix.com/trac/ticket/9590
            #
            # Alternatively, _DefGen_Return is only required on Python 2.
            # When Python 2 support is dropped, this concern can be
            # eliminated by always using `return value` instead of
            # `returnValue(value)` (and adding the necessary logic to the
            # StopIteration handler below).
            def go():
                if ok:
                    value_out = gen.send(value_in)
                else:
                    value_out = gen.throw(*value_in)
                # We have obtained a value from the generator. In
                # giving it to us, it has given up control. Note this
                # fact here. Importantly, this is within the
                # generator's action context so that we get a good
                # indication of where the yield occurred.
                #
                # This is noisy, enable only for debugging:
                if wrapper.debug:
                    Message.log(message_type="yielded")
                return value_out

            value_out = context.run(go)
        except StopIteration:
            # When the generator raises this, it is signaling
            # completion. Leave the loop.
            break
        else:
            try:
                # Pass the generator's result along to whoever is
                # driving. Capture the result as the next value to
                # send inward.
                value_in = yield value_out
            except:
                # Or capture the exception if that's the flavor of the
                # next value. This could possibly include GeneratorExit
                # which turns out to be just fine because throwing it into
                # the inner generator effectively propagates the close
                # (and with the right context!) just as you would want.
                # True, the GeneratorExit does get re-thrown out of the
                # gen.throw call and hits _the_generator_context's
                # contextmanager. But @contextmanager extremely
                # conveniently eats it for us! Thanks, @contextmanager!
                ok = False
                value_in = exc_info()
            else:
                ok = True
def __init__(self) -> None:
    self.var = var = ContextVar('context', default=42)
    self.ctx = copy_context()
    self.test: Optional[int] = None
def decode_and_digest(broker, message, group_name, handlers):
    stream_name, event_ids, events = decode_item(message)
    for event_id, event in zip(event_ids, events):
        ctx = copy_context()
        ctx.run(digest_event, stream_name, event, event_id, handlers)
        broker.xack(stream_name, group_name, event_id)
async def invoke(
        fn: Callable,
        *args,
        **kwargs):
    """
    Invoke a single function, but safely for the main asyncio process.

    Used both for the handler functions and for the lifecycle callbacks.

    A full set of the arguments is provided, expanding the cause to some easily
    usable aliases. The function is expected to accept ``**kwargs`` for the args
    that it does not use -- for forward compatibility with the new features.

    The synchronous methods are executed in the executor (threads or processes),
    thus making it non-blocking for the main event loop of the operator.
    See: https://pymotw.com/3/asyncio/executors.html
    """

    # Add aliases for the kwargs, directly linked to the body, or to the assumed defaults.
    if 'event' in kwargs:
        event = kwargs.get('event')
        kwargs.update(
            type=event['type'],
            body=event['object'],
            spec=event['object'].setdefault('spec', {}),
            meta=event['object'].setdefault('metadata', {}),
            status=event['object'].setdefault('status', {}),
            uid=event['object'].get('metadata', {}).get('uid'),
            name=event['object'].get('metadata', {}).get('name'),
            namespace=event['object'].get('metadata', {}).get('namespace'),
        )
    if 'cause' in kwargs:
        cause = kwargs.get('cause')
        kwargs.update(
            event=cause.event,
            body=cause.body,
            diff=cause.diff,
            old=cause.old,
            new=cause.new,
            patch=cause.patch,
            logger=cause.logger,
            spec=cause.body.setdefault('spec', {}),
            meta=cause.body.setdefault('metadata', {}),
            status=cause.body.setdefault('status', {}),
            uid=cause.body.get('metadata', {}).get('uid'),
            name=cause.body.get('metadata', {}).get('name'),
            namespace=cause.body.get('metadata', {}).get('namespace'),
        )

    if is_async_fn(fn):
        result = await fn(*args, **kwargs)
    else:
        # Not that we want to use functools, but for executor kwargs, it is officially recommended:
        # https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor
        real_fn = functools.partial(fn, *args, **kwargs)

        # Copy the asyncio context from the current thread to the handler's thread.
        # It can be copied 2+ times if there are sub-sub-handlers (rare case).
        context = contextvars.copy_context()
        real_fn = functools.partial(context.run, real_fn)

        loop = asyncio.get_event_loop()
        result = await loop.run_in_executor(config.WorkersConfig.get_syn_executor(), real_fn)
    return result
def init_stack(self, generator):
    """Create a new stack for the given generator."""
    self._contexts[generator] = copy_context()
def __init__(self, fn: typing.Callable) -> None:
    self._fn = fn
    self._ctx = contextvars.copy_context()  # type: contextvars.Context
async def run_sync(func, *args, thread_pool=None, **kwargs):
    loop = asyncio.get_running_loop()
    thread_pool = thread_pool or THREAD_POOL
    func_wrapped = functools.partial(func, *args, **kwargs)
    return await loop.run_in_executor(thread_pool, copy_context().run, func_wrapped)
def _new_ctx_run(self, *args, **kwargs):
    return copy_context().run(*args, **kwargs)
async def _run_async() -> _AppResult:
    " Coroutine. "
    loop = get_event_loop()
    f = loop.create_future()
    self.future = f  # XXX: make sure to set this before calling '_redraw'.
    self.loop = loop
    self.context = contextvars.copy_context()

    # Counter for cancelling 'flush' timeouts. Every time when a key is
    # pressed, we start a 'flush' timer for flushing our escape key. But
    # when any subsequent input is received, a new timer is started and
    # the current timer will be ignored.
    flush_counter = 0

    # Reset.
    self.reset()
    self._pre_run(pre_run)

    # Feed type ahead input first.
    self.key_processor.feed_multiple(get_typeahead(self.input))
    self.key_processor.process_keys()

    def read_from_input() -> None:
        nonlocal flush_counter

        # Ignore when we aren't running anymore. This callback will be
        # removed from the loop next time. (It could be that it was
        # still in the 'tasks' list of the loop.)
        # Except: if we need to process incoming CPRs.
        if not self._is_running and not self.renderer.waiting_for_cpr:
            return

        # Get keys from the input object.
        keys = self.input.read_keys()

        # Feed to key processor.
        self.key_processor.feed_multiple(keys)
        self.key_processor.process_keys()

        # Quit when the input stream was closed.
        if self.input.closed:
            f.set_exception(EOFError)
        else:
            # Increase this flush counter.
            flush_counter += 1
            counter = flush_counter

            # Automatically flush keys.
            ensure_future(auto_flush_input(counter))

    async def auto_flush_input(counter: int) -> None:
        # Flush input after timeout.
        # (Used for flushing the enter key.)
        await sleep(self.ttimeoutlen)

        if flush_counter == counter:
            flush_input()

    def flush_input() -> None:
        if not self.is_done:
            # Get keys, and feed to key processor.
            keys = self.input.flush_keys()
            self.key_processor.feed_multiple(keys)
            self.key_processor.process_keys()

            if self.input.closed:
                f.set_exception(EOFError)

    # Enter raw mode.
    with self.input.raw_mode():
        with self.input.attach(read_from_input):
            # Draw UI.
            self._request_absolute_cursor_position()
            self._redraw()

            has_sigwinch = hasattr(signal, 'SIGWINCH') and in_main_thread()
            if has_sigwinch:
                previous_winch_handler = signal.getsignal(signal.SIGWINCH)
                loop.add_signal_handler(signal.SIGWINCH, self._on_resize)

            # Wait for UI to finish.
            try:
                result = await f
            finally:
                # In any case, when the application finishes. (Successful,
                # or because of an error.)
                try:
                    self._redraw(render_as_done=True)
                finally:
                    # _redraw has a good chance to fail if it calls widgets
                    # with bad code. Make sure to reset the renderer anyway.
                    self.renderer.reset()

                    # Unset `is_running`, this ensures that possibly
                    # scheduled draws won't paint during the following
                    # yield.
                    self._is_running = False

                    # Detach event handlers for invalidate events.
                    # (Important when a UIControl is embedded in
                    # multiple applications, like ptterm in pymux. An
                    # invalidate should not trigger a repaint in
                    # terminated applications.)
                    for ev in self._invalidate_events:
                        ev -= self._invalidate_handler
                    self._invalidate_events = []

                    # Wait for CPR responses.
                    if self.input.responds_to_cpr:
                        await self.renderer.wait_for_cpr_responses()

                    if has_sigwinch:
                        loop.remove_signal_handler(signal.SIGWINCH)
                        signal.signal(signal.SIGWINCH, previous_winch_handler)

                    # Wait for the run-in-terminals to terminate.
                    previous_run_in_terminal_f = self._running_in_terminal_f

                    if previous_run_in_terminal_f:
                        await previous_run_in_terminal_f

                    # Store unprocessed input as typeahead for next time.
                    store_typeahead(self.input, self.key_processor.empty_queue())

            return result
def run_sync_from_thread(fn: Callable[..., T_Retval], *args: Any) -> T_Retval:
    # TODO: remove explicit context copying when trio 0.20 is the minimum requirement
    retval = trio.from_thread.run_sync(copy_context().run, fn, *args)
    return cast(T_Retval, retval)
async def to_thread(callable: Callable[_P, _T], *args: _P.args, **kwargs: _P.kwargs) -> _T:
    loop = asyncio.get_running_loop()
    ctx = contextvars.copy_context()
    partial = functools.partial(ctx.run, callable, *args, **kwargs)
    return await loop.run_in_executor(None, partial)
def __init__(self, coro):
    super().__init__(coro)
    self._context = contextvars.copy_context()