Example #1
File: _pytest.py Project: dr5801/qtrio
    def wrapper(*args, **kwargs):
        request = kwargs["request"]

        qapp = request.getfixturevalue("qapp")
        qtbot = request.getfixturevalue("qtbot")

        test_outcomes_sentinel = qtrio.Outcomes(
            qt=outcome.Value(0), trio=outcome.Value(29),
        )
        test_outcomes = test_outcomes_sentinel

        def done_callback(outcomes):
            nonlocal test_outcomes
            test_outcomes = outcomes

        runner = qtrio._core.Runner(
            application=qapp,
            done_callback=done_callback,
            quit_application=False,
            timeout=timeout,
        )

        runner.run(
            functools.partial(test_function, **kwargs),
            *args,
            execute_application=False,
        )

        # TODO: probably increases runtime of fast tests a lot due to polling
        qtbot.wait_until(
            lambda: test_outcomes is not test_outcomes_sentinel, timeout=3.14e8
        )
        test_outcomes.unwrap()
Example #2
File: _traps.py Project: Y-Zhang-0/code
async def reattach_detached_coroutine_object(task, yield_value):
    """Reattach a coroutine object that was detached using
    :func:`temporarily_detach_coroutine_object`.

    When the calling coroutine enters this function it's running under the
    foreign coroutine runner, and when the function returns it's running under
    Trio.

    This must be called from inside the coroutine being resumed, and yields
    whatever value you pass in. (Presumably you'll pass a value that will
    cause the current coroutine runner to stop scheduling this task.) Then the
    coroutine is resumed by the Trio scheduler at the next opportunity.

    Args:
      task (Task): The Trio task object that the current coroutine was
          detached from.
      yield_value (object): The object to yield to the current coroutine
          runner.

    """
    # This is a kind of crude check – in particular, it can fail if the
    # passed-in task is where the coroutine *runner* is running. But this is
    # an experts-only interface, and there's no easy way to do a more accurate
    # check, so I guess that's OK.
    if not task.coro.cr_running:
        raise RuntimeError("given task does not match calling coroutine")
    _run.reschedule(task, outcome.Value("reattaching"))
    value = await _async_yield(yield_value)
    assert value == outcome.Value("reattaching")
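
Every snippet on this page passes results around as outcome objects, so here is a minimal standalone sketch of the outcome API itself (not taken from any of the projects above): Value/Error wrap a result, capture() builds one from a call, and unwrap() replays it.

import outcome

ok = outcome.Value(42)
assert ok.unwrap() == 42            # unwrap() returns the wrapped value

err = outcome.capture(int, "oops")  # capture() turns the raised ValueError into an Error
try:
    err.unwrap()                    # unwrap() re-raises the captured exception
except ValueError:
    pass
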
Example #3
    async def consume_next():
        try:
            item = await async_generator.__anext__()
            result = outcome.Value(value=item)
        except StopAsyncIteration:
            result = outcome.Value(value=STOP)
        except asyncio.CancelledError:
            # Once we are cancelled, we do not call reschedule() anymore
            return
        except Exception as e:
            result = outcome.Error(error=e)

        trio.hazmat.reschedule(task, result)
Example #4
    async def consume_next():
        t = sniffio.current_async_library_cvar.set("asyncio")
        try:
            item = await async_generator.__anext__()
            result = outcome.Value(value=item)
        except StopAsyncIteration:
            result = outcome.Value(value=STOP)
        except asyncio.CancelledError:
            # Once we are cancelled, we do not call reschedule() anymore
            return
        except Exception as e:
            result = outcome.Error(error=e)
        finally:
            sniffio.current_async_library_cvar.reset(t)

        trio.hazmat.reschedule(task, result)
Example #5
 async def _second_runner(self, queue: RequestQueue[int, int]) -> None:
     while True:
         many = await queue.get_many()
         for val, coro in many[::-1]:
             try:
                 result = await self.queue.request(val + 100)
             except Exception as e:
                 coro.resume(outcome.Error(e))
             else:
                 coro.resume(outcome.Value(result))
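
The try/except in this loop is roughly what outcome.acapture() packages up, so a hedged equivalent looks like the sketch below. It reuses the snippet's own names, and note that acapture() also captures BaseExceptions such as trio.Cancelled, which the explicit "except Exception" above deliberately lets escape.

result = await outcome.acapture(self.queue.request, val + 100)
coro.resume(result)
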
Example #6
 async def _first_runner(self, queue: RequestQueue) -> None:
     while True:
         many = await queue.get_many()
         for val, coro in many[::-1]:
             if val == 1337:
                 try:
                     failing_function(x)  # type: ignore
                 except Exception as e:
                     coro.resume(outcome.Error(e))
             else:
                 coro.resume(outcome.Value(val + 10))
Example #7
File: test_core.py Project: dr5801/qtrio
def test_outcomes_unwrap_raises_qt_error_over_trio_value():
    """Unwrapping an Outcomes prioritizes a Qt error over a Trio value."""

    class LocalUniqueException(Exception):
        pass

    this_outcome = qtrio.Outcomes(
        qt=outcome.Error(LocalUniqueException()), trio=outcome.Value(8),
    )

    with pytest.raises(LocalUniqueException):
        this_outcome.unwrap()
Example #8
def outcome_from_application_return_code(return_code: int) -> outcome.Outcome:
    """Create either an :class:`outcome.Value` in the case of a 0 `return_code` or an
    :class:`outcome.Error` with a :class:`ReturnCodeError` otherwise.

    Args:
        return_code: The return code to be processed.
    """

    if return_code == 0:
        return outcome.Value(return_code)

    return outcome.Error(qtrio.ReturnCodeError(return_code))
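
A hedged usage sketch for this helper, assuming qtrio.ReturnCodeError behaves as the docstring says; outcome.Value instances compare by value, as Example #22 below also relies on.

import outcome
import qtrio

assert outcome_from_application_return_code(0) == outcome.Value(0)

try:
    outcome_from_application_return_code(2).unwrap()
except qtrio.ReturnCodeError:
    pass  # a non-zero return code unwraps to a raised ReturnCodeError
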
Example #9
 def process_events(self, events):
     for event in events:
         key = (event.ident, event.filter)
         if event.ident == self._force_wakeup_fd:
             self._force_wakeup.drain()
             continue
         receiver = self._registered[key]
         if event.flags & select.KQ_EV_ONESHOT:
             del self._registered[key]
         if type(receiver) is _core.Task:
             _core.reschedule(receiver, outcome.Value(event))
         else:
             receiver.put_nowait(event)
Example #10
 async def receiver():
     async with child_recv_chan:
         async for i in child_recv_chan:
             # Just consume all results from the channel until exhausted
             pass
     # And then wrap up the result and push it to the parent channel
     errors = [e.error for e in result_list if isinstance(e, outcome.Error)]
     if len(errors) > 0:
         result = outcome.Error(trio.MultiError(errors))
     else:
         result = outcome.Value([o.unwrap() for o in result_list])
     async with parent_send_chan:
         await parent_send_chan.send(result)
Example #11
    def put_nowait(self, obj):
        """Attempt to put an object into the queue, without blocking.

        Args:
          obj (object): The object to enqueue.

        Raises:
          WouldBlock: if the queue is full.

        """
        if self._get_wait:
            assert not self._data
            task, _ = self._get_wait.popitem(last=False)
            _core.reschedule(task, outcome.Value(obj))
        elif len(self._data) < self.capacity:
            self._data.append(obj)
        else:
            raise _core.WouldBlock()
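
For context, a hedged sketch of the matching blocking get(): it assumes _data is a deque and _get_wait an OrderedDict keyed by task, mirroring how put_nowait() uses them above, and the outcome handed to reschedule() there is what wait_task_rescheduled() returns here.

async def get(self):
    if self._data:
        return self._data.popleft()
    task = _core.current_task()
    self._get_wait[task] = None

    def abort_fn(_):
        del self._get_wait[task]
        return _core.Abort.SUCCEEDED

    # put_nowait() calls _core.reschedule(task, outcome.Value(obj)); that
    # outcome is unwrapped by the run loop and returned from this await.
    return await _core.wait_task_rescheduled(abort_fn)
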
Example #12
File: server.py Project: smurfix/MudPyC
 async def _msg_in(self, msg):
     if msg.get("result",()) and msg["result"][0] != "Pong":
         self.__logger.debug("IN %r",msg)
     seq = msg.get("seq",None)
     if seq is not None:
         try:
             ev = self._replies[seq]
         except KeyError:
             self.__logger.warning("Unknown Reply %r",msg)
             return
         else:
             if not isinstance(ev, trio.Event):
                 self.__logger.warning("Dup Reply %r",msg)
                 return
         try:
             self._replies[seq] = outcome.Value(msg["result"])
         except KeyError:
             self._replies[seq] = outcome.Error(RuntimeError(msg.get("error","Unknown error")))
         ev.set()
     else:
         await self._dispatch(msg)
     self.__logger.debug("IN done")
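
A hedged sketch of the request side this handler pairs with: the caller parks a trio.Event under its sequence number, _msg_in() above swaps that entry for an Outcome and then sets the Event, and the caller unwraps the stored result. The _send helper name here is made up.

async def _request(self, seq, data):
    ev = trio.Event()
    self._replies[seq] = ev
    await self._send(dict(seq=seq, **data))  # hypothetical outgoing-message helper
    await ev.wait()                          # _msg_in() replaces the Event with an Outcome...
    return self._replies.pop(seq).unwrap()   # ...so unwrap() returns the result or re-raises
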
Example #13
File: _io_kqueue.py Project: guilledk/trio
 def handle_io(self, timeout):
     # max_events must be > 0 or kqueue gets cranky
     # and we generally want this to be strictly larger than the actual
     # number of events we get, so that we can tell that we've gotten
     # all the events in just 1 call.
     max_events = len(self._registered) + 1
     events = []
     while True:
         batch = self._kqueue.control([], max_events, timeout)
         events += batch
         if len(batch) < max_events:
             break
         else:
             timeout = 0
             # and loop back to the start
     for event in events:
         key = (event.ident, event.filter)
         receiver = self._registered[key]
         if event.flags & select.KQ_EV_ONESHOT:
             del self._registered[key]
         if type(receiver) is _core.Task:
             _core.reschedule(receiver, outcome.Value(event))
         else:
             receiver.put_nowait(event)
Example #14
 def abort(_):
     _core.reschedule(task, outcome.Value(1))
     return _core.Abort.FAILED
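
For context, this abort callback is the Abort.FAILED idiom: the waiting task is parked in wait_task_rescheduled(), and on cancellation the callback reschedules it with a value and refuses the cancel, so the wait returns 1 instead of raising Cancelled. A hedged sketch of the waiting side, assuming `task` was captured via _core.current_task() in the waiting task itself:

async def wait_refusing_cancel():
    # abort() reschedules `task` with outcome.Value(1) and returns
    # Abort.FAILED, so a cancellation is swallowed and this returns 1.
    return await _core.wait_task_rescheduled(abort)
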
Example #15
 def resume(self, value: Outcome[SendType]) -> None:
     if self.cancelled:
         # discard the result - not great, obviously...
         logger.debug("TrioContinuation(%s): resumed after cancellation",
                      self.task)
         return
     if self.on_stack:
         logger.debug("TrioContinuation(%s): immediately resumed with %s",
                      self.task, value)
         # This will happen if the function passed to shift immediately resumes the
         # continuation. With trio, we run the function passed to shift on the
         # coroutine that's being suspended. So we can't resume the coroutine here,
         # since it's already running. Instead we'll save the outcome, and in shift()
         # we check saved_send and just return immediately if it's set. This is not
         # normal shift/reset semantics but it's the best we can do with how trio is
         # structured.
         self.saved_send = value
         return
     resuming_task = GLOBAL_RUN_CONTEXT.task
     runner = GLOBAL_RUN_CONTEXT.runner
     logger.debug("TrioContinuation(%s): resuming with %s", self.task,
                  value)
     global _under_coro_runner
     try:
         previous_runner = _under_coro_runner
         _under_coro_runner = Runner.TRIO
         # We have to temporarily set GLOBAL_RUN_CONTEXT.task to the task that is being
         # resumed; after all, that's the task that's really going to be running. This
         # wouldn't be necessary if we had proper dynamically scoped variables in
         # Python :(
         GLOBAL_RUN_CONTEXT.task = self.task
         # a little bit of reschedule(), before we run the task
         self.task._abort_func = None
         self.task.custom_sleep_data = None
         try:
             msg = self.task.context.run(self.task.coro.send, value)
         except StopIteration as exn:
             logger.debug("TrioContinuation(%s): return %s", self.task,
                          exn.value)
             GLOBAL_RUN_CONTEXT.runner.task_exited(self.task,
                                                   outcome.Value(exn.value))
             return
         except BaseException as exn:
             logger.debug("TrioContinuation(%s): raised %s", self.task, exn)
             exn = exn.with_traceback(exn.__traceback__
                                      and exn.__traceback__.tb_next)
             GLOBAL_RUN_CONTEXT.runner.task_exited(self.task,
                                                   outcome.Error(exn))
             return
         logger.debug("TrioContinuation(%s): yield %s", self.task, msg)
     finally:
         _under_coro_runner = previous_runner
         GLOBAL_RUN_CONTEXT.task = resuming_task
     if msg is CancelShieldedCheckpoint:
         runner.reschedule(self.task)
     elif type(msg) is WaitTaskRescheduled:
         self.task._abort_func = msg.abort_func
         if runner.ki_pending and self.task is runner.main_task:
             self.task._attempt_delivery_of_pending_ki()
         self.task._attempt_delivery_of_any_pending_cancel()
     elif type(msg) is PermanentlyDetachCoroutineObject:
         runner.task_exited(self.task, msg.final_outcome)
     else:
         raise TypeError("bad yield from continuation", msg)
Example #16
    def handle_io(self, timeout):
        # Step 0: the first time through, initialize the IOCP thread
        if self._iocp_thread is None:
            # The rare non-daemonic thread -- close() should always be called,
            # even on error paths, and we want to join it there.
            self._iocp_thread = threading.Thread(target=self._iocp_thread_fn,
                                                 name="trio-IOCP")
            self._iocp_thread.start()

        # Step 1: select for sockets, with the given timeout.
        # If there are events queued from the IOCP thread, then the timeout is
        # implicitly reduced to 0 b/c the wakeup socket has pending data in
        # it.
        def socket_ready(what, sock, result):
            task = self._socket_waiters[what].pop(sock)
            _core.reschedule(task, result)

        def socket_check(what, sock):
            try:
                select([sock], [sock], [sock], 0)
            except OSError as exc:
                socket_ready(what, sock, outcome.Error(exc))

        def do_select():
            r_waiting = self._socket_waiters["read"]
            w_waiting = self._socket_waiters["write"]
            # We select for exceptional conditions on the writable set because
            # on Windows, a failed non-blocking connect shows up as
            # "exceptional". Everyone else uses "writable" for this, so we
            # normalize it.
            r, w1, w2 = select(r_waiting, w_waiting, w_waiting, timeout)
            return r, set(w1 + w2)

        try:
            r, w = do_select()
        except OSError:
            # Some socket was closed or similar. Track it down and get rid of
            # it.
            for what in ["read", "write"]:
                for sock in self._socket_waiters[what]:
                    socket_check(what, sock)
            r, w = do_select()

        for sock in r:
            if sock is not self._main_thread_waker.wakeup_sock:
                socket_ready("read", sock, outcome.Value(None))
        for sock in w:
            socket_ready("write", sock, outcome.Value(None))

        # Step 2: drain the wakeup socket.
        # This must be done before checking the IOCP queue.
        self._main_thread_waker.drain()

        # Step 3: process the IOCP queue. If new events arrive while we're
        # processing the queue then we leave them for next time.
        # XX should probably have some sort emergency bail out if the queue
        # gets too long?
        for _ in range(len(self._iocp_queue)):
            msg = self._iocp_queue.popleft()
            if isinstance(msg, BaseException):
                # IOCP thread encountered some unexpected error -- give up and
                # let the user know.
                raise msg
            batch, received = msg
            for i in range(received):
                entry = batch[i]
                if entry.lpCompletionKey == 0:
                    # Regular I/O event, dispatch on lpOverlapped
                    waiter = self._overlapped_waiters.pop(entry.lpOverlapped)
                    _core.reschedule(waiter)
                else:
                    # dispatch on lpCompletionKey
                    queue = self._completion_key_queues[entry.lpCompletionKey]
                    overlapped = int(ffi.cast("uintptr_t", entry.lpOverlapped))
                    transferred = entry.dwNumberOfBytesTransferred
                    info = CompletionKeyEventInfo(
                        lpOverlapped=overlapped,
                        dwNumberOfBytesTransferred=transferred,
                    )
                    queue.put_nowait(info)
Example #17
 async def set(self, value):
     await self._q_w.send(outcome.Value(value))
Example #18
File: _impl.py Project: oremanj/greenback
def _greenback_shim(orig_coro: Coroutine[Any, Any, Any]) -> Generator[Any, Any, Any]:
    # In theory this could be written as a simpler function that uses
    # _greenback_shim_sync():
    #
    #     next_yield = "ready"
    #     while True:
    #         try:
    #             target = partial(orig_coro.send, (yield next_yield))
    #         except BaseException as ex:
    #             target = partial(orig_coro.throw, ex)
    #         try:
    #             next_yield = yield from _greenback_shim_sync(target)
    #         except StopIteration as ex:
    #             return ex.value
    #
    # In practice, this doesn't work that well: _greenback_shim_sync()
    # has a hard time raising StopIteration, because it's a generator,
    # and unrolling it into a non-generator iterable makes it slower.
    # So we'll accept a bit of code duplication.
    parent_greenlet = greenlet.getcurrent()

    # The greenlet in which each send() or throw() call will occur.
    child_greenlet: Optional[greenlet.greenlet] = None

    # The contextvars.Context that we have most recently seen as active
    # for this task and propagated to child_greenlet
    curr_ctx: Optional[contextvars.Context] = None

    # The next thing we plan to yield to the event loop. (The first yield
    # goes to ensure_portal() rather than to the event loop, so we use a
    # string that is unlikely to be a valid event loop trap.)
    next_yield: Any = "ready"

    # The next thing we plan to send to the original coroutine. This is an
    # outcome representing the value or error that the event loop resumed
    # us with.
    next_send: outcome.Outcome[Any]
    while True:
        try:
            # Normally we send to orig_coro whatever the event loop sent us
            next_send = outcome.Value((yield next_yield))
        except BaseException as ex:
            # If the event loop resumed us with an error, we forward that error
            next_send = outcome.Error(ex)
        try:
            if not child_greenlet:
                # Start a new send() or throw() call on the original coroutine.
                child_greenlet = greenlet.greenlet(next_send.send)
                switch_arg: Any = orig_coro
            else:
                # Resume the previous send() or throw() call, which is currently
                # at a simulated yield point in a greenback.await_() call.
                switch_arg = next_send

            if (
                greenlet_needs_context_fixup
                and parent_greenlet.gr_context is not curr_ctx
                and child_greenlet.gr_context is curr_ctx
            ):
                # Make sure the child greenlet's contextvars context
                # is the same as our own, even if our own context
                # changes (such as via trio.Task.context assignment),
                # unless the child greenlet appears to have changed
                # its context privately through a call to Context.run().
                #
                # Note 'parent_greenlet.gr_context' here is just a
                # portable way of getting the current contextvars
                # context, which is not exposed by the contextvars
                # module directly (copy_context() returns a copy, not
                # a new reference to the original).  Upon initial
                # creation of child_greenlet, curr_ctx and
                # child_greenlet.gr_context will both be None, so this
                # condition works for that case too.
                child_greenlet.gr_context = curr_ctx = parent_greenlet.gr_context

            next_yield = child_greenlet.switch(switch_arg)
            if child_greenlet.dead:
                # The send() or throw() call completed so we need to
                # create a new greenlet for the next one.
                child_greenlet = curr_ctx = None
        except StopIteration as ex:
            # The underlying coroutine completed, so we forward its return value.
            return ex.value
Example #19
 def set(self, value):
     """Set the internal flag value to True, and wake any waiting tasks."""
     self.value = outcome.Value(value)
     self.event.set()
     return anyio.DeprecatedAwaitable(self.set)
Example #20
File: _impl.py Project: oremanj/greenback
def _greenback_shim_sync(target: Callable[[], Any]) -> Generator[Any, Any, Any]:
    """Run target(), forwarding the event loop traps and responses necessary
    to implement any await_() calls that it makes.

    This is only a little bit faster than using greenback_shim() plus a
    sync-to-async wrapper -- maybe 2us faster for the entire call,
    so it only matters when you're scoping the portal to a very small
    range. We ship it anyway because it's easier to understand than
    the async-compatible _greenback_shim(), and helps with understanding
    the latter.
    """

    parent_greenlet = greenlet.getcurrent()
    curr_ctx = None

    # The greenlet in which we run target().
    child_greenlet = greenlet.greenlet(target)

    # The next thing we plan to yield to the event loop.
    next_yield: Any

    # The next thing we plan to send via greenlet.switch(). This is an
    # outcome representing the value or error that the event loop resumed
    # us with. Initially None for the very first zero-argument switch().
    next_send: Optional[outcome.Outcome[Any]] = None

    while True:
        if (
            greenlet_needs_context_fixup
            and parent_greenlet.gr_context is not curr_ctx
            and child_greenlet.gr_context is curr_ctx
        ):
            # Make sure the child greenlet's contextvars context
            # is the same as our own, even if our own context
            # changes (such as via trio.Task.context assignment),
            # unless the child greenlet appears to have changed
            # its context privately through a call to Context.run().
            #
            # Note 'parent_greenlet.gr_context' here is just a
            # portable way of getting the current contextvars
            # context, which is not exposed by the contextvars
            # module directly (copy_context() returns a copy, not
            # a new reference to the original).  Upon initial
            # creation of child_greenlet, curr_ctx and
            # child_greenlet.gr_context will both be None, so this
            # condition works for that case too.
            child_greenlet.gr_context = curr_ctx = parent_greenlet.gr_context

        if next_send is None:
            next_yield = child_greenlet.switch()
        else:
            next_yield = child_greenlet.switch(next_send)
        if child_greenlet.dead:
            # target() returned, so next_yield is its return value, not an
            # event loop trap. (If it exits with an exception, that exception
            # will propagate out of switch() and thus out of the loop, which
            # is what we want.)
            return next_yield
        try:
            # Normally we send to orig_coro whatever the event loop sent us
            next_send = outcome.Value((yield next_yield))
        except BaseException as ex:
            # If the event loop resumed us with an error, we forward that error
            next_send = outcome.Error(ex)
Example #21
File: _impl.py Project: oremanj/greenback
def await_(aw: Awaitable[T]) -> T:
    """Run an async function or await an awaitable from a synchronous function,
    using the portal set up for the current async task by :func:`ensure_portal`,
    :func:`bestow_portal`, :func:`with_portal_run`, or :func:`with_portal_run_sync`.

    ``greenback.await_(foo())`` is equivalent to ``await foo()``, except that
    the `greenback` version can be written in a synchronous function while
    the native version cannot.
    """
    try:
        task = current_task()
        if task not in task_has_portal:
            raise RuntimeError(
                "you must 'await greenback.ensure_portal()' in this task first"
            ) from None
        gr = greenlet.getcurrent().parent
    except BaseException:
        if isinstance(aw, collections.abc.Coroutine):
            # Suppress the "coroutine was never awaited" warning
            aw.close()
        raise

    # If this is a non-coroutine awaitable, turn it into a coroutine
    if isinstance(aw, collections.abc.Coroutine):
        coro: Coroutine[Any, Any, T] = aw
        trim_tb_frames = 2
    else:
        coro = adapt_awaitable(aw)
        trim_tb_frames = 3

    # Step through the coroutine until it's exhausted, sending each trap
    # into the portal for the event loop to process.
    next_send: outcome.Outcome[Any] = outcome.Value(None)
    while True:
        try:
            # next_yield is a Future (under asyncio) or a checkpoint
            # or WaitTaskRescheduled marker (under Trio)
            next_yield: Any = next_send.send(coro)  # type: ignore
        except StopIteration as ex:
            return ex.value  # type: ignore
        except BaseException as ex:
            # Trim internal frames for a nicer traceback.
            # ex.__traceback__ covers the next_send.send(coro) line above;
            # its tb_next is in Value.send() or Error.send();
            # and tb_next of that covers the outermost frame in the user's
            # coroutine, which is what interests us.
            tb = ex.__traceback__
            assert tb is not None
            for _ in range(trim_tb_frames):
                if tb.tb_next is None:
                    # If we get here, there were fewer traceback frames
                    # than we expected, meaning we probably didn't
                    # even make it to the user's code. Don't do any
                    # trimming.
                    raise
                tb = tb.tb_next
            exception_from_greenbacked_function = ex.with_traceback(tb)
            # This line shows up in tracebacks, so give the variable a good name
            raise exception_from_greenbacked_function

        # next_send is an outcome.Outcome representing the value or error
        # with which the event loop wants to resume the task
        next_send = gr.switch(next_yield)
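
A hedged end-to-end usage sketch for await_(), assuming trio and greenback are importable; ensure_portal() is the setup call named in the docstring above.

import greenback
import trio

def sync_helper() -> None:
    # An ordinary synchronous function that still awaits, via the portal.
    greenback.await_(trio.sleep(0))

async def main() -> None:
    await greenback.ensure_portal()  # set up the portal for this task
    sync_helper()

trio.run(main)
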
Example #22
File: test_core.py Project: dr5801/qtrio
def test_outcome_from_application_return_code_value():
    """Zero return code results in outcome.Value."""
    result = qtrio._core.outcome_from_application_return_code(return_code=0)

    assert result == outcome.Value(0)
Example #23
 def set(self, value):
     """Set the result to return this value, and wake any waiting task."""
     if self.value is not None:
         return
     self.value = outcome.Value(value)
     self.event.set()
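
A hedged sketch of the companion getter implied by this docstring: waiters block on the event and then replay the stored outcome, so a stored value is returned (and a stored error, if one were ever kept, would be re-raised).

async def get(self):
    await self.event.wait()
    return self.value.unwrap()
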
Example #24
    async def run(self, test_ctx, contextvars_ctx):
        __tracebackhide__ = True

        # This is a gross hack. I guess Trio should provide a context=
        # argument to start_soon/start?
        task = trio.hazmat.current_task()
        assert canary not in task.context
        task.context = contextvars_ctx
        # Force a yield so we pick up the new context
        await trio.sleep(0)
        # Check that it worked, since technically trio doesn't *guarantee*
        # that sleep(0) will actually yield.
        assert canary.get() == "in correct context"

        # This 'with' block handles the nursery fixture lifetime, the
        # teardown_done event, and crashing the context if there's an
        # unhandled exception.
        async with self._fixture_manager(test_ctx) as nursery_fixture:
            # Resolve our kwargs
            resolved_kwargs = {}
            for name, value in self._pytest_kwargs.items():
                if isinstance(value, TrioFixture):
                    await value.setup_done.wait()
                    if value.fixture_value is NURSERY_FIXTURE_PLACEHOLDER:
                        resolved_kwargs[name] = nursery_fixture
                    else:
                        resolved_kwargs[name] = value.fixture_value
                else:
                    resolved_kwargs[name] = value

            # If something's already crashed before we're ready to start, then
            # there's no point in even setting up.
            if test_ctx.crashed:
                return

            # Run actual fixture setup step
            if self._is_test:
                # Tests are exactly like fixtures, except that they (1) have
                # to be regular async functions, (2) if there's a crash, we
                # should cancel them.
                assert not self.user_done_events
                func_value = None
                with trio.CancelScope() as cancel_scope:
                    test_ctx.test_cancel_scope = cancel_scope
                    assert not test_ctx.crashed
                    await self._func(**resolved_kwargs)
            else:
                func_value = self._func(**resolved_kwargs)
                if isinstance(func_value, Coroutine):
                    self.fixture_value = await func_value
                elif isasyncgen(func_value):
                    self.fixture_value = await func_value.asend(None)
                elif isinstance(func_value, Generator):
                    self.fixture_value = func_value.send(None)
                else:
                    # Regular synchronous function
                    self.fixture_value = func_value

            # Notify our users that self.fixture_value is ready
            self.setup_done.set()

            # Wait for users to be finished
            #
            # At this point we're in a very strange state: if the fixture
            # yielded inside a nursery or cancel scope, then we are still
            # "inside" that scope even though its with block is not on the
            # stack. In particular this means that if they get cancelled, then
            # our waiting might get a Cancelled error, that we cannot really
            # deal with – it should get thrown back into the fixture
            # generator, but pytest fixture generators don't work that way:
            #   https://github.com/python-trio/pytest-trio/issues/55
            # And besides, we can't start tearing down until all our users
            # have finished.
            #
            # So if we get an exception here, we crash the context (which
            # cancels the test and starts the cleanup process), save any
            # exception that *isn't* Cancelled (because if its Cancelled then
            # we can't route it to the right place, and anyway the teardown
            # code will get it again if it matters), and then use a shield to
            # keep waiting for the teardown to finish without having to worry
            # about cancellation.
            yield_outcome = outcome.Value(None)
            try:
                for event in self.user_done_events:
                    await event.wait()
            except BaseException as exc:
                assert isinstance(exc, trio.Cancelled)
                yield_outcome = outcome.Error(exc)
                test_ctx.crash(self, None)
                with trio.CancelScope(shield=True):
                    for event in self.user_done_events:
                        await event.wait()

            # Do our teardown
            if isasyncgen(func_value):
                try:
                    await yield_outcome.asend(func_value)
                except StopAsyncIteration:
                    pass
                else:
                    raise RuntimeError("too many yields in fixture")
            elif isinstance(func_value, Generator):
                try:
                    yield_outcome.send(func_value)
                except StopIteration:
                    pass
                else:
                    raise RuntimeError("too many yields in fixture")
Example #25
 async def set(self, value):
     await self.q.put(outcome.Value(value))
Example #26
 async def set(self, value):
     """Set the result to return this value, and wake any waiting task.
     """
     self.value = outcome.Value(value)
     await self.event.set()
Example #27
 async def set(self, value):
     """Set the internal flag value to True, and wake any waiting tasks."""
     self.value = outcome.Value(value)
     await self.event.set()
Example #28
 def send(self, value: SendType) -> None:
     return self.resume(outcome.Value(value))
Example #29
    def handle_io(self, timeout):
        # Step 0: the first time through, initialize the IOCP thread
        if self._iocp_thread is None:
            # The rare non-daemonic thread -- close() should always be called,
            # even on error paths, and we want to join it there.
            self._iocp_thread = threading.Thread(
                target=self._iocp_thread_fn, name="trio-IOCP"
            )
            self._iocp_thread.start()

        # Step 1: select for sockets, with the given timeout.
        # If there are events queued from the IOCP thread, then the timeout is
        # implicitly reduced to 0 b/c the wakeup socket has pending data in
        # it.
        def socket_ready(what, sock, result):
            task = self._socket_waiters[what].pop(sock)
            _core.reschedule(task, result)

        def socket_check(what, sock):
            try:
                select([sock], [sock], [sock], 0)
            except OSError as exc:
                socket_ready(what, sock, outcome.Error(exc))

        def do_select():
            r_waiting = self._socket_waiters["read"]
            w_waiting = self._socket_waiters["write"]
            # We select for exceptional conditions on the writable set because
            # on Windows, a failed non-blocking connect shows up as
            # "exceptional". Everyone else uses "writable" for this, so we
            # normalize it.
            r, w1, w2 = select(r_waiting, w_waiting, w_waiting, timeout)
            return r, set(w1 + w2)

        try:
            r, w = do_select()
        except OSError:
            # Some socket was closed or similar. Track it down and get rid of
            # it.
            for what in ["read", "write"]:
                for sock in self._socket_waiters[what]:
                    socket_check(what, sock)
            r, w = do_select()

        for sock in r:
            if sock is not self._main_thread_waker.wakeup_sock:
                socket_ready("read", sock, outcome.Value(None))
        for sock in w:
            socket_ready("write", sock, outcome.Value(None))

        # Step 2: drain the wakeup socket.
        # This must be done before checking the IOCP queue.
        self._main_thread_waker.drain()

        # Step 3: process the IOCP queue. If new events arrive while we're
        # processing the queue then we leave them for next time.
        # XX should probably have some sort emergency bail out if the queue
        # gets too long?
        for _ in range(len(self._iocp_queue)):
            msg = self._iocp_queue.popleft()
            if isinstance(msg, BaseException):
                # IOCP thread encountered some unexpected error -- give up and
                # let the user know.
                raise msg
            batch, received = msg
            for i in range(received):
                entry = batch[i]
                if entry.lpCompletionKey == 0:
                    # Regular I/O event, dispatch on lpOverlapped
                    waiter = self._overlapped_waiters.pop(entry.lpOverlapped)
                    _core.reschedule(waiter)
                elif entry.lpCompletionKey == 1:
                    # Post made by a regular I/O event's abort_fn
                    # after it failed to cancel the I/O. If we still
                    # have a waiter with this lpOverlapped, we didn't
                    # get the regular I/O completion and almost
                    # certainly the user forgot to call
                    # register_with_iocp.
                    self._posted_too_late_to_cancel.remove(entry.lpOverlapped)
                    try:
                        waiter = self._overlapped_waiters.pop(
                            entry.lpOverlapped
                        )
                    except KeyError:
                        # Looks like the actual completion got here
                        # before this fallback post did -- we're in
                        # the "expected" case of too-late-to-cancel,
                        # where the user did nothing wrong and the
                        # main thread just got backlogged relative to
                        # the IOCP thread somehow. Nothing more to do.
                        pass
                    else:
                        exc = _core.TrioInternalError(
                            "Failed to cancel overlapped I/O in {} and didn't "
                            "receive the completion either. Did you forget to "
                            "call register_with_iocp()?".format(waiter.name)
                        )
                        # Raising this out of handle_io ensures that
                        # the user will see our message even if some
                        # other task is in an uncancellable wait due
                        # to the same underlying forgot-to-register
                        # issue (if their CancelIoEx succeeds, we
                        # have no way of noticing that their completion
                        # won't arrive). Unfortunately it loses the
                        # task traceback. If you're debugging this
                        # error and can't tell where it's coming from,
                        # try changing this line to
                        # _core.reschedule(waiter, outcome.Error(exc))
                        raise exc
                else:
                    # dispatch on lpCompletionKey
                    queue = self._completion_key_queues[entry.lpCompletionKey]
                    overlapped = int(ffi.cast("uintptr_t", entry.lpOverlapped))
                    transferred = entry.dwNumberOfBytesTransferred
                    info = CompletionKeyEventInfo(
                        lpOverlapped=overlapped,
                        dwNumberOfBytesTransferred=transferred,
                    )
                    queue.put_nowait(info)
Example #30
File: test_core.py Project: dr5801/qtrio
def test_outcomes_unwrap_returns_qt_value_over_trio_none():
    """Unwrapping an Outcomes prioritizes a Qt value over a Trio None."""
    this_outcome = qtrio.Outcomes(qt=outcome.Value(3))
    result = this_outcome.unwrap()

    assert result == 3