Example #1
    async def _open_channel(self, predicate, buffer):
        send_channel, recv_channel = trio.open_memory_channel(buffer)

        async with send_channel, recv_channel:
            # Register this channel together with its predicate while
            # holding the lock.
            async with self._lock:
                self._send_channels[send_channel] = predicate

            try:
                yield recv_channel

            finally:
                # Shield the teardown so the channel is always unregistered,
                # even if the surrounding task has already been cancelled.
                with trio.CancelScope(shield=True):
                    async with self._lock:
                        del self._send_channels[send_channel]
Example #2
 def __init__(self, app, vault, update_on_idle=False):
     self.app = app
     self.vault = vault
     self.lock = trio.Lock()
     self.ready = False
     self.nursery = None  # type: Optional[Nursery]
     self.update_on_idle = update_on_idle
     self.logger = VaultLoggerAdapter(self.vault,
                                      logging.getLogger(__name__))
     send_channel, receive_channel = trio.open_memory_channel(
         128)  # type: Tuple[trio.abc.SendChannel, trio.abc.ReceiveChannel]
     self.file_changes_send_channel = send_channel  # type: trio.abc.SendChannel
     self.file_changes_receive_channel = receive_channel  # type: trio.abc.ReceiveChannel
     self.cancel_scope = trio.CancelScope()
Example #3
 def open_child(self, *, shield: Optional[bool] = None) -> trio.CancelScope:
     """Return a new child cancel scope.
     The child will start out cancelled if the parent
     :meth:`cancel` method has been called. Its initial shield state
     is given by the ``shield`` argument, or by the parent's
     :attr:`shield` attribute if the ``shield`` argument is not specified.
     """
     if shield is None:
         shield = self._shield
     new_scope = trio.CancelScope(shield=shield)
     if self._cancel_called:
         new_scope.cancel()
     self._child_scopes.add(new_scope)
     return new_scope
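A minimal sketch (not part of the class above, just the public trio API) of the behavior open_child() relies on: a CancelScope can be cancelled before it is ever entered, and its shield flag can be set at construction time.

import trio

async def main():
    scope = trio.CancelScope(shield=False)
    scope.cancel()  # cancel before entering, as open_child() does for an already-cancelled parent
    with scope:
        await trio.sleep(10)  # cancelled at the first checkpoint inside the scope
        print("never reached")
    print("cancelled_caught =", scope.cancelled_caught)  # True

trio.run(main)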
Example #4
 async def _do_writes(self) -> None:
     async with self.running_write.needs_run() as needs_run:
         if needs_run:
             writes = self.pending_writes
             self.pending_writes = []
             writes = [op for op in writes if not op.cancelled]
             if len(writes) == 0:
                 return
             # TODO we should not use a cancel scope shield, we should use the SyscallResponse API
             with trio.CancelScope(shield=True):
                 await self._unlocked_batch_write([(write.dest, write.data)
                                                   for write in writes])
             for write in writes:
                 write.done = True
Example #5
 async def run(cls, device: LocalDevice, localdb: LocalDatabase,
               realm_id: EntryID) -> AsyncIterator["ManifestStorage"]:
     self = cls(device, localdb, realm_id)
     await self._create_db()
     try:
         yield self
     finally:
         with trio.CancelScope(shield=True):
             # Flush the in-memory cache before closing the storage
             try:
                 await self._flush_cache_ahead_of_persistance()
             # Ignore storage closed exceptions, since it follows an operational error
             except FSLocalStorageClosedError:
                 pass
Example #6
    async def _manage_operational_error(self,
                                        allow_commit: bool = False
                                        ) -> AsyncIterator[None]:
        """Close the local database when an operational error is detected

        Operational errors have to be treated with care since they usually indicate
        that the current transaction has been rolled back. Since parsec has its own
        in-memory cache for manifests, this can lead to complicated bugs and possibly
        database corruptions (in the sense that a committed manifest might reference a
        chunk of data that has not been successfully committed). See issue #1535 for an
        example of such a problem.

        If an operational error is detected we simply close the connection and invalidate
        the local database object while raising an FSLocalStorageOperationalError exception.
        This way, we force the core to create new workspace storage objects, thereby
        discarding any uncommitted data.
        """
        in_transaction_before = self._conn.in_transaction
        # Safe context for operational errors
        try:
            try:
                yield

            # Extra checks for end of transaction
            finally:
                end_of_transaction_detected = (in_transaction_before and
                                               not self._conn.in_transaction)
                if not allow_commit and end_of_transaction_detected:
                    raise OperationalError(
                        "A forbidden commit/rollback has been detected")

        # An operational error has been detected
        except OperationalError as exception:

            with trio.CancelScope(shield=True):

                # Close the sqlite3 connection
                try:
                    await self.run_in_thread(self._conn.close)

                # Ignore second operational error (it should not happen though)
                except OperationalError:
                    pass

                # Mark the local database as closed
                finally:
                    del self._conn

                # Raise the dedicated operational error
                raise FSLocalStorageOperationalError from exception
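A generic, minimal sketch of the cleanup pattern above; the resource object and StorageError are hypothetical stand-ins, not parsec's API. Once an error is caught, the asynchronous cleanup is wrapped in a shielded CancelScope so it runs to completion even if the surrounding task has already been cancelled.

import trio

class StorageError(Exception):
    """Hypothetical stand-in for FSLocalStorageOperationalError."""

async def use_resource(resource):
    try:
        await resource.do_work()  # hypothetical operation that may fail
    except RuntimeError as exc:
        # Shield the async cleanup: it must finish even if this task has
        # already been cancelled from the outside.
        with trio.CancelScope(shield=True):
            await resource.aclose()
        raise StorageError("resource closed after operational error") from exc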
Example #7
async def tooltip_task(show_tooltip, hide_tooltip, show_delay, hide_delay,
                       task_status):
    """Manage a tooltip window visibility, position, and text."""

    send_chan, recv_chan = trio.open_memory_channel(0)
    cancel_scope = trio.CancelScope()  # dummy starter object

    async def single_show_hide(task_status):
        with cancel_scope:
            task_status.started()
            if text is None:
                return
            await trio.sleep(show_delay)
            show_tooltip(x, y, text)
            await trio.sleep(hide_delay)
        hide_tooltip()

    async with trio.open_nursery() as nursery:
        task_status.started(send_chan)
        async for x, y, text in recv_chan:
            cancel_scope.cancel()
            cancel_scope = trio.CancelScope()
            await nursery.start(single_show_hide)
Example #8
File: _debug.py Project: goodboy/tractor
async def maybe_wait_for_debugger(
    poll_steps: int = 2,
    poll_delay: float = 0.1,
    child_in_debug: bool = False,
) -> None:

    if not debug_mode() and not child_in_debug:
        return

    if (is_root_process()):
        global _no_remote_has_tty, _global_actor_in_debug, _wait_all_tasks_lock

        # If we error in the root but the debugger is
        # engaged we don't want to prematurely kill (and
        # thus clobber access to) the local tty since it
        # will make the pdb repl unusable.
        # Instead try to wait for pdb to be released before
        # tearing down.

        sub_in_debug = None

        for _ in range(poll_steps):

            if _global_actor_in_debug:
                sub_in_debug = tuple(_global_actor_in_debug)

            log.debug('Root polling for debug')

            with trio.CancelScope(shield=True):
                await trio.sleep(poll_delay)

                # TODO: could this make things more deterministic?  wait
                # to see if a sub-actor task will be scheduled and grab
                # the tty lock on the next tick?
                # XXX: doesn't seem to work
                # await trio.testing.wait_all_tasks_blocked(cushion=0)

                debug_complete = _no_remote_has_tty
                if ((debug_complete and not debug_complete.is_set())):
                    log.debug('Root has errored but pdb is in use by '
                              f'child {sub_in_debug}\n'
                              'Waiting on tty lock to release..')

                    await debug_complete.wait()

                await trio.sleep(poll_delay)
                continue
        else:
            log.debug('Root acquired TTY LOCK')
Example #9
 def _decref(self, ref: 'WidgetWeakRef | Handle') -> None:
     """A label was no longer set to this handle."""
     if self._force_loaded or (self._cached_tk is None and self._cached_pil is None):
         return
     self._users.discard(ref)
     if self.type is TYP_COMP:
         for child in cast('Sequence[Handle]', self.arg):
             child._decref(self)
     elif self.type is TYP_CROP:
         cast(CropInfo, self.arg).source._decref(self)
     if not self._users and _load_nursery is not None:
          # Schedule this handle to be cleaned up, and store a cancel scope so
          # that the cleanup can be aborted later.
         self._cancel_cleanup = trio.CancelScope()
         _load_nursery.start_soon(self._cleanup_task, self._cancel_cleanup)
Example #10
File: vlctrio.py Project: KenT2/pp-vlc
 async def gpio_task(self):
     gpio.setwarnings(True)
     gpio.setmode(gpio.BOARD)
     # dummy to stop gpio complaining
     gpio.setup(11, gpio.IN)
     count = 0
     # use a CancelScope to end this simple task
     with trio.CancelScope() as self.gpio_scope:
         while True:
             print("  gpio: sample inputs", count)
             count += 1
             #await send_channel.send(['event ',str(count)])
             await trio.sleep(5)
     gpio.cleanup()
     print("  gpio: exiting!")
Example #11
async def test_unmount_due_to_cancelled_scope(base_mountpoint, alice,
                                              alice_user_fs, event_bus):
    mountpoint_path = base_mountpoint / "w"
    wid = await alice_user_fs.workspace_create("w")

    with trio.CancelScope() as cancel_scope:
        async with mountpoint_manager_factory(
                alice_user_fs, event_bus,
                base_mountpoint) as mountpoint_manager:

            await mountpoint_manager.mount_workspace(wid)
            cancel_scope.cancel()

    # Mountpoint path should be removed on umounting
    assert not await trio.Path(mountpoint_path).exists()
Example #12
            async def consume(task_status=trio.TASK_STATUS_IGNORED):
                print('starting consume task..')
                nonlocal stream

                with trio.CancelScope() as cs:
                    task_status.started(cs)

                    # shield stream's underlying channel from cancellation
                    # with stream.shield():
                    async for v in stream:
                        print(f'from stream: {v}')
                        expect.remove(v)
                        received.append(v)

                    print('exited consume')
Example #13
 async def spinner_scope():
     nonlocal outstanding_scopes, pending_or_active_cscope
     nonlocal ending_or_inactive_cscope, spinner_pending_or_active
     ending_or_inactive_cscope.cancel()
     outstanding_scopes += 1
     with trio.CancelScope() as cancel_scope:
         try:
             await spinner_pending_or_active.wait()
             # Invariant: spinner_pending_or_active set while any scopes entered
             # Invariant: pending_or_active_cscope entered while any scopes entered
             # (the former ensures the latter)
             yield cancel_scope
         finally:
             assert outstanding_scopes > 0
             outstanding_scopes -= 1
             # Invariant: if zero, event states are equivalent to initial state
             # just after calling task_status.started
             if not outstanding_scopes:
                 # these actions must occur atomically to satisfy the invariant
                 # because the very next task scheduled may open a spinner_scope
                 pending_or_active_cscope.cancel()
                 spinner_pending_or_active = trio.Event()
                 ending_or_inactive_cscope = trio.CancelScope()
                 pending_or_active_cscope = trio.CancelScope()
Example #14
 async def execf(self, tid, res, fn):
     with self.wg, \
          trio.CancelScope() as cs:
         self.cg.add(cs)
         GVAR.res = res
         GVAR.instance = "local"
         try:
             with optional_cm(trio.fail_after, getattr(res,'T',0)):
                 ok, r = True, await fn()
                 self.health = FULL_HEALTH
         except BaseException as e:
             if type(e) is trio.Cancelled: raise
             ok, r = False, (traceback.format_exc(), e)
         self.cg.remove(cs)
         return ok, r
Example #15
 async def protocol_send(self, event: Event) -> None:
     if isinstance(event, RawData):
         async with self.send_lock:
             try:
                 with trio.CancelScope() as cancel_scope:
                     cancel_scope.shield = True
                     await self.stream.send_all(event.data)
             except (trio.BrokenResourceError, trio.ClosedResourceError):
                 await self.protocol.handle(Closed())
     elif isinstance(event, Closed):
         await self._close()
         await self.protocol.handle(Closed())
     elif isinstance(event, Updated):
         pass  # Triggers the keep alive timeout update
     await self._update_keep_alive_timeout()
Example #16
File: kraken.py Project: pikers/piker
async def backfill_bars(

    sym: str,
    shm: ShmArray,  # type: ignore # noqa
    count: int = 10,  # NOTE: any more and we'll overrun the underlying buffer
    task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED,

) -> None:
    '''
    Fill historical bars into shared mem / storage afap.
    '''
    with trio.CancelScope() as cs:
        async with open_cached_client('kraken') as client:
            bars = await client.bars(symbol=sym)
            shm.push(bars)
            task_status.started(cs)
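The task_status.started(cs) idiom above hands the task's own CancelScope back to whoever called nursery.start(), so the caller can later cancel just that background task. A minimal, self-contained sketch of the idiom (unrelated to piker's API):

import trio

async def worker(task_status=trio.TASK_STATUS_IGNORED):
    with trio.CancelScope() as cs:
        task_status.started(cs)  # hand the caller a handle for cancelling us
        await trio.sleep_forever()

async def main():
    async with trio.open_nursery() as nursery:
        cs = await nursery.start(worker)
        await trio.sleep(0.1)
        cs.cancel()  # cancels only the worker task, not the whole nursery

trio.run(main)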
Example #17
 async def _call_async(self, task_status=trio.TASK_STATUS_IGNORED):
     assert not self._is_sync
     if self._cancelled:
         return
     task_status.started()
     try:
         with trio.CancelScope() as scope:
             self._scope = scope
             if self._is_sync is None:
                 await self._callback(self)
             else:
                 await self._callback(*self._args)
     except Exception as exc:
         self._raise(exc)
     finally:
         self._scope = None
Example #18
        async def callback(value: datastore.abc.ReceiveStream) -> None:
            try:
                await self._receive_and_write(file, value)
            except BaseException:
                try:
                    await file.aclose()
                finally:
                    with trio.CancelScope(shield=True):
                        await file.unlink()
                raise
            else:
                await file.aclose()

                if self._stats is not None:
                    async with self._stats_lock:  # type: ignore[union-attr]
                        await self._put_replace(file.name, target_file.name)
Example #19
async def test_predicate_eval_scope(wait_function, predicate_return, nursery):
    # predicate evaluations are not expected outside of wait_* method lifetime
    x = AsyncValue(0)
    predicate = Mock(return_value=predicate_return)
    cancel_scope = trio.CancelScope()

    @nursery.start_soon
    async def _wait():
        with cancel_scope:
            await wait_function(x, predicate)

    await wait_all_tasks_blocked()
    predicate_call_count = predicate.call_count
    cancel_scope.cancel()
    await wait_all_tasks_blocked()
    x.value = 10
    assert predicate.call_count == predicate_call_count
Example #20
async def _stop_fuse_thread(mountpoint_path, fuse_operations,
                            fuse_thread_stopped):
    if fuse_thread_stopped.is_set():
        return

    # Ask for a dummy file just to force a fuse operation that will
    # process the `fuse_exit` from a valid context

    with trio.CancelScope(shield=True):
        logger.info("Stopping fuse thread...", mountpoint=mountpoint_path)
        fuse_operations.schedule_exit()
        try:
            await trio.Path(mountpoint_path / "__shutdown_fuse__").exists()
        except OSError:
            pass
        await trio.to_thread.run_sync(fuse_thread_stopped.wait)
        logger.info("Fuse thread stopped", mountpoint=mountpoint_path)
Example #21
async def test_start(autojump_clock: trio.testing.MockClock) -> None:
    record = []

    async def sleep_then_start(val: int, *, task_status: TaskStatus[int]) -> None:
        await trio.sleep(1)
        task_status.started(val)
        try:
            await trio.sleep(10)
            record.append("background task finished")  # pragma: no cover
        finally:
            record.append("background task exiting")

    async def shielded_sleep_then_start(*, task_status: TaskStatus[None]) -> None:
        with trio.CancelScope(shield=True):
            await trio.sleep(1)
        task_status.started()
        await trio.sleep(10)

    async with open_service_nursery() as nursery:
        # Child can be cancelled normally while it's starting
        with trio.move_on_after(0.5) as scope:
            await nursery.start(sleep_then_start, 1)
        assert scope.cancelled_caught
        assert not nursery.child_tasks

        # If started() is the first thing to notice a cancellation, the task
        # stays in the old nursery and remains unshielded
        with trio.move_on_after(0.5) as scope:
            await nursery.start(shielded_sleep_then_start)
        assert scope.cancelled_caught
        assert not nursery.child_tasks

        assert trio.current_time() == 1.5

        # Otherwise, once started() is called the child is shielded until
        # the 'async with' block exits.
        assert 42 == await nursery.start(sleep_then_start, 42)
        assert trio.current_time() == 2.5

        nursery.cancel_scope.cancel()
        with trio.CancelScope(shield=True):
            await trio.sleep(1)
        record.append("parent task finished")

    assert trio.current_time() == 3.5
    assert record == ["parent task finished", "background task exiting"]
Example #22
async def pack_matches(

    view: CompleterView,
    has_results: dict[str, set[str]],
    matches: dict[tuple[str, str], list[str]],
    provider: str,
    pattern: str,
    search: Callable[..., Awaitable[dict]],

    task_status: TaskStatus[
        trio.CancelScope] = trio.TASK_STATUS_IGNORED,

) -> None:

    log.info(f'Searching {provider} for "{pattern}"')

    if provider != 'cache':
        # insert provider entries with search status
        view.set_section_entries(
            section=provider,
            values=[],
        )
        view.clear_section(provider, status_field='-> searchin..')

    else:  # for the cache just clear it's entries and don't put a status
        view.clear_section(provider)

    with trio.CancelScope() as cs:
        task_status.started(cs)
        # ensure ^ status is updated
        results = await search(pattern)

    if provider != 'cache':  # XXX: don't cache the cache results xD
        matches[(provider, pattern)] = results

        # print(f'results from {provider}: {results}')
        has_results[pattern].add(provider)

    if results:
        # display completion results
        view.set_section_entries(
            section=provider,
            values=results,
        )
    else:
        view.clear_section(provider)
Example #23
    async def write_request(self, syscall: Syscall) -> ConnectionResponse:
        """Write a syscall request, returning a ConnectionResponse

        The ConnectionResponse will eventually have .result set to contain the
        syscall return value; you can call read_pending_responses to do work on
        the connection until that happens.

        """
        request = ConnectionRequest(syscall)
        self.pending_requests.append(request)
        # TODO as a hack, so we don't have to figure it out now, we don't allow
        # a syscall request to be cancelled before it's actually made. we could
        # make this work later, and that would reduce some blocking from waitid
        with trio.CancelScope(shield=True):
            while request.response is None:
                await self._write_pending_requests()
        return request.response
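A small sketch (independent of the rsyscall code above) of what shield=True buys here: even when an enclosing scope is cancelled, via a timeout in this case, the shielded body keeps running until it finishes on its own.

import trio

async def main():
    with trio.move_on_after(0.1) as outer:
        with trio.CancelScope(shield=True):
            await trio.sleep(0.5)  # the outer timeout cannot interrupt this
            print("shielded work finished")
        await trio.sleep(1)  # first unshielded checkpoint: cancelled here
        print("never reached")
    print("outer.cancelled_caught =", outer.cancelled_caught)  # True

trio.run(main)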
Example #24
    async def __call__(self, title='Please wait', ticks=100):
        '''Shows the dialog and creates a context for "ticking".

        Args:
            title (str): dialog window title, default "Please wait"
            ticks (int): total number of expected ticks (can be approximate). Establishes
                the "scale" of the progress bar.

        Within this context one can use :meth:`tick()` to advance the progress and
        change tick label.
        '''
        if ticks <= 0:
            raise ValueError('units should be positive')

        if self._level > 0:
            # allow passing progress object on to the sub-routines
            yield self
            return

        self._ui = QProgressDialog(title, 'Abort', 0, ticks, self._parent)
        self._ui.setWindowModality(Qt.WindowModality.WindowModal)
        self._ui.setMinimumDuration(500)
        self._throttler = _ThrottlingProgressProxy(self._ui)

        with trio.CancelScope() as cancel_scope:
            try:
                self._level += 1
                with connect(self._ui.canceled, cancel_scope.cancel):
                    yield
            except (Exception, trio.Cancelled) as err:
                self._ui.hide()
                message = str(err)
                if sys.exc_info()[0] != None:
                    import traceback
                    stack = traceback.format_exc(10)
                    if stack:
                        message += '\n' + stack

                QMessageBox.critical(self._parent, 'Error', message)
            finally:
                self._level -= 1

                self._ui.setValue(self._ui.maximum())
                self._ui.hide()
                self._ui.destroy()
                self._ui = None
Example #25
File: _ahab.py Project: pikers/piker
async def open_ahabd(
    ctx: tractor.Context,
    endpoint: str,  # ns-pointer str-msg-type
    **kwargs,
) -> None:
    get_console_log('info', name=__name__)

    async with open_docker() as client:

        # TODO: eventually offer a config-oriented API to do the mounts,
        # params, etc., passing to ``Container.run()``?
        # call into endpoint for container config/init
        ep_func = NamespacePath(endpoint).load_ref()
        dcntr, cntr_config = ep_func(client)
        cntr = Container(dcntr)

        with trio.move_on_after(1):
            found = await cntr.process_logs_until(
                "launching tcp listener for all services...", )

            if not found and cntr not in client.containers.list():
                raise RuntimeError(
                    'Failed to start `marketstore`, check logs for details')

        await ctx.started((
            cntr.cntr.id,
            os.getpid(),
            cntr_config,
        ))

        try:

            # TODO: we might eventually want a proxy-style msg-prot here
            # to allow remote control of containers without needing
            # callers to have root perms?
            await trio.sleep_forever()

        except (BaseException,
                # trio.Cancelled,
                # KeyboardInterrupt,
                ):

            with trio.CancelScope(shield=True):
                await cntr.cancel()

            raise
Example #26
 async def _writer_loop(self,
                        fd,
                        handle,
                        task_status=trio.TASK_STATUS_IGNORED):
     with trio.CancelScope() as scope:
         handle._scope = scope
         task_status.started()
         try:
             while not handle._cancelled:  # pragma: no branch
                 await _wait_writable(fd)
                 handle._call_sync()
                 await self.synchronize()
         except Exception as exc:
             _h_raise(handle, exc)
             return
         finally:
             handle._scope = None
Example #27
    def __init__(
        self,
        name: str,
        daemon: bool,
        parent: Optional[TaskWithChildrenAPI],
        async_fn: AsyncFn,
        async_fn_args: Sequence[Any],
    ) -> None:
        super().__init__(name, daemon, parent, async_fn, async_fn_args)

        # We use an event to manually track when the child task is "done".
        # This is because trio has no API for awaiting completion of a task.
        self._done = trio.Event()

        # Each task gets its own `CancelScope` which is how we can manually
        # control cancellation order of the task DAG
        self._cancel_scope = trio.CancelScope()
Example #28
    async def _vanilla_write_read(self, cmd: str, data: dict) -> dict:
        packet = self._packet
        self._packet += 1
        data = {
            'data': data,
            'cmd': cmd,
            'packet': packet,
        }
        data = self.encode(data)

        # there's no mechanism currently to cancel and also cancel on remote
        with trio.CancelScope(shield=True):
            await self.write_socket(data, self.socket)
            res = await self.read_decode_json_buffers(self.socket)
            self.raise_return_value(res, packet)

        return res
Example #29
 async def __run_trio(self, h):
     """Helper for copying the result of a Trio task to an asyncio future"""
     f, proc, *args = h._args
     if f.cancelled():  # pragma: no cover
         return
     try:
         with trio.CancelScope() as scope:
             h._scope = scope
             res = await proc(*args)
         if scope.cancelled_caught:
             f.cancel()
             return
     except BaseException as exc:
         if not f.cancelled():  # pragma: no branch
             f.set_exception(exc)
     else:
         if not f.cancelled():  # pragma: no branch
             f.set_result(res)
Example #30
File: dialogs.py Project: battyone/qtrio
    async def manage(self) -> typing.AsyncIterator[None]:
        """A context manager to setup the progress dialog, cancel the managed context
        and teardown the dialog when done.
        """
        with _manage(dialog=self):
            if self.dialog is None:  # pragma: no cover
                raise qtrio.InternalError(
                    "Dialog not assigned while it is being managed."
                )

            with trio.CancelScope() as cancel_scope:
                with qtrio._qt.connection(
                    signal=self.dialog.canceled, slot=cancel_scope.cancel
                ):
                    yield

            if self.dialog.wasCanceled():
                raise qtrio.UserCancelledError()