Code example #1
    async def main():

        # make sure it all works within the runtime
        async with tractor.open_root_actor():

            tx, rx = trio.open_memory_channel(1)
            brx = broadcast_receiver(rx, 1)
            cs = trio.CancelScope()

            async def sub_and_recv():
                with cs:
                    async with brx.subscribe() as bc:
                        async for value in bc:
                            print(value)

            async def cancel_and_send():
                await trio.sleep(0.2)
                cs.cancel()
                await tx.send(1)

            async with trio.open_nursery() as n:

                n.start_soon(sub_and_recv)
                await trio.sleep(0.1)
                assert brx._state.recv_ready

                n.start_soon(cancel_and_send)

                # ensure we don't hang just because no task is
                # currently waiting on the underlying receive..
                with trio.fail_after(0.5):
                    value = await brx.receive()
                    print(f'parent: {value}')
                    assert value == 1
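This fragment appears to come from tractor's broadcast-channel test suite; a minimal import preamble it relies on, assuming the broadcast API is exported from tractor.trionics as in recent tractor releases:

import trio
import tractor
from tractor.trionics import broadcast_receiver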
Code example #2
async def spawn(is_arbiter, data, arb_addr):
    namespaces = [__name__]

    await trio.sleep(0.1)

    async with tractor.open_root_actor(arbiter_addr=arb_addr):

        actor = tractor.current_actor()
        assert actor.is_arbiter == is_arbiter
        data = data_to_pass_down  # presumably a module-level constant in the source test file

        if actor.is_arbiter:

            async with tractor.open_nursery() as nursery:

                # forks here
                portal = await nursery.run_in_actor(
                    spawn,
                    is_arbiter=False,
                    name='sub-actor',
                    data=data,
                    arb_addr=arb_addr,
                    enable_modules=namespaces,
                )

                assert len(nursery._children) == 1
                assert portal.channel.uid in tractor.current_actor()._peers
                # be sure we can still get the result
                result = await portal.result()
                assert result == 10
                return result
        else:
            return 10
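A plausible driver for this test coroutine, sketched here since the snippet leaves it out (the payload and address tuple are illustrative, and data_to_pass_down stands in for the module-level constant noted above):

import trio

# hypothetical module-level constant from the source test file
data_to_pass_down = {'doggy': 10, 'kitty': 4}

if __name__ == '__main__':
    # positional args since trio.run() doesn't forward kwargs
    result = trio.run(spawn, True, data_to_pass_down, ('127.0.0.1', 1616))
    assert result == 10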
Code example #3
File: test_discovery.py  Project: goodboy/tractor
async def close_chans_before_nursery(
    arb_addr: tuple,
    use_signal: bool,
    remote_arbiter: bool = False,
) -> None:

    # logic for how many actors should still be
    # in the registry at teardown.
    if remote_arbiter:
        entries_at_end = 2
    else:
        entries_at_end = 1

    async with tractor.open_root_actor(arbiter_addr=arb_addr):
        async with tractor.get_arbiter(*arb_addr) as aportal:
            try:
                get_reg = partial(unpack_reg, aportal)

                async with tractor.open_nursery() as tn:
                    portal1 = await tn.start_actor(name='consumer1',
                                                   enable_modules=[__name__])
                    portal2 = await tn.start_actor('consumer2',
                                                   enable_modules=[__name__])

                    # TODO: compact this back into a single parenthesized
                    # multi-context block (as in the last commit) once on
                    # 3.9+, see https://github.com/goodboy/tractor/issues/207
                    async with portal1.open_stream_from(
                        stream_forever) as agen1:
                        async with portal2.open_stream_from(
                                stream_forever) as agen2:
                            async with trio.open_nursery() as n:
                                n.start_soon(streamer, agen1)
                                n.start_soon(cancel, use_signal, 0.5)
                                try:
                                    await streamer(agen2)
                                finally:
                                    # Kill the root nursery, thus causing
                                    # normal arbiter channel ops to fail
                                    # during teardown. It doesn't seem like
                                    # this is reliably triggered by an
                                    # external SIGINT.
                                    # tractor.current_actor()._root_nursery.cancel_scope.cancel()

                                    # XXX: THIS IS THE KEY THING that
                                    # happens **before** exiting the
                                    # actor nursery block

                                    # also kill off channels cuz why not
                                    await agen1.aclose()
                                    await agen2.aclose()
            finally:
                with trio.CancelScope(shield=True):
                    await trio.sleep(1)

                    # all subactors should have de-registered
                    registry = await get_reg()
                    assert portal1.channel.uid not in registry
                    assert portal2.channel.uid not in registry
                    assert len(registry) == entries_at_end
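unpack_reg, stream_forever, streamer, and cancel are helpers defined elsewhere in test_discovery.py; beyond those, the snippet needs at least this preamble:

from functools import partial

import trio
import tractor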
Code example #4
    async def print_loop():

        async with tractor.open_root_actor(arbiter_addr=arb_addr):
            # the arbiter is started in-proc if one doesn't already exist
            assert tractor.current_actor().is_arbiter

            for i in range(10):
                nums.append(i)
                await trio.sleep(0.1)
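This closure references nums and arb_addr from an enclosing test function; a minimal sketch of that scope (the registry address is illustrative):

nums: list[int] = []
arb_addr = ('127.0.0.1', 1616)  # hypothetical (host, port) registry address

trio.run(print_loop)
assert nums == list(range(10))  # illustrative post-condition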
Code example #5
File: _tractor_test.py  Project: goodboy/tractor
            async def _main():
                async with tractor.open_root_actor(
                        # **kwargs,
                        arbiter_addr=arb_addr,
                        loglevel=loglevel,
                        start_method=start_method,

                        # TODO: only enable when pytest is passed --pdb
                        # debug_mode=True,
                ) as actor:
                    await fn(*args, **kwargs)
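This is the inner coroutine built by tractor's tractor_test decorator; the decorator presumably finishes by handing it straight to trio's entrypoint:

return trio.run(_main)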
Code example #6
File: _exec.py  Project: ajmal017/piker
    async def main():

        async with tractor.open_root_actor(
                arbiter_addr=(
                    tractor._root._default_arbiter_host,
                    tractor._root._default_arbiter_port,
                ),
                name='qtractor',
                **tractor_kwargs,
        ) as a:
            await func(*(args + (widgets, )))
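In piker's _exec.py this main() is, judging from the Qt integration around it, most likely driven via trio's guest mode on top of the Qt event loop rather than trio.run(); roughly (both callbacks are hypothetical placeholders):

import trio

trio.lowlevel.start_guest_run(
    main,
    run_sync_soon_threadsafe=schedule_on_qt_loop,  # hypothetical Qt-loop bridge
    done_callback=on_trio_done,                    # hypothetical completion hook
)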
Code example #7
File: _daemon.py  Project: pikers/piker
async def open_pikerd(
    start_method: str = 'trio',
    loglevel: Optional[str] = None,

    # XXX: you should pretty much never want debug mode
    # for data daemons when running in production.
    debug_mode: bool = False,

) -> Optional[tractor._portal.Portal]:
    '''
    Start a root piker daemon whose lifetime extends indefinitely
    until cancelled.

    A root actor nursery is created which can be used to create and keep
    alive underling services (see below).

    '''
    global _services
    assert _services is None

    # XXX: this may open a root actor as well
    async with (
        tractor.open_root_actor(

            # passed through to ``open_root_actor``
            arbiter_addr=_registry_addr,
            name=_root_dname,
            loglevel=loglevel,
            debug_mode=debug_mode,
            start_method=start_method,

            # TODO: eventually we should be able to avoid
            # giving the root anything more than the permissions
            # needed to spawn other specialized daemons, I think?
            enable_modules=_root_modules,
        ) as _,

        tractor.open_nursery() as actor_nursery,
    ):
        async with trio.open_nursery() as service_nursery:

            # # setup service mngr singleton instance
            # async with AsyncExitStack() as stack:

            # assign globally for future daemon/task creation
            _services = Services(
                actor_n=actor_nursery,
                service_n=service_nursery,
                debug_mode=debug_mode,
            )

            yield _services
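Since the function yields, it is presumably wrapped as an async context manager; a hedged usage sketch:

async with open_pikerd(loglevel='info') as services:
    # spawn and supervise long-lived sub-daemons through the
    # Services singleton for the lifetime of this block
    ...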
Code example #8
File: _daemon.py  Project: pikers/piker
async def maybe_open_runtime(
    loglevel: Optional[str] = None,
    **kwargs,

) -> None:
    """
    Start the ``tractor`` runtime (a root actor) if none exists.

    """
    settings = _tractor_kwargs
    settings.update(kwargs)

    if not tractor.current_actor(err_on_no_runtime=False):
        async with tractor.open_root_actor(
            loglevel=loglevel,
            **settings,
        ):
            yield
    else:
        yield
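A hedged usage sketch, again assuming an async-context-manager wrapper as the yield pattern implies; the call is safe whether or not a tractor runtime is already up:

async with maybe_open_runtime(loglevel='info'):
    # a root actor now exists either way
    assert tractor.current_actor()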
Code example #9
    async def main():
        cache_active: bool = False

        async def enter_cached_mngr(name: str):
            nonlocal cache_active

            if key_on == 'kwargs':
                # make a common kwargs input to key on it
                kwargs = {'task_name': 'same_task_name'}
                assert key is None
            else:
                # different task names per task will be used
                kwargs = {'task_name': name}

            async with tractor.trionics.maybe_open_context(
                maybe_increment_counter,
                kwargs=kwargs,
                key=key,

            ) as (cache_hit, resource):
                if cache_hit:
                    try:
                        cache_active = True
                        assert resource == 1
                        await trio.sleep_forever()
                    finally:
                        cache_active = False
                else:
                    assert resource == 1
                    await trio.sleep_forever()

        with trio.move_on_after(0.5):
            async with (
                tractor.open_root_actor(),
                trio.open_nursery() as n,
            ):

                for i in range(10):
                    n.start_soon(enter_cached_mngr, f'task_{i}')
                    await trio.sleep(0.001)
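maybe_increment_counter, key_on, and key come from the enclosing test. A purely illustrative shape for the cached manager (not the source's code) that would satisfy the resource == 1 assertions above:

from contextlib import asynccontextmanager as acm

_counter: int = 0

@acm
async def maybe_increment_counter(task_name: str):
    # only the first maybe_open_context() entrant actually runs
    # this body; all later (cache-hit) entrants share the same
    # yielded value
    global _counter
    _counter += 1
    try:
        yield _counter
    finally:
        _counter -= 1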
Code example #10
File: _daemon.py  Project: pikers/piker
async def open_piker_runtime(
    name: str,
    enable_modules: list[str] = [],
    start_method: str = 'trio',
    loglevel: Optional[str] = None,

    # XXX: you should pretty much never want debug mode
    # for data daemons when running in production.
    debug_mode: bool = False,

) -> Optional[tractor._portal.Portal]:
    '''
    Start a piker actor whose runtime will automatically sync
    with existing piker actors on the local network based on
    configuration.

    '''
    global _services
    assert _services is None

    # XXX: this may open a root actor as well
    async with (
        tractor.open_root_actor(

            # passed through to ``open_root_actor``
            arbiter_addr=_registry_addr,
            name=name,
            loglevel=loglevel,
            debug_mode=debug_mode,
            start_method=start_method,

            # TODO: eventually we should be able to avoid
            # giving the root anything more than the permissions
            # needed to spawn other specialized daemons, I think?
            enable_modules=_root_modules,
        ) as _,
    ):
        yield tractor.current_actor()
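A hedged usage sketch (the actor name is whatever service identity you want registered; 'chart' is just an example):

async with open_piker_runtime('chart') as actor:
    assert actor is tractor.current_actor()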
Code example #11
    async def main():

        full = list(range(1000))

        async def get_sub_and_pull(taskname: str):
            async with (
                maybe_open_stream(taskname) as stream,
            ):
                if '0' in taskname:
                    assert isinstance(stream, tractor.MsgStream)
                else:
                    assert isinstance(
                        stream,
                        tractor.trionics.BroadcastReceiver
                    )

                first = await stream.receive()
                print(f'{taskname} started with value {first}')
                seq = []
                async for msg in stream:
                    seq.append(msg)

                assert set(seq).issubset(set(full))
            print(f'{taskname} finished')

        with trio.fail_after(timeout):
            # TODO: turns out this isn't multi-task entrant XD
            # We probably need an idempotent entry semantic?
            async with tractor.open_root_actor():
                async with (
                    trio.open_nursery() as nurse,
                ):
                    for i in range(10):
                        nurse.start_soon(get_sub_and_pull, f'task_{i}')
                        await trio.sleep(0.001)

                print('all consumer tasks finished')
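maybe_open_stream and timeout are supplied by the enclosing test. Given the type assertions above, the helper is presumably built on tractor.trionics.maybe_open_context so that the first task owns the real MsgStream while later entrants get a broadcast subscription; an illustrative sketch with a hypothetical opener:

from contextlib import asynccontextmanager as acm

import tractor

@acm
async def maybe_open_stream(taskname: str):
    async with tractor.trionics.maybe_open_context(
        open_feed_stream,  # hypothetical acm that opens the real stream
        kwargs={},
    ) as (cache_hit, stream):
        if cache_hit:
            # broadcast-subscribe to the already-open stream
            async with stream.subscribe() as bstream:
                yield bstream
        else:
            yield stream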
Code example #12
async def cancel_after(wait, arb_addr):
    async with tractor.open_root_actor(arbiter_addr=arb_addr):
        with trio.move_on_after(wait):
            return await a_quadruple_example()
Code example #13
async def spawn_and_check_registry(
    arb_addr: tuple,
    use_signal: bool,
    remote_arbiter: bool = False,
    with_streaming: bool = False,
) -> None:

    async with tractor.open_root_actor(arbiter_addr=arb_addr):
        async with tractor.get_arbiter(*arb_addr) as portal:
            # runtime needs to be up to call this
            actor = tractor.current_actor()

            if remote_arbiter:
                assert not actor.is_arbiter

            if actor.is_arbiter:

                async def get_reg():
                    return actor._registry

                extra = 1  # arbiter is local root actor
            else:
                get_reg = partial(portal.run_from_ns, 'self', 'get_registry')
                extra = 2  # local root actor + remote arbiter

            # ensure current actor is registered
            registry = await get_reg()
            assert actor.uid in registry

            try:
                async with tractor.open_nursery() as n:
                    async with trio.open_nursery() as trion:

                        portals = {}
                        for i in range(3):
                            name = f'a{i}'
                            if with_streaming:
                                portals[name] = await n.start_actor(
                                    name=name, enable_modules=[__name__])

                            else:  # no streaming
                                portals[name] = await n.run_in_actor(
                                    trio.sleep_forever, name=name)

                        # wait on last actor to come up
                        async with tractor.wait_for_actor(name):
                            registry = await get_reg()
                            for uid in n._children:
                                assert uid in registry

                        assert len(portals) + extra == len(registry)

                        if with_streaming:
                            await trio.sleep(0.1)

                            pts = list(portals.values())
                            for p in pts[:-1]:
                                trion.start_soon(stream_from, p)

                            # stream for 1 sec
                            trion.start_soon(cancel, use_signal, 1)

                            last_p = pts[-1]
                            await stream_from(last_p)

                        else:
                            await cancel(use_signal)

            finally:
                with trio.CancelScope(shield=True):
                    await trio.sleep(0.5)

                    # all subactors should have de-registered
                    registry = await get_reg()
                    assert len(registry) == extra
                    assert actor.uid in registry
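stream_from and cancel are helpers from the same test module. Based on how use_signal is threaded through, a plausible (but reconstructed, not source) shape for cancel is:

import os
import signal

import trio

async def cancel(use_signal: bool, delay: float = 0) -> None:
    # wait, then either fake an external SIGINT or raise
    # KeyboardInterrupt directly in-task
    await trio.sleep(delay)
    if use_signal:
        os.kill(os.getpid(), signal.SIGINT)
    else:
        raise KeyboardInterrupt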
Code example #14
File: root_actor_error.py  Project: goodboy/tractor
async def main():
    async with tractor.open_root_actor(debug_mode=True):
        assert 0
Code example #15
    async def main():

        # make sure it all works within the runtime
        async with tractor.open_root_actor():

            num_laggers = 4
            laggers: dict[str, int] = {}
            retries = 3
            size = 100
            tx, rx = trio.open_memory_channel(size)
            brx = broadcast_receiver(rx, size)

            async def sub_and_print(
                delay: float,
            ) -> None:

                task = current_task()
                start = time.time()

                async with brx.subscribe() as lbrx:
                    while True:
                        print(f'{task.name}: starting consume loop')
                        try:
                            async for value in lbrx:
                                print(f'{task.name}: {value}')
                                await trio.sleep(delay)

                            if task.name == 'sub_1':
                                # trigger checkpoint to clean out other subs
                                await trio.sleep(0.01)

                                # the non-lagger got
                                # a ``trio.EndOfChannel``
                                # because the ``tx`` below was closed
                                assert len(lbrx._state.subs) == 1

                                await lbrx.aclose()

                                assert len(lbrx._state.subs) == 0

                        except trio.ClosedResourceError:
                            # only the fast sub will try to re-enter
                            # iteration on the now closed bcaster
                            assert task.name == 'sub_1'
                            return

                        except Lagged:
                            lag_time = time.time() - start
                            lags = laggers[task.name]
                            print(
                                f'restarting slow task {task.name} '
                                f'that bailed out on {lags}:{value} '
                                f'after {lag_time:.3f}')
                            if lags <= retries:
                                laggers[task.name] += 1
                                continue
                            else:
                                print(
                                    f'{task.name} was too slow and terminated '
                                    f'on {lags}:{value}')
                                return

            async with trio.open_nursery() as nursery:

                for i in range(1, num_laggers):

                    task_name = f'sub_{i}'
                    laggers[task_name] = 0
                    nursery.start_soon(
                        partial(
                            sub_and_print,
                            delay=i*0.001,
                        ),
                        name=task_name,
                    )

                # allow subs to sched
                await trio.sleep(0.1)

                async with tx:
                    for i in cycle(range(size)):
                        await tx.send(i)
                        if len(brx._state.subs) == 2:
                            # only the root receiver and the non-lagger sub remain
                            break

                # the non-lagger
                assert laggers.pop('sub_1') == 0

                for n, v in laggers.items():
                    assert v == 4

                assert tx._closed
                assert not tx._state.open_send_channels

                # check that "first" bcaster that we created
                # above, never was iterated and is thus overrun
                try:
                    await brx.receive()
                except Lagged:
                    # expect tokio style index truncation
                    seq = brx._state.subs[brx.key]
                    assert seq == len(brx._state.queue) - 1

                # all backpressured entries in the underlying
                # channel should have been copied into the
                # bcaster's trailing-window queue
                async for i in rx:
                    print(f'bped: {i}')
                    assert i in brx._state.queue

                # should be noop
                await brx.aclose()
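Besides trio and tractor this test leans on a few stdlib helpers plus the broadcast types; the imports it appears to assume:

import time
from functools import partial
from itertools import cycle

import trio
from trio.lowlevel import current_task
import tractor
from tractor.trionics import broadcast_receiver, Lagged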
Code example #16
async def main():

    async with tractor.open_root_actor(debug_mode=True):
        while True:
            await tractor.breakpoint()
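tractor's debugging example scripts conventionally end with the plain trio entrypoint, so this file presumably closes with:

if __name__ == '__main__':
    trio.run(main)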