async def attach_to_sleep_forever():
    '''
    Cancel a context **before** any underlying error is raised in order
    to trigger a local reception of a ``ContextCancelled`` which
    **should not** be re-raised in the local surrounding ``Context``
    *iff* the cancel was requested by **this** side of the context.

    '''
    async with tractor.wait_for_actor('sleeper') as p2:
        async with (
            p2.open_context(sleep_forever) as (peer_ctx, first),
            peer_ctx.open_stream(),
        ):
            try:
                yield
            finally:
                # XXX: previously this would trigger local
                # ``ContextCancelled`` to be received and raised in the
                # local context overriding any local error due to logic
                # inside ``_invoke()`` which checked for an error set on
                # ``Context._error`` and raised it under a cancellation
                # scenario.
                # The problem is you can have a remote cancellation that
                # is part of a local error and we shouldn't raise
                # ``ContextCancelled`` **iff** we weren't the side of
                # the context to initiate it, i.e.
                # ``Context._cancel_called`` should **NOT** have been
                # set. The special logic to handle this case is now
                # inside ``Context._may_raise_from_remote_msg()`` XD
                await peer_ctx.cancel()

async def test_reg_then_unreg(arb_addr):
    actor = tractor.current_actor()
    assert actor.is_arbiter
    assert len(actor._registry) == 1  # only self is registered

    async with tractor.open_nursery() as n:

        portal = await n.start_actor('actor', rpc_module_paths=[__name__])
        uid = portal.channel.uid

        async with tractor.get_arbiter(*arb_addr) as aportal:
            # this local actor should be the arbiter
            assert actor is aportal.actor

            async with tractor.wait_for_actor('actor'):
                # sub-actor uid should be in the registry
                assert uid in aportal.actor._registry
                sockaddrs = actor._registry[uid]
                # XXX: can we figure out what the listen addr will be?
                assert sockaddrs

        await n.cancel()  # tear down nursery

    await trio.sleep(0.1)
    assert uid not in aportal.actor._registry
    sockaddrs = actor._registry[uid]
    assert not sockaddrs

async def test_required_args(callwith_expecterror):
    func, kwargs, err = callwith_expecterror

    if err is not None:
        with pytest.raises(err):
            await func(**kwargs)
    else:
        async with tractor.open_nursery() as n:

            portal = await n.start_actor(
                name='pubber',
                enable_modules=[__name__],
            )

            async with tractor.wait_for_actor('pubber'):
                pass

            await trio.sleep(0.5)

            async with portal.open_stream_from(
                multilock_pubber,
                **kwargs
            ) as stream:
                async for val in stream:
                    assert val == {'doggy': 10}

            await portal.cancel_actor()

async def test_self_is_registered(arb_addr):
    "Verify waiting on the arbiter to register itself using the standard api."
    actor = tractor.current_actor()
    assert actor.is_arbiter

    with trio.fail_after(0.2):
        async with tractor.wait_for_actor('root') as portal:
            assert portal.channel.uid[0] == 'root'

async def _async_main(
    symbol: str,
    brokername: str,
    rate: int = 1,
    loglevel: str = 'info',
    test: bool = False,
) -> None:
    '''Launch kivy app + all other related tasks.

    This is started with cli cmd `piker options`.

    '''
    if loglevel is not None:
        get_console_log(loglevel)

    brokermod = get_brokermod(brokername)

    async with trio.open_nursery() as nursery:

        # get a portal to the data feed daemon
        async with tractor.wait_for_actor('brokerd') as portal:

            # set up a pager view for large ticker lists
            chain = await new_chain_ui(
                portal,
                symbol,
                brokermod,
                rate=rate,
            )
            async with chain.open_rt_display(nursery, symbol):
                try:
                    await async_runTouchApp(chain.widgets['root'])
                finally:
                    if chain._quote_gen:
                        await chain._quote_gen.aclose()

                    # cancel GUI update task
                    nursery.cancel_scope.cancel()

async def consumer(
    subs: List[str],
) -> None:

    uid = tractor.current_actor().uid

    async with tractor.wait_for_actor('publisher') as portal:
        async with portal.open_context(subscribe) as (ctx, first):
            async with ctx.open_stream() as stream:

                # flip between the provided subs dynamically
                if len(subs) > 1:

                    for sub in itertools.cycle(subs):
                        print(f'setting dynamic sub to {sub}')
                        await stream.send([sub])

                        count = 0
                        async for value in stream:
                            print(f'{uid} got: {value}')
                            if count > 5:
                                break
                            count += 1

                else:
                    # static sub
                    await stream.send(subs)
                    async for value in stream:
                        print(f'{uid} got: {value}')

async def main():
    async with tractor.open_nursery() as n:
        portal = await n.start_actor(
            'streamer',
            rpc_module_paths=[__name__],
        )
        async with tractor.wait_for_actor('streamer'):
            # block until 2nd actor is initialized
            pass

        async with trio.open_nursery() as tn:
            agen = await tn.start(subs, ['even'], 'streamer')

            await trio.sleep(0.1)
            tn.start_soon(subs, ['even'], 'streamer')

            # XXX this will trigger the python bug:
            # https://bugs.python.org/issue32526
            # if using async generators to wrap tractor channels
            await agen.aclose()

            await trio.sleep(0.1)
            tn.start_soon(subs, ['even'], 'streamer')
            await trio.sleep(0.1)
            tn.start_soon(subs, ['even'], 'streamer')

        await portal.cancel_actor()

async def subs(
    which,
    pub_actor_name,
    seed=10,
    task_status=trio.TASK_STATUS_IGNORED,
):
    if len(which) == 1:
        if which[0] == 'even':
            pred = is_even
        else:
            def pred(i):
                return not is_even(i)
    else:
        def pred(i):
            return isinstance(i, int)

    # TODO: https://github.com/goodboy/tractor/issues/207
    async with tractor.wait_for_actor(pub_actor_name) as portal:
        assert portal

        async with portal.open_stream_from(
            pubber,
            topics=which,
            seed=seed,
        ) as stream:
            task_status.started(stream)
            times = 10
            count = 0
            await stream.__anext__()
            async for pkt in stream:
                for topic, value in pkt.items():
                    assert pred(value)
                count += 1
                if count >= times:
                    break

            await stream.aclose()

        async with portal.open_stream_from(
            pubber,
            topics=['odd'],
            seed=seed,
        ) as stream:
            await stream.__anext__()
            count = 0
            # async with aclosing(stream) as stream:
            try:
                async for pkt in stream:
                    for topic, value in pkt.items():
                        pass
                        # assert pred(value)
                    count += 1
                    if count >= times:
                        break
            finally:
                await stream.aclose()

async def main():
    assert not tractor.current_actor().is_arbiter
    async with tractor.open_nursery() as n:
        p1 = await n.start_actor('doggy')
        p2 = await n.start_actor('doggy')

        async with tractor.wait_for_actor('doggy') as portal:
            assert portal.channel.uid in (p2.channel.uid, p1.channel.uid)

        await n.cancel()

async def main(service_name):

    async with tractor.open_nursery() as an:
        await an.start_actor(service_name)

        async with tractor.get_arbiter('127.0.0.1', 1616) as portal:
            print(f"Arbiter is listening on {portal.channel}")

        async with tractor.wait_for_actor(service_name) as sockaddr:
            print(f"my_service is found at {sockaddr}")

        await an.cancel()

async def test_required_args(callwith_expecterror):
    func, kwargs, err = callwith_expecterror

    if err is not None:
        with pytest.raises(err):
            await func(**kwargs)
    else:
        async with tractor.open_nursery() as n:
            # await func(**kwargs)
            portal = await n.run_in_actor(
                'pubber', multilock_pubber, **kwargs)

            async with tractor.wait_for_actor('pubber'):
                pass

            await trio.sleep(0.5)

            async for val in await portal.result():
                assert val == {'doggy': 10}

async def maybe_spawn_brokerd(
    brokername: str,
    sleep: float = 0.5,
    loglevel: Optional[str] = None,
    expose_mods: List = [],
    **tractor_kwargs,
) -> tractor._portal.Portal:
    """If no ``brokerd.{brokername}`` daemon-actor can be found,
    spawn one in a local subactor and return a portal to it.

    """
    if loglevel:
        get_console_log(loglevel)

    # disable debugger in brokerd?
    # tractor._state._runtime_vars['_debug_mode'] = False

    tractor_kwargs['loglevel'] = loglevel

    brokermod = get_brokermod(brokername)
    dname = f'brokerd.{brokername}'
    async with tractor.find_actor(dname) as portal:

        # WTF: why doesn't this work?
        if portal is not None:
            yield portal

        else:
            # no daemon has been spawned yet
            log.info(f"Spawning {brokername} broker daemon")

            tractor_kwargs = getattr(brokermod, '_spawn_kwargs', {})
            async with tractor.open_nursery() as nursery:
                try:
                    # spawn new daemon
                    portal = await nursery.start_actor(
                        dname,
                        enable_modules=_data_mods + [brokermod.__name__],
                        loglevel=loglevel,
                        **tractor_kwargs
                    )
                    async with tractor.wait_for_actor(dname) as portal:
                        yield portal
                finally:
                    # client code may block indefinitely so cancel when
                    # teardown is invoked
                    await nursery.cancel()

async def main():
    ss = tractor.current_actor().statespace
    async with tractor.open_nursery() as n:

        name = 'arbiter'

        if pub_actor == 'streamer':
            # start the publisher as a daemon
            master_portal = await n.start_actor(
                'streamer',
                rpc_module_paths=[__name__],
            )

        even_portal = await n.run_in_actor(
            'evens', subs, which=['even'], pub_actor_name=name)
        odd_portal = await n.run_in_actor(
            'odds', subs, which=['odd'], pub_actor_name=name)

        async with tractor.wait_for_actor('evens'):
            # block until 2nd actor is initialized
            pass

        if pub_actor == 'arbiter':
            # wait for publisher task to be spawned in a local RPC task
            while not ss.get('get_topics'):
                await trio.sleep(0.1)

            get_topics = ss.get('get_topics')

            assert 'even' in get_topics()

        async with tractor.wait_for_actor('odds'):
            # block until 2nd actor is initialized
            pass

        if pub_actor == 'arbiter':
            start = time.time()
            while 'odd' not in get_topics():
                await trio.sleep(0.1)
                if time.time() - start > 1:
                    pytest.fail("odds subscription never arrived?")

            # TODO: how to make this work when the arbiter gets
            # a portal to itself? Currently this causes a hang
            # when the channel server is torn down due to a lingering
            # loopback channel
            #     with trio.move_on_after(1):
            #         await subs(['even', 'odd'])

        # XXX: this would cause infinite
        # blocking due to actor never terminating loop
        # await even_portal.result()

        await trio.sleep(0.5)
        await even_portal.cancel_actor()
        await trio.sleep(0.5)

        if pub_actor == 'arbiter':
            assert 'even' not in get_topics()

        await odd_portal.cancel_actor()
        await trio.sleep(1)

        if pub_actor == 'arbiter':
            while get_topics():
                await trio.sleep(0.1)
                if time.time() - start > 1:
                    pytest.fail("odds subscription never dropped?")
        else:
            await master_portal.cancel_actor()

async def stream_and_route(ctx, ui_name):
    """Order router (sub)actor entrypoint.

    This is the daemon (child) side routine which starts an EMS runtime
    per broker/feed and begins streaming alerts from executions back to
    subscribers.

    """
    actor = tractor.current_actor()
    book = get_book()

    _active_execs: Dict[str, (str, str)] = {}

    # new router entry point
    async with tractor.wait_for_actor(ui_name) as portal:

        # spawn one task per broker feed
        async with trio.open_nursery() as n:

            async for cmd in await portal.run(send_order_cmds):

                log.info(f'{cmd} received in {actor.uid}')
                msg = cmd['msg']
                oid = cmd['oid']

                if msg == 'cancel':
                    # destroy exec
                    pred, name, cmd = book.orders[_active_execs[oid]].pop(oid)

                    # ack-cmdond that order is live
                    await ctx.send_yield({'msg': 'cancelled', 'oid': oid})

                    continue

                elif msg in (
                    'alert',
                    'buy',
                    'sell',
                ):
                    trigger_price = cmd['price']
                    sym = cmd['symbol']
                    brokers = cmd['brokers']

                    broker = brokers[0]
                    last = book.lasts.get((broker, sym))

                    if last is None:  # spawn new brokerd feed task
                        quote = await n.start(
                            exec_orders,
                            ctx,
                            # TODO: eventually support N-brokers
                            broker,
                            sym,
                            trigger_price,
                        )
                        print(f"received first quote {quote}")

                        last = book.lasts[(broker, sym)]

                    print(f'Known last is {last}')

                    # Auto-gen scanner predicate:
                    # we automatically figure out what the alert check
                    # condition should be based on the current first
                    # price received from the feed, instead of being
                    # like every other shitty tina platform that makes
                    # the user choose the predicate operator.
                    pred, name = mk_check(trigger_price, last)

                    # create list of executions on first entry
                    book.orders.setdefault(
                        (broker, sym), {})[oid] = (pred, name, cmd)

                    # reverse lookup for cancellations
                    _active_execs[oid] = (broker, sym)

                    # ack-cmdond that order is live
                    await ctx.send_yield({'msg': 'active', 'oid': oid})

async def test_self_is_registered():
    "Verify waiting on the arbiter to register itself using the standard api."
    actor = tractor.current_actor()
    assert actor.is_arbiter
    async with tractor.wait_for_actor('arbiter') as portal:
        assert portal.channel.uid[0] == 'arbiter'

async def say_hello(other_actor):
    async with tractor.wait_for_actor(other_actor) as portal:
        return await portal.run(hi)

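# A minimal sketch (not part of the original sources) of how ``say_hello``
# above might be driven from a root task: two sub-actors are spawned and
# each one greets the other by discovering it via
# ``tractor.wait_for_actor()``. Assumes a recent ``tractor`` where
# ``open_nursery()`` implicitly opens the root actor, that this module is
# enabled on the spawned sub-actors, and that a ``hi()`` function (already
# referenced above) is defined alongside. ``greet_both`` is a hypothetical
# helper name.
async def greet_both():
    async with tractor.open_nursery() as n:
        donny = await n.run_in_actor(
            say_hello,
            name='donny',
            # arguments to the target function are passed by keyword
            other_actor='gretchen',
        )
        gretchen = await n.run_in_actor(
            say_hello,
            name='gretchen',
            other_actor='donny',
        )
        # each portal's result is the greeting returned by the peer actor
        print(await gretchen.result())
        print(await donny.result())
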
async def maybe_spawn_daemon(
    service_name: str,
    service_task_target: Callable,
    spawn_args: dict[str, Any],
    loglevel: Optional[str] = None,
    **kwargs,
) -> tractor.Portal:
    '''
    If no ``service_name`` daemon-actor can be found,
    spawn one in a local subactor and return a portal to it.

    If this function is called from a non-pikerd actor, the spawned
    service will persist as long as pikerd does, or until it is
    requested to be cancelled.

    This can be seen as a service starting api for remote-actor
    clients.

    '''
    if loglevel:
        get_console_log(loglevel)

    # serialize access to this section to avoid
    # 2 or more tasks racing to create a daemon
    lock = Brokerd.locks[service_name]
    await lock.acquire()

    async with find_service(service_name) as portal:
        if portal is not None:
            lock.release()
            yield portal
            return

    log.warning(f"Couldn't find any existing {service_name}")

    # ask root ``pikerd`` daemon to spawn the daemon we need;
    # if pikerd is not live we now become the root of the
    # process tree
    async with maybe_open_pikerd(
        loglevel=loglevel,
        **kwargs,
    ) as pikerd_portal:

        if pikerd_portal is None:
            # we are the root and thus are `pikerd`
            # so spawn the target service directly by calling
            # the provided target routine.
            # XXX: this assumes that the target is well formed and will
            # do the right things to setup both a sub-actor **and** call
            # the ``_Services`` api from above to start the top level
            # service task for that actor.
            await service_task_target(**spawn_args)

        else:
            # tell the remote `pikerd` to start the target,
            # the target can't return a non-serializable value
            # since it is expected that service starting is
            # non-blocking and the target task will persist running
            # on `pikerd` after the client requesting its start
            # disconnects.
            await pikerd_portal.run(
                service_task_target,
                **spawn_args,
            )

        async with tractor.wait_for_actor(service_name) as portal:
            lock.release()
            yield portal
            await portal.cancel_actor()

async def spawn_and_check_registry(
    arb_addr: tuple,
    use_signal: bool,
    remote_arbiter: bool = False,
    with_streaming: bool = False,
) -> None:

    async with tractor.open_root_actor(
        arbiter_addr=arb_addr,
    ):
        async with tractor.get_arbiter(*arb_addr) as portal:
            # runtime needs to be up to call this
            actor = tractor.current_actor()

            if remote_arbiter:
                assert not actor.is_arbiter

            if actor.is_arbiter:

                async def get_reg():
                    return actor._registry

                extra = 1  # arbiter is local root actor
            else:
                get_reg = partial(portal.run_from_ns, 'self', 'get_registry')
                extra = 2  # local root actor + remote arbiter

            # ensure current actor is registered
            registry = await get_reg()
            assert actor.uid in registry

            try:
                async with tractor.open_nursery() as n:
                    async with trio.open_nursery() as trion:

                        portals = {}
                        for i in range(3):
                            name = f'a{i}'
                            if with_streaming:
                                portals[name] = await n.start_actor(
                                    name=name, enable_modules=[__name__])

                            else:  # no streaming
                                portals[name] = await n.run_in_actor(
                                    trio.sleep_forever, name=name)

                        # wait on last actor to come up
                        async with tractor.wait_for_actor(name):
                            registry = await get_reg()
                            for uid in n._children:
                                assert uid in registry

                        assert len(portals) + extra == len(registry)

                        if with_streaming:
                            await trio.sleep(0.1)

                            pts = list(portals.values())
                            for p in pts[:-1]:
                                trion.start_soon(stream_from, p)

                            # stream for 1 sec
                            trion.start_soon(cancel, use_signal, 1)

                            last_p = pts[-1]
                            await stream_from(last_p)

                        else:
                            await cancel(use_signal)

            finally:
                with trio.CancelScope(shield=True):
                    await trio.sleep(0.5)

                    # all subactors should have de-registered
                    registry = await get_reg()
                    assert len(registry) == extra
                    assert actor.uid in registry

async def say_hello_use_wait(other_actor):
    async with tractor.wait_for_actor(other_actor) as portal:
        assert portal is not None
        result = await portal.run(__name__, 'hi')
        return result

async def say_hello(other_actor):
    async with tractor.wait_for_actor(other_actor) as portal:
        return await portal.run(_this_module, 'hi')

async def spawn_router_stream_alerts(
    order_mode,
    symbol: Symbol,
    # lines: 'LinesEditor',
    task_status: TaskStatus[str] = trio.TASK_STATUS_IGNORED,
) -> None:
    """Spawn an EMS daemon and begin sending orders and receiving
    alerts.

    """
    actor = tractor.current_actor()
    subactor_name = 'emsd'

    # TODO: add ``maybe_spawn_emsd()`` for this
    async with tractor.open_nursery() as n:

        portal = await n.start_actor(
            subactor_name,
            enable_modules=[__name__],
        )
        stream = await portal.run(
            stream_and_route,
            ui_name=actor.name,
        )

        async with tractor.wait_for_actor(subactor_name):
            # let parent task continue
            task_status.started(_to_ems)

        # begin the trigger-alert stream
        # this is where we receive **back** messages
        # about executions **from** the EMS actor
        async for msg in stream:

            # delete the line from view
            oid = msg['oid']
            resp = msg['msg']

            if resp in ('active',):
                print(f"order accepted: {msg}")

                # show line label once order is live
                order_mode.lines.commit_line(oid)
                continue

            elif resp in ('cancelled',):
                # delete level from view
                order_mode.lines.remove_line(uuid=oid)
                print(f'deleting line with oid: {oid}')

            elif resp in ('executed',):
                order_mode.lines.remove_line(uuid=oid)
                print(f'deleting line with oid: {oid}')

                order_mode.arrows.add(
                    oid,
                    msg['index'],
                    msg['price'],
                    pointing='up' if msg['name'] == 'up' else 'down',
                )

            # DESKTOP NOTIFICATIONS
            #
            # TODO: this in another task?
            # not sure if this will ever be a bottleneck,
            # we probably could do graphics stuff first tho?

            # XXX: linux only for now
            result = await trio.run_process([
                'notify-send',
                '-u', 'normal',
                '-t', '10000',
                'piker',
                f'alert: {msg}',
            ])
            log.runtime(result)