async def spawn(is_arbiter):
    namespaces = [__name__]

    await trio.sleep(0.1)
    actor = tractor.current_actor()
    assert actor.is_arbiter == is_arbiter
    # ``statespace`` is assumed to be a module-level dict in the
    # surrounding test module
    assert actor.statespace == statespace

    if actor.is_arbiter:
        async with tractor.open_nursery() as nursery:
            # forks here
            portal = await nursery.run_in_actor(
                'sub-actor',
                spawn,
                is_arbiter=False,
                statespace=statespace,
                rpc_module_paths=namespaces,
            )
            assert len(nursery._children) == 1
            assert portal.channel.uid in tractor.current_actor()._peers

            # be sure we can still get the result
            result = await portal.result()
            assert result == 10
            return result
    else:
        return 10
async def spawn(is_arbiter, data, arb_addr):
    namespaces = [__name__]

    await trio.sleep(0.1)

    async with tractor.open_root_actor(
        arbiter_addr=arb_addr,
    ):
        actor = tractor.current_actor()
        assert actor.is_arbiter == is_arbiter
        # ``data_to_pass_down`` is assumed to be a module-level value
        data = data_to_pass_down

        if actor.is_arbiter:
            async with tractor.open_nursery() as nursery:
                # forks here
                portal = await nursery.run_in_actor(
                    spawn,
                    is_arbiter=False,
                    name='sub-actor',
                    data=data,
                    arb_addr=arb_addr,
                    enable_modules=namespaces,
                )
                assert len(nursery._children) == 1
                assert portal.channel.uid in tractor.current_actor()._peers

                # be sure we can still get the result
                result = await portal.result()
                assert result == 10
                return result
        else:
            return 10
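# A minimal sketch of how the second ``spawn`` variant above might be
# driven from a test entrypoint; ``arb_addr`` is assumed to be a pytest
# fixture and ``data_to_pass_down`` a module-level dict (both referenced
# but not defined in the snippet; the value here is illustrative).
import trio

data_to_pass_down = {'doggy': 10, 'kitty': 4}  # assumed module-level value

def test_spawn_tree(arb_addr):
    # run the root/arbiter side; it recursively spawns the non-arbiter
    # child and returns its result
    result = trio.run(spawn, True, data_to_pass_down, arb_addr)
    assert result == 10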
def handler(signum, frame, *args):
    """Specialized debugger-compatible SIGINT handler.

    In children we always ignore to avoid deadlocks since cancellation
    should always be managed by the parent supervising actor. The root
    is always cancelled on ctrl-c.
    """
    if is_root_process():
        tractor.current_actor().cancel_soon()
    else:
        print(
            "tractor ignores SIGINT while in debug mode\n"
            "If you have a special need for it please open an issue.\n"
        )
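# Sketch of how such a handler is typically installed for the whole
# process (the ``signal`` wiring is assumed, not shown in the original):
import signal

signal.signal(signal.SIGINT, handler)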
async def get_cached_feed(
    brokername: str,
) -> BrokerFeed:
    """Get/create a ``BrokerFeed`` from/in the current actor.
    """
    # check if a cached client is in the local actor's statespace
    ss = tractor.current_actor().statespace
    feeds = ss.setdefault('feeds', {'_lock': trio.Lock()})
    lock = feeds['_lock']
    feed = None
    try:
        async with lock:
            feed = feeds[brokername]
            log.info(f"Subscribing with existing `{brokername}` daemon")
        yield feed

    except KeyError:
        async with lock:
            log.info(f"Creating new client for broker {brokername}")
            brokermod = get_brokermod(brokername)
            exit_stack = contextlib.AsyncExitStack()
            client = await exit_stack.enter_async_context(
                brokermod.get_client())
            feed = BrokerFeed(
                mod=brokermod,
                client=client,
                exit_stack=exit_stack,
            )
            feeds[brokername] = feed
        yield feed

    finally:
        if feed is not None:
            # destroy the API client
            await feed.exit_stack.aclose()
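# Since ``get_cached_feed`` yields, it is presumably decorated with
# ``contextlib.asynccontextmanager`` in the source. A hedged usage
# sketch (the broker name is illustrative):
async def use_feed():
    # first entry creates and caches the feed; subsequent entries in
    # the same actor reuse the cached instance
    async with get_cached_feed('questrade') as feed:
        print(feed.mod, feed.client)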
async def print_loop():
    # arbiter is started in-proc if it doesn't exist yet
    assert tractor.current_actor().is_arbiter

    # ``nums`` is assumed to be a module-level list
    for i in range(10):
        nums.append(i)
        await trio.sleep(0.1)
async def open_feed(
    name: str,
    symbols: Sequence[str],
    loglevel: Optional[str] = None,
) -> AsyncIterator[Dict[str, Any]]:
    """Open a "data feed" which provides streamed real-time quotes.
    """
    try:
        mod = get_brokermod(name)
    except ImportError:
        mod = get_ingestormod(name)

    if loglevel is None:
        loglevel = tractor.current_actor().loglevel

    async with maybe_spawn_brokerd(
        mod.name,
        loglevel=loglevel,
    ) as portal:
        stream = await portal.run(
            mod.__name__,
            'stream_quotes',
            symbols=symbols,
            topics=symbols,
        )

        # Feed is required to deliver an initial quote asap.
        # TODO: should we timeout and raise a more explicit error?
        # with trio.fail_after(5):
        with trio.fail_after(float('inf')):
            # Retrieve an initial quote for each symbol
            # such that consumer code can know the data layout
            first_quote = await stream.__anext__()
            log.info(f"Received first quote {first_quote}")

        yield (first_quote, stream)
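# A minimal consumer sketch, assuming ``open_feed`` is wrapped with
# ``@asynccontextmanager`` as its single ``yield`` suggests (broker and
# symbol names are illustrative):
async def consume_quotes():
    async with open_feed('questrade', ['SPY']) as (first_quote, stream):
        # the data layout is known from ``first_quote``; then iterate
        async for quote in stream:
            print(quote)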
async def consumer(
    subs: List[str],
) -> None:

    uid = tractor.current_actor().uid

    async with tractor.wait_for_actor('publisher') as portal:
        async with portal.open_context(subscribe) as (ctx, first):
            async with ctx.open_stream() as stream:

                # flip between the provided subs dynamically
                if len(subs) > 1:
                    for sub in itertools.cycle(subs):
                        print(f'setting dynamic sub to {sub}')
                        await stream.send([sub])

                        count = 0
                        async for value in stream:
                            print(f'{uid} got: {value}')
                            if count > 5:
                                break
                            count += 1

                else:  # static sub
                    await stream.send(subs)
                    async for value in stream:
                        print(f'{uid} got: {value}')
async def test_reg_then_unreg(arb_addr):
    actor = tractor.current_actor()
    assert actor.is_arbiter
    assert len(actor._registry) == 1  # only self is registered

    async with tractor.open_nursery() as n:
        portal = await n.start_actor('actor', rpc_module_paths=[__name__])
        uid = portal.channel.uid

        async with tractor.get_arbiter(*arb_addr) as aportal:
            # this local actor should be the arbiter
            assert actor is aportal.actor

            async with tractor.wait_for_actor('actor'):
                # sub-actor uid should be in the registry
                assert uid in aportal.actor._registry
                sockaddrs = actor._registry[uid]
                # XXX: can we figure out what the listen addr will be?
                assert sockaddrs

        await n.cancel()  # tear down nursery

        await trio.sleep(0.1)
        assert uid not in aportal.actor._registry
        sockaddrs = actor._registry[uid]
        assert not sockaddrs
async def wrapper_mngr(
):
    from tractor.trionics import broadcast_receiver
    global _cached_stream
    in_aio = tractor.current_actor().is_infected_aio()

    if in_aio:
        if _cached_stream:
            from_aio = _cached_stream

            # if we already have a cached feed deliver a rx side clone
            # to consumer
            async with broadcast_receiver(from_aio, 6) as from_aio:
                yield from_aio
                return
        else:
            async with tractor.to_asyncio.open_channel_from(
                aio_streamer,
            ) as (first, from_aio):
                assert not first

                # cache it so next task uses broadcast receiver
                _cached_stream = from_aio

                yield from_aio
    else:
        async with aclosing(trio_streamer()) as stream:
            # cache it so next task uses broadcast receiver
            _cached_stream = stream
            yield stream
async def asyncio_actor(
    target: str,
    # NOTE: annotated as a string since it's used as a dotted path to
    # an exception type, not an exception instance
    expect_err: Optional[str] = None,
) -> None:

    assert tractor.current_actor().is_infected_aio()
    target = globals()[target]

    if expect_err and '.' in expect_err:
        modpath, _, name = expect_err.rpartition('.')
        mod = importlib.import_module(modpath)
        error_type = getattr(mod, name)
    else:
        # toplevel builtin error type
        error_type = builtins.__dict__.get(expect_err)

    try:
        # spawn an ``asyncio`` task to run a func and return result
        await tractor.to_asyncio.run_task(target)

    except BaseException as err:
        if expect_err:
            assert isinstance(err, error_type)
        raise
async def test_self_is_registered(arb_addr):
    "Verify waiting on the arbiter to register itself using the standard api."
    actor = tractor.current_actor()
    assert actor.is_arbiter

    with trio.fail_after(0.2):
        async with tractor.wait_for_actor('root') as portal:
            assert portal.channel.uid[0] == 'root'
async def open_actor_cluster(
    modules: list[str],
    count: int = cpu_count(),
    names: Optional[list[str]] = None,
    start_method: Optional[str] = None,
    hard_kill: bool = False,
) -> AsyncGenerator[dict[str, tractor.Portal], None]:

    portals: dict[str, tractor.Portal] = {}

    if not names:
        names = [f'worker_{i}' for i in range(count)]

    if not len(names) == count:
        raise ValueError(
            f'Number of names is {len(names)} but count is {count}')

    async with tractor.open_nursery(start_method=start_method) as an:
        async with trio.open_nursery() as n:
            uid = tractor.current_actor().uid

            async def _start(name: str) -> None:
                name = f'{uid[0]}.{name}'
                portals[name] = await an.start_actor(
                    enable_modules=modules,
                    name=name,
                )

            for name in names:
                n.start_soon(_start, name)

        # the inner nursery has exited so all ``_start`` tasks are done
        assert len(portals) == count
        yield portals

        await an.cancel(hard_kill=hard_kill)
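# Assumed usage of the cluster helper above (again presuming
# ``@asynccontextmanager`` decoration, which the ``yield`` implies, and
# that ``tractor.open_nursery()`` can bring up the runtime implicitly):
import trio

async def run_cluster():
    async with open_actor_cluster(modules=[__name__]) as portals:
        # one portal per spawned worker, keyed by prefixed name
        for name in portals:
            print(f'spawned {name}')

trio.run(run_cluster)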
async def _trio_run_client_method(
    method: str,
    **kwargs,
) -> None:
    """Asyncio entry point to run tasks against the ``ib_insync`` api.
    """
    ca = tractor.current_actor()
    assert ca.is_infected_aio()

    # if the method is an *async gen* stream for it
    meth = getattr(Client, method)
    if inspect.isasyncgenfunction(meth):
        kwargs['_treat_as_stream'] = True

    # if the method is an *async func* but manually
    # streams back results, make sure to also stream it
    args = tuple(inspect.getfullargspec(meth).args)
    if 'to_trio' in args:
        kwargs['_treat_as_stream'] = True

    result = await tractor.to_asyncio.run_task(
        _aio_run_client_method,
        meth=method,
        **kwargs,
    )
    return result
async def _setup_persistent_brokerd(
    ctx: tractor.Context,
    brokername: str,
) -> None:
    '''
    Allocate an actor-wide service nursery in ``brokerd``
    such that feeds can be run in the background persistently by
    the broker backend as needed.

    '''
    get_console_log(tractor.current_actor().loglevel)

    global _bus
    assert not _bus

    async with trio.open_nursery() as service_nursery:
        # assign a nursery to the feeds bus for spawning
        # background tasks from clients
        get_feed_bus(brokername, service_nursery)

        # unblock caller
        await ctx.started()

        # we pin this task to keep the feeds manager active until the
        # parent actor decides to tear it down
        await trio.sleep_forever()
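# A hedged sketch of the parent side: spawning a ``brokerd`` actor and
# opening the long-lived context above through its portal (actor and
# broker names here are illustrative assumptions):
import trio
import tractor

async def start_brokerd():
    async with tractor.open_nursery() as an:
        portal = await an.start_actor(
            'brokerd.kraken',
            enable_modules=[__name__],
        )
        async with portal.open_context(
            _setup_persistent_brokerd,
            brokername='kraken',
        ) as (ctx, first):
            # the brokerd-side task stays pinned until this block exits
            await trio.sleep(1)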
async def test_self_is_registered_localportal(arb_addr):
    "Verify waiting on the arbiter to register itself using a local portal."
    actor = tractor.current_actor()
    assert actor.is_arbiter
    async with tractor.get_arbiter(*arb_addr) as portal:
        assert isinstance(portal, tractor._portal.LocalPortal)

        sockaddr = await portal.run('self', 'wait_for_actor', name='arbiter')
        assert sockaddr[0] == arb_addr
async def open_feed(
    name: str,
    symbols: Sequence[str],
    loglevel: Optional[str] = None,
) -> AsyncIterator[Dict[str, Any]]:
    """Open a "data feed" which provides streamed real-time quotes.
    """
    try:
        mod = get_brokermod(name)
    except ImportError:
        mod = get_ingestormod(name)

    if loglevel is None:
        loglevel = tractor.current_actor().loglevel

    # attempt to allocate (or attach to) shm array for this broker/symbol
    shm, opened = maybe_open_shm_array(
        key=sym_to_shm_key(name, symbols[0]),

        # use any broker defined ohlc dtype:
        dtype=getattr(mod, '_ohlc_dtype', base_iohlc_dtype),

        # we expect the sub-actor to write
        readonly=True,
    )

    async with maybe_spawn_brokerd(
        mod.name,
        loglevel=loglevel,
    ) as portal:
        stream = await portal.run(
            mod.__name__,
            'stream_quotes',
            symbols=symbols,
            shm_token=shm.token,

            # compat with eventual ``tractor.msg.pub``
            topics=symbols,
        )

        # TODO: we can't do this **and** be compatible with
        # ``tractor.msg.pub``, should we maybe just drop this after
        # tests are in?
        shm_token, is_writer = await stream.receive()

        if opened:
            assert is_writer
            log.info("Started shared mem bar writer")

        shm_token['dtype_descr'] = list(shm_token['dtype_descr'])
        assert shm_token == shm.token  # sanity

        yield Feed(
            name=name,
            stream=stream,
            shm=shm,
            _broker_portal=portal,
        )
async def list_services():

    async with tractor.get_arbiter(
        *tractor.current_actor()._arb_addr
    ) as portal:
        registry = await portal.run('self', 'get_registry')
        json_d = {}
        for uid, socket in registry.items():
            name, uuid = uid
            host, port = socket
            json_d[f'{name}.{uuid}'] = f'{host}:{port}'
        click.echo(f"Available `piker` services:\n{colorize_json(json_d)}")
async def main():
    assert not tractor.current_actor().is_arbiter
    async with tractor.open_nursery() as n:
        p1 = await n.start_actor('doggy')
        p2 = await n.start_actor('doggy')

        async with tractor.wait_for_actor('doggy') as portal:
            assert portal.channel.uid in (p2.channel.uid, p1.channel.uid)

        await n.cancel()
async def pubber(get_topics, seed=10):
    ss = tractor.current_actor().statespace

    for i in cycle(range(seed)):
        # ensure topic subscriptions are as expected
        ss['get_topics'] = get_topics

        yield {'even' if is_even(i) else 'odd': i}
        await trio.sleep(0.1)
async def stream_quotes(
    ctx: tractor.Context,  # marks this as a streaming func
    symbols: List[str],
    feed_type: str = 'stock',
    diff_cached: bool = True,
    rate: int = 3,
    loglevel: str = None,
) -> AsyncGenerator[str, Dict[str, Any]]:

    # XXX: why do we need this again?
    get_console_log(tractor.current_actor().loglevel)

    async with get_cached_client('questrade') as client:
        if feed_type == 'stock':
            formatter = format_stock_quote
            get_quotes = await stock_quoter(client, symbols)

            # do a smoke quote (note this mutates the input list and
            # filters out bad symbols for now)
            payload = await smoke_quote(get_quotes, list(symbols))
        else:
            formatter = format_option_quote
            get_quotes = await option_quoter(client, symbols)

            # packetize
            payload = {
                quote['symbol']: quote
                for quote in await get_quotes(symbols)
            }

        sd = await client.symbol_info(symbols)

        # push initial smoke quote response for client initialization
        await ctx.send_yield(payload)

        from .data import stream_poll_requests

        await stream_poll_requests(
            # ``msg.pub`` required kwargs
            task_name=feed_type,
            ctx=ctx,
            topics=symbols,
            packetizer=partial(
                packetizer,
                formatter=formatter,
                symbol_data=sd,
            ),

            # actual target "streaming func" args
            get_quotes=get_quotes,
            diff_cached=diff_cached,
            rate=rate,
        )
        log.info("Terminating stream quoter task")
async def test_cancel_remote_arbiter(daemon, arb_addr):
    assert not tractor.current_actor().is_arbiter
    async with tractor.get_arbiter(*arb_addr) as portal:
        await portal.cancel_actor()

    time.sleep(0.1)
    # the arbiter channel server is cancelled but not its main task
    assert daemon.returncode is None

    # no arbiter socket should exist
    with pytest.raises(OSError):
        async with tractor.get_arbiter(*arb_addr) as portal:
            pass
async def stream_symbol_selection():
    """An RPC async gen for streaming the symbol value corresponding
    to the last clicked row.

    Essentially an event stream of clicked symbol values.
    """
    widgets = tractor.current_actor().statespace['widgets']
    table = widgets['table']
    send_chan, recv_chan = trio.open_memory_channel(0)
    table._click_queues.append(send_chan)
    try:
        async with recv_chan:
            async for symbol in recv_chan:
                yield symbol
    finally:
        table._click_queues.remove(send_chan)
async def main():
    actor = tractor.current_actor()
    assert actor.is_arbiter

    # spawn a subactor which calls us back
    async with tractor.open_nursery() as n:
        await n.run_in_actor(
            'subactor',
            sleep_back_actor,
            actor_name=subactor_requests_to,

            # function from the local exposed module space
            # the subactor will invoke when it RPCs back to this actor
            func_name=funcname,
            exposed_mods=exposed_mods,
            func_defined=True if func_defined else False,
            rpc_module_paths=subactor_exposed_mods,
        )
def open_shm_array(
    key: Optional[str] = None,
    # approx number of 5s bars in a "day" x2
    size: int = int(2 * 60 * 60 * 10 / 5),
    dtype: Optional[np.dtype] = None,
    readonly: bool = False,
) -> ShmArray:
    """Open a memory-shared ``numpy`` array using the standard library.

    This call unlinks (aka permanently destroys) the buffer on teardown
    and thus should be used from the parent-most accessor (process).
    """
    # create new shared mem segment for which we
    # have write permission
    a = np.zeros(size, dtype=dtype)
    shm = shared_memory.SharedMemory(name=key, create=True, size=a.nbytes)
    array = np.ndarray(a.shape, dtype=a.dtype, buffer=shm.buf)
    array[:] = a[:]
    array.setflags(write=int(not readonly))

    token = _make_token(key=key, dtype=dtype)

    counter = SharedInt(
        token=token.shm_counter_name,
        create=True,
    )
    counter.value = 0

    shmarr = ShmArray(
        array,
        counter,
        shm,
        readonly=readonly,
    )

    assert shmarr._token == token
    _known_tokens[key] = shmarr.token

    # "unlink" created shm on process teardown by
    # pushing teardown calls onto actor context stack
    actor = tractor.current_actor()
    actor._lifetime_stack.callback(shmarr.close)
    actor._lifetime_stack.callback(shmarr.destroy)
    return shmarr
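# Sketch of the writer side, assuming an ohlc dtype like the
# ``base_iohlc_dtype`` referenced elsewhere in this code (the key is
# illustrative):
shm = open_shm_array(
    key='ohlc.SPY',
    dtype=base_iohlc_dtype,  # assumed module-level dtype
)
token_msg = shm.token  # serializable; hand this to the reader process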
async def spawn_and_error(breadth, depth) -> None:
    name = tractor.current_actor().name
    async with tractor.open_nursery() as nursery:
        for i in range(breadth):

            if depth > 0:
                args = (spawn_and_error,)
                kwargs = {
                    'name': f'spawner_{i}_depth_{depth}',
                    'breadth': breadth,
                    'depth': depth - 1,
                }
            else:
                args = (assert_err,)
                kwargs = {
                    'name': f'{name}_errorer_{i}',
                }
            await nursery.run_in_actor(*args, **kwargs)
async def maybe_open_runtime(
    loglevel: Optional[str] = None,
    **kwargs,
) -> None:
    """
    Start the ``tractor`` runtime (a root actor) if none exists.
    """
    settings = _tractor_kwargs
    settings.update(kwargs)

    if not tractor.current_actor(err_on_no_runtime=False):
        async with tractor.open_root_actor(
            loglevel=loglevel,
            **settings,
        ):
            yield
    else:
        yield
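# Assumed usage (the ``yield`` implies ``@asynccontextmanager``
# decoration in the source):
async def my_service():
    # idempotent: opens a root actor only when one isn't already running
    async with maybe_open_runtime(loglevel='info'):
        assert tractor.current_actor()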
def attach_shm_array(
    token: Tuple[str, str, Tuple[str, str]],
    size: int = int(60 * 60 * 10 / 5),
    readonly: bool = True,
) -> ShmArray:
    """Load and attach to an existing shared memory array previously
    created by another process using ``open_shared_array``.
    """
    token = _Token.from_msg(token)
    key = token.shm_name
    if key in _known_tokens:
        assert _known_tokens[key] == token, "WTF"

    shm = shared_memory.SharedMemory(name=key)
    shmarr = np.ndarray(
        (size,),
        dtype=token.dtype_descr,
        buffer=shm.buf,
    )
    shmarr.setflags(write=int(not readonly))

    counter = SharedInt(token=token.shm_counter_name)
    # make sure we can read
    counter.value

    sha = ShmArray(
        shmarr,
        counter,
        shm,
        readonly=readonly,
    )
    # read test
    sha.array

    # Stash key -> token knowledge for future queries
    # via `maybe_open_shm_array()` but only after we know
    # we can attach.
    if key not in _known_tokens:
        _known_tokens[key] = token

    # "close" attached shm on process teardown
    actor = tractor.current_actor()
    actor._lifetime_stack.callback(sha.close)
    return sha
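# And the matching reader side for the shm pair above, using the token
# produced by ``open_shm_array`` (variable names are illustrative):
sha = attach_shm_array(token=token_msg)
print(sha.array[-1])  # most recent row written by the producer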
async def open_piker_runtime(
    name: str,
    enable_modules: list[str] = [],
    start_method: str = 'trio',
    loglevel: Optional[str] = None,

    # XXX: you should pretty much never want debug mode
    # for data daemons when running in production.
    debug_mode: bool = False,

) -> Optional[tractor._portal.Portal]:
    '''
    Start a piker actor whose runtime will automatically
    sync with existing piker actors on the local network
    based on configuration.

    '''
    global _services
    assert _services is None

    # XXX: this may open a root actor as well
    async with (
        tractor.open_root_actor(
            # passed through to ``open_root_actor``
            arbiter_addr=_registry_addr,
            name=name,
            loglevel=loglevel,
            debug_mode=debug_mode,
            start_method=start_method,

            # TODO: eventually we should be able to avoid
            # having the root have more than permissions to
            # spawn other specialized daemons I think?
            enable_modules=_root_modules,
        ) as _,
    ):
        yield tractor.current_actor()
async def _aio_get_client(
    host: str = '127.0.0.1',
    port: int = None,
    client_id: Optional[int] = None,
) -> Client:
    """Return an ``ib_insync.IB`` instance wrapped in our client API.
    """
    if client_id is None:
        # if this is a persistent brokerd, try to allocate a new id for
        # each client
        try:
            ss = tractor.current_actor().statespace
            client_id = next(ss.setdefault('client_ids', itertools.count()))
            # TODO: in case the arbiter has no record
            # of existing brokerd we need to broadcast for one.
        except RuntimeError:
            # tractor likely isn't running
            client_id = 1

    ib = NonShittyIB()
    ports = _try_ports if port is None else [port]

    _err = None
    for port in ports:
        try:
            await ib.connectAsync(host, port, clientId=client_id)
            break
        except ConnectionRefusedError as ce:
            _err = ce
            log.warning(f'Failed to connect on {port}')
    else:
        raise ConnectionRefusedError(_err)

    try:
        yield Client(ib)
    except BaseException:
        ib.disconnect()
        raise
async def get_cached_client(
    brokername: str,
    *args,
    **kwargs,
) -> 'Client':
    """Get a cached broker client from the current actor's local vars.

    If one has not been setup do it and cache it.
    """
    # check if a cached client is in the local actor's statespace
    ss = tractor.current_actor().statespace
    clients = ss.setdefault('clients', {'_lock': trio.Lock()})
    lock = clients['_lock']
    client = None
    try:
        log.info(f"Loading existing `{brokername}` daemon")
        async with lock:
            client = clients[brokername]
            client._consumers += 1
        yield client

    except KeyError:
        log.info(f"Creating new client for broker {brokername}")
        async with lock:
            brokermod = get_brokermod(brokername)
            exit_stack = contextlib.AsyncExitStack()
            client = await exit_stack.enter_async_context(
                brokermod.get_client())
            client._consumers = 0
            client._exit_stack = exit_stack
            clients[brokername] = client
        yield client

    finally:
        # guard against failures before a client was ever assigned
        if client is not None:
            client._consumers -= 1
            if client._consumers <= 0:
                # teardown the client
                await client._exit_stack.aclose()
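# A usage sketch mirroring the other cached-resource helpers here
# (presumes ``@asynccontextmanager`` decoration; the broker name is
# illustrative and ``symbol_info`` is the client call seen in
# ``stream_quotes`` above):
async def do_work():
    # concurrent tasks share one ref-counted client per broker
    async with get_cached_client('questrade') as client:
        print(await client.symbol_info(['SPY']))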