Example #1
async def inf_streamer(
    ctx: tractor.Context,

) -> None:
    '''
    Stream increasing ints until terminated with a 'done' msg.

    '''
    await ctx.started()

    async with (
        ctx.open_stream() as stream,
        trio.open_nursery() as n,
    ):
        async def bail_on_sentinel():
            async for msg in stream:
                if msg == 'done':
                    await stream.aclose()
                else:
                    print(f'streamer received {msg}')

        # start termination detector
        n.start_soon(bail_on_sentinel)

        for val in itertools.count():
            try:
                await stream.send(val)
            except trio.ClosedResourceError:
                # the sentinel task closed the stream; stop sending
                break

    print('terminating streamer')
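
For context, a minimal caller-side sketch (not part of the original listing) for driving this callee; it assumes the function above lives in the same module, and the actor name is illustrative:

import trio
import tractor


async def main():
    async with tractor.open_nursery() as an:
        portal = await an.start_actor(
            'streamer',  # illustrative name
            enable_modules=[__name__],
        )
        async with (
            portal.open_context(inf_streamer) as (ctx, _),
            ctx.open_stream() as stream,
        ):
            async for val in stream:
                if val >= 5:
                    # request termination via the sentinel
                    await stream.send('done')
                    break

        await portal.cancel_actor()


if __name__ == '__main__':
    trio.run(main)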
Example #2
async def simple_rpc(
    ctx: tractor.Context,
    data: int,
) -> None:
    '''
    Test a small ping-pong server.

    '''
    # signal to parent that we're up
    await ctx.started(data + 1)

    print('opening stream in callee')
    async with ctx.open_stream() as stream:

        count = 0
        while True:
            try:
                assert await stream.receive() == 'ping'
            except trio.EndOfChannel:
                assert count == 10
                break
            else:
                print('pong')
                await stream.send('pong')
                count += 1
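
A caller-side sketch for this ping-pong callee (an assumption, not from the original source): it sends ten pings then closes the stream, which surfaces as ``trio.EndOfChannel`` in the callee:

import trio
import tractor


async def main():
    async with tractor.open_nursery() as an:
        portal = await an.start_actor(
            'rpc_server',  # illustrative name
            enable_modules=[__name__],
        )
        async with portal.open_context(
            simple_rpc,
            data=10,
        ) as (ctx, sent):
            # the callee signalled readiness with ``data + 1``
            assert sent == 11

            async with ctx.open_stream() as stream:
                for _ in range(10):
                    await stream.send('ping')
                    assert await stream.receive() == 'pong'

            # exiting the block closes the stream; the callee's
            # ``stream.receive()`` then raises ``trio.EndOfChannel``

        await portal.cancel_actor()


if __name__ == '__main__':
    trio.run(main)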
Example #3
async def cancel_self(

    ctx: tractor.Context,

) -> None:
    global _state
    _state = True

    await ctx.cancel()

    # the stream open below should raise ``ContextCancelled`` immediately
    try:
        async with ctx.open_stream():
            pass
    except tractor.ContextCancelled:
        # suppress for now so we can do checkpoint tests below
        pass
    else:
        raise RuntimeError('Context didnt cancel itself?!')

    # check a real ``trio.Cancelled`` is raised on a checkpoint
    try:
        with trio.fail_after(0.1):
            await trio.sleep_forever()
    except trio.Cancelled:
        raise

    except trio.TooSlowError:
        # should never get here
        assert 0
Example #4
async def subscribe(

    ctx: tractor.Context,

) -> None:

    global _registry

    # sync caller
    await ctx.started(None)

    async with ctx.open_stream() as stream:

        # update subs list as consumer requests
        async for new_subs in stream:

            new_subs = set(new_subs)
            # subs currently registered but no longer requested
            remove = _registry.keys() - new_subs

            print(f'setting sub to {new_subs} for {ctx.chan.uid}')

            # remove old subs
            for sub in remove:
                # ``discard`` avoids ``KeyError`` if this consumer
                # never subscribed to ``sub``
                _registry[sub].discard(stream)

            # add new subs for consumer
            for sub in new_subs:
                _registry[sub].add(stream)
Example #5
async def worker(ctx: tractor.Context) -> None:
    await ctx.started()
    async with ctx.open_stream(backpressure=True) as stream:
        async for msg in stream:
            # do something with msg
            print(msg)
            assert msg == MESSAGE
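
``backpressure=True`` asks the stream to buffer (and eventually block the sender) rather than error when the consumer lags. A producer-side sketch under that assumption; ``MESSAGE`` is defined at module scope in the original test, its value below is illustrative:

import trio
import tractor

MESSAGE = 'hello'  # illustrative; the original test defines this at module scope


async def main():
    async with tractor.open_nursery() as an:
        portal = await an.start_actor(
            'worker',  # illustrative name
            enable_modules=[__name__],
        )
        async with (
            portal.open_context(worker) as (ctx, _),
            ctx.open_stream() as stream,
        ):
            # burst-send faster than the consumer prints; with
            # backpressure enabled this blocks instead of raising
            # an overrun error
            for _ in range(100):
                await stream.send(MESSAGE)

        await portal.cancel_actor()


if __name__ == '__main__':
    trio.run(main)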
Example #6
async def echo_back_sequence(

    ctx:  tractor.Context,
    seq: list[int],
    msg_buffer_size: Optional[int] = None,

) -> None:
    '''
    Echo back each batch of msgs matching ``seq``, three times over.

    '''
    await ctx.started()
    async with ctx.open_stream(
        msg_buffer_size=msg_buffer_size,
    ) as stream:

        seq = list(seq)  # bleh, `msgpack`...
        count = 0
        while count < 3:
            batch = []
            async for msg in stream:
                batch.append(msg)
                if batch == seq:
                    break

            for msg in batch:
                print(f'callee sending {msg}')
                await stream.send(msg)

            count += 1

        return 'yo'
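
A caller-side sketch (assumed, not the original test code): send the sequence three times, read back each echoed batch, then collect the callee's final return value via ``ctx.result()``:

import trio
import tractor


async def main():
    seq = list(range(3))

    async with tractor.open_nursery() as an:
        portal = await an.start_actor(
            'echoer',  # illustrative name
            enable_modules=[__name__],
        )
        async with portal.open_context(
            echo_back_sequence,
            seq=seq,
        ) as (ctx, _):
            async with ctx.open_stream() as stream:
                for _ in range(3):
                    for val in seq:
                        await stream.send(val)

                    # collect one echoed batch
                    echoed = []
                    async for msg in stream:
                        echoed.append(msg)
                        if len(echoed) == len(seq):
                            break
                    assert echoed == seq

            # the callee's ``return 'yo'`` is delivered as the
            # context result
            assert await ctx.result() == 'yo'

        await portal.cancel_actor()


if __name__ == '__main__':
    trio.run(main)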
Example #7
async def iter_ohlc_periods(
    ctx: tractor.Context,
    delay_s: int,
) -> None:
    '''
    Subscribe to OHLC sampling "step" events: when the time
    aggregation period increments, this event stream emits an index
    event.

    '''
    # add our subscription
    subs = sampler.subscribers.setdefault(delay_s, [])
    await ctx.started()
    async with ctx.open_stream() as stream:
        subs.append(stream)

        try:
            # stream and block until cancelled
            await trio.sleep_forever()
        finally:
            try:
                subs.remove(stream)
            except ValueError:
                log.error(
                    f'iOHLC step stream was already dropped for {ctx.chan.uid}?')
Example #8
async def one_task_streams_and_one_handles_reqresp(

    ctx: tractor.Context,

) -> None:

    await ctx.started()

    async with ctx.open_stream() as stream:

        async def pingpong():
            '''Run a simple req/response service.

            '''
            async for msg in stream:
                print('rpc server ping')
                assert msg == 'ping'
                print('rpc server pong')
                await stream.send('pong')

        async with trio.open_nursery() as n:
            n.start_soon(pingpong)

            for _ in itertools.count():
                await stream.send('yo')
                await trio.sleep(0.01)
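
A sketch of the matching caller (assumed, not from the original): one-way 'yo' traffic and ping/pong replies are interleaved over the same stream, so the caller dispatches on msg content and explicitly cancels the forever-streaming callee:

import trio
import tractor


async def main():
    async with tractor.open_nursery() as an:
        portal = await an.start_actor(
            'mux',  # illustrative name
            enable_modules=[__name__],
        )
        async with (
            portal.open_context(
                one_task_streams_and_one_handles_reqresp,
            ) as (ctx, _),
            ctx.open_stream() as stream,
        ):
            await stream.send('ping')

            pongs = 0
            async for msg in stream:
                if msg == 'pong':
                    pongs += 1
                    if pongs == 3:
                        break
                    await stream.send('ping')
                else:
                    # interleaved one-way traffic from the streamer task
                    assert msg == 'yo'

            # the callee streams forever; request remote cancellation
            await ctx.cancel()

        await portal.cancel_actor()


if __name__ == '__main__':
    trio.run(main)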
Example #9
async def echo_ctx_stream(
    ctx: tractor.Context,
) -> None:
    await ctx.started()

    async with ctx.open_stream() as stream:
        async for msg in stream:
            await stream.send(msg)
Example #10
File: _debug.py Project: goodboy/tractor
async def _hijack_stdin_for_child(ctx: tractor.Context,
                                  subactor_uid: Tuple[str, str]) -> str:
    '''
    Hijack the tty in the root process of an actor tree such that
    the pdbpp debugger console can be allocated to a sub-actor for repl
    bossing.

    '''
    task_name = trio.lowlevel.current_task().name

    # TODO: when we get to true remote debugging
    # this will deliver stdin data?

    log.debug("Attempting to acquire TTY lock\n"
              f"remote task: {task_name}:{subactor_uid}")

    log.debug(f"Actor {subactor_uid} is WAITING on stdin hijack lock")

    with trio.CancelScope(shield=True):

        try:
            lock = None
            async with _acquire_debug_lock(subactor_uid) as lock:

                # indicate to child that we've locked stdio
                await ctx.started('Locked')
                log.debug(f"Actor {subactor_uid} acquired stdin hijack lock")

                # wait for unlock pdb by child
                async with ctx.open_stream() as stream:
                    assert await stream.receive() == 'pdb_unlock'

        except (
                # BaseException,
                trio.MultiError,
                trio.BrokenResourceError,
                trio.Cancelled,  # by local cancellation
                trio.ClosedResourceError,  # by self._rx_chan
        ) as err:
            # XXX: there may be a race with the portal teardown
            # with the calling actor which we can safely ignore.
            # The alternative would be sending an ack message
            # and allowing the client to wait for us to teardown
            # first?
            if lock and lock.locked():
                lock.release()

            if isinstance(err, trio.Cancelled):
                raise
        finally:
            log.debug("TTY lock released, remote task:"
                      f"{task_name}:{subactor_uid}")

    return "pdb_unlock_complete"
Example #11
async def close_ctx_immediately(

    ctx: tractor.Context,

) -> None:

    await ctx.started()
    global _state

    async with ctx.open_stream():
        pass
Example #12
async def not_started_but_stream_opened(
    ctx: tractor.Context,
) -> None:
    '''
    Enter ``Context.open_stream()`` without calling ``.started()``.

    '''
    try:
        async with ctx.open_stream():
            assert 0
    except RuntimeError:
        # expected: a stream can't be opened before ``.started()``
        raise
Example #13
async def streamer(
    ctx: tractor.Context,
    seq: list[int] = list(range(1000)),
) -> None:

    await ctx.started()
    async with ctx.open_stream() as stream:
        for val in seq:
            await stream.send(val)
            await trio.sleep(0.001)

    print('producer finished')
Example #14
async def trio_to_aio_echo_server(
    ctx: tractor.Context,
):

    async def aio_echo_server(
        to_trio: trio.MemorySendChannel,
        from_trio: asyncio.Queue,
    ) -> None:

        to_trio.send_nowait('start')

        while True:
            msg = await from_trio.get()

            # echo the msg back
            to_trio.send_nowait(msg)

            # if we get the terminate sentinel
            # break the echo loop
            if msg is None:
                print('breaking aio echo loop')
                break

    async with to_asyncio.open_channel_from(
        aio_echo_server,
    ) as (first, chan):

        assert first == 'start'
        await ctx.started(first)

        async with ctx.open_stream() as stream:

            async for msg in stream:
                print(f'asyncio echoing {msg}')
                await chan.send(msg)

                out = await chan.receive()
                # echo back to parent actor-task
                await stream.send(out)

                if out is None:
                    try:
                        out = await chan.receive()
                    except trio.EndOfChannel:
                        break
                    else:
                        raise RuntimeError('aio channel never stopped?')
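
Caller-side sketch (assumed): the target actor must host an ``asyncio`` loop, which ``tractor`` enables via the ``infect_asyncio=True`` spawn flag; ``None`` doubles as the echo payload and the termination sentinel:

import trio
import tractor


async def main():
    async with tractor.open_nursery() as an:
        portal = await an.start_actor(
            'aio_server',  # illustrative name
            enable_modules=[__name__],
            infect_asyncio=True,
        )
        async with (
            portal.open_context(trio_to_aio_echo_server) as (ctx, first),
            ctx.open_stream() as stream,
        ):
            assert first == 'start'

            for i in range(3):
                await stream.send(i)
                assert await stream.receive() == i

            # ``None`` terminates the asyncio echo loop
            await stream.send(None)
            assert await stream.receive() is None

        await portal.cancel_actor()


if __name__ == '__main__':
    trio.run(main)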
Example #15
async def echo_sequences(

    ctx:  tractor.Context,

) -> None:
    '''Bidir streaming endpoint which will stream
    back any sequence it is sent item-wise.

    '''
    await ctx.started()

    async with ctx.open_stream() as stream:
        async for sequence in stream:
            seq = list(sequence)
            for value in seq:
                await stream.send(value)
                print(f'producer sent {value}')
Example #16
async def trio_to_aio_echo_server(
    ctx: tractor.Context,
):
    # this will block until the ``asyncio`` task sends a "first"
    # message.
    async with tractor.to_asyncio.open_channel_from(
        aio_echo_server,  # as defined in example #14 above
    ) as (first, chan):

        assert first == 'start'
        await ctx.started(first)

        async with ctx.open_stream() as stream:

            async for msg in stream:
                await chan.send(msg)

                out = await chan.receive()
                # echo back to parent actor-task
                await stream.send(out)
Example #17
async def keep_sending_from_callee(

    ctx:  tractor.Context,
    msg_buffer_size: Optional[int] = None,

) -> None:
    '''
    Send endlessly on the callee stream.

    '''
    await ctx.started()
    async with ctx.open_stream(
        msg_buffer_size=msg_buffer_size,
    ) as stream:
        for msg in count():
            print(f'callee sending {msg}')
            await stream.send(msg)
            await trio.sleep(0.01)
Example #18
async def trades_dialogue(

    ctx: tractor.Context,
    broker: str,
    fqsn: str,
    loglevel: str = None,

) -> None:
    tractor.log.get_console_log(loglevel)

    async with (

        data.open_feed(
            [fqsn],
            loglevel=loglevel,
        ) as feed,

    ):
        # TODO: load paper positions per broker from .toml config file
        # and pass as symbol to position data mapping: ``dict[str, dict]``
        # await ctx.started(all_positions)
        await ctx.started(({}, {'paper',}))

        async with (
            ctx.open_stream() as ems_stream,
            trio.open_nursery() as n,
        ):

            client = PaperBoi(
                broker,
                ems_stream,
                _buys={},
                _sells={},

                _reqids={},

                # TODO: load paper positions from ``positions.toml``
                _positions={},
            )

            n.start_soon(handle_order_requests, client, ems_stream)

            # paper engine simulator clearing task
            await simulate_fills(feed.stream, client)
Example #19
async def expect_cancelled(

    ctx: tractor.Context,

) -> None:
    global _state
    _state = True

    await ctx.started()

    try:
        async with ctx.open_stream() as stream:
            async for msg in stream:
                await stream.send(msg)  # echo server

    except trio.Cancelled:
        # expected case
        _state = False
        raise

    else:
        assert 0, "Wasn't cancelled!?"
Example #20
async def simple_rpc_with_forloop(
    ctx: tractor.Context,
    data: int,
) -> None:
    """Same as previous test but using ``async for`` syntax/api.

    """

    # signal to parent that we're up
    await ctx.started(data + 1)

    print('opening stream in callee')
    async with ctx.open_stream() as stream:

        count = 0
        async for msg in stream:

            assert msg == 'ping'
            print('pong')
            await stream.send('pong')
            count += 1

        else:
            assert count == 10
Example #21
File: binance.py Project: pikers/piker
async def open_symbol_search(
    ctx: tractor.Context,
) -> Client:
    async with open_cached_client('binance') as client:

        # load all symbols locally for fast search
        cache = await client.cache_symbols()
        await ctx.started()

        async with ctx.open_stream() as stream:

            async for pattern in stream:
                # results = await client.symbol_info(sym=pattern.upper())

                matches = fuzzy.extractBests(
                    pattern,
                    cache,
                    score_cutoff=50,
                )
                # repack in dict form
                await stream.send(
                    {item[0]['symbol']: item[0]
                     for item in matches}
                )
Example #22
async def simple_rpc(

    ctx: tractor.Context,
    data: int,

) -> None:
    '''Test a small ping-pong 2-way streaming server.

    '''
    # signal to parent that we're up much like
    # ``trio_typing.TaskStatus.started()``
    await ctx.started(data + 1)

    async with ctx.open_stream() as stream:

        count = 0
        async for msg in stream:

            assert msg == 'ping'
            await stream.send('pong')
            count += 1

        else:
            assert count == 10
Example #23
File: kraken.py Project: pikers/piker
async def open_symbol_search(
    ctx: tractor.Context,

) -> Client:
    async with open_cached_client('kraken') as client:

        # load all symbols locally for fast search
        cache = await client.cache_symbols()
        await ctx.started(cache)

        async with ctx.open_stream() as stream:

            async for pattern in stream:

                matches = fuzzy.extractBests(
                    pattern,
                    cache,
                    score_cutoff=50,
                )
                # repack in dict form
                await stream.send(
                    {item[0]['altname']: item[0]
                     for item in matches}
                )
Example #24
async def sleep_forever(
    ctx: tractor.Context,
) -> None:
    await ctx.started()
    async with ctx.open_stream():
        await trio.sleep_forever()
Example #25
File: kraken.py Project: pikers/piker
async def trades_dialogue(
    ctx: tractor.Context,
    loglevel: str = None,
) -> AsyncIterator[dict[str, Any]]:

    # XXX: required to propagate ``tractor`` loglevel to piker logging
    get_console_log(loglevel or tractor.current_actor().loglevel)

    @acm
    async def subscribe(ws: wsproto.WSConnection, token: str):
        # XXX: setup subs
        # https://docs.kraken.com/websockets/#message-subscribe
        # specific logic for this in kraken's shitty sync client:
        # https://github.com/krakenfx/kraken-wsclient-py/blob/master/kraken_wsclient_py/kraken_wsclient_py.py#L188
        trades_sub = make_auth_sub(
            {'name': 'ownTrades', 'token': token}
        )

        # TODO: we want to eventually allow unsubs which should
        # be completely fine to request from a separate task
        # since internally the ws methods appear to be FIFO
        # locked.
        await ws.send_msg(trades_sub)

        yield

        # unsub from all pairs on teardown
        await ws.send_msg({
            'event': 'unsubscribe',
            'subscription': ['ownTrades'],
        })

        # XXX: do we need to ack the unsub?
        # await ws.recv_msg()

    # Authenticated block
    async with get_client() as client:
        if not client._api_key:
            log.error('Missing Kraken API key: Trades WS connection failed')
            await ctx.started(({}, ['paper']))

            async with (
                ctx.open_stream() as ems_stream,
                trio.open_nursery() as n,
            ):

                client = PaperBoi(
                    'kraken',
                    ems_stream,
                    _buys={},
                    _sells={},

                    _reqids={},

                    # TODO: load paper positions from ``positions.toml``
                    _positions={},
                )

                # TODO: maybe add multiple accounts
                n.start_soon(handle_order_requests, client, ems_stream)

        acc_name = 'kraken.' + client._name
        trades = await client.get_trades()

        position_msgs = pack_positions(acc_name, trades)

        await ctx.started((position_msgs, (acc_name,)))

        # Get websocket token for authenticated data stream
        # Assert that a token was actually received.
        resp = await client.endpoint('GetWebSocketsToken', {})
        assert resp['error'] == []
        token = resp['result']['token']

        async with (
            ctx.open_stream() as ems_stream,
            trio.open_nursery() as n,
        ):
            # TODO: maybe add multiple accounts
            n.start_soon(handle_order_requests, client, ems_stream)

            # Process trades msg stream of ws
            async with open_autorecon_ws(
                'wss://ws-auth.kraken.com/',
                fixture=subscribe,
                token=token,
            ) as ws:
                async for msg in process_trade_msgs(ws):
                    for trade in msg:
                        # check the type of packaged message
                        assert type(trade) == Trade

                        # prepare and send a filled status update
                        filled_msg = BrokerdStatus(
                            reqid=trade.reqid,
                            time_ns=time.time_ns(),

                            account='kraken.spot',
                            status='filled',
                            filled=float(trade.size),
                            reason='Order filled by kraken',
                            broker_details={
                                'name': 'kraken',
                                'broker_time': trade.broker_time
                            },

                            # TODO: figure out if kraken gives a count
                            # of how many units of underlying were
                            # filled. Alternatively we can decrement
                            # this value ourselves by associating and
                            # calcing from the diff with the original
                            # client-side request, see:
                            # https://github.com/pikers/piker/issues/296
                            remaining=0,
                        )

                        await ems_stream.send(filled_msg.dict())

                        # send a fill msg for gui update
                        fill_msg = BrokerdFill(
                            reqid=trade.reqid,
                            time_ns=time.time_ns(),

                            action=trade.action,
                            size=float(trade.size),
                            price=float(trade.price),
                            # TODO: maybe capture more msg data i.e fees?
                            broker_details={'name': 'kraken'},
                            broker_time=float(trade.broker_time)
                        )

                        await ems_stream.send(fill_msg.dict())
Example #26
File: feed.py Project: pikers/piker
async def open_feed_bus(

    ctx: tractor.Context,
    brokername: str,
    symbol: str,  # normally expected to be the broker-specific fqsn
    loglevel: str,
    tick_throttle:  Optional[float] = None,
    start_stream: bool = True,

) -> None:
    '''
    Open a data feed "bus": an actor-persistent per-broker task-oriented
    data feed registry which allows managing real-time quote streams per
    symbol.

    '''
    if loglevel is None:
        loglevel = tractor.current_actor().loglevel

    # XXX: required to propagate ``tractor`` loglevel to piker logging
    get_console_log(loglevel or tractor.current_actor().loglevel)

    # local state sanity checks
    # TODO: check for any stale shm entries for this symbol
    # (after we also group them in a nice `/dev/shm/piker/` subdir).
    # ensure we are who we think we are
    servicename = tractor.current_actor().name
    assert 'brokerd' in servicename
    assert brokername in servicename

    bus = get_feed_bus(brokername)

    # if no cached feed for this symbol has been created for this
    # brokerd yet, start persistent stream and shm writer task in
    # service nursery
    entry = bus.feeds.get(symbol)
    if entry is None:
        # allocate a new actor-local stream bus which
        # will persist for this `brokerd`'s service lifetime.
        async with bus.task_lock:
            await bus.nursery.start(
                partial(
                    allocate_persistent_feed,

                    bus=bus,
                    brokername=brokername,
                    # here we pass through the selected symbol in native
                    # "format" (i.e. upper vs. lowercase depending on
                    # provider).
                    symbol=symbol,
                    loglevel=loglevel,
                    start_stream=start_stream,
                )
            )
            # TODO: we can remove this?
            assert isinstance(bus.feeds[symbol], tuple)

    # XXX: ``first_quotes`` may be outdated here if this is a
    # secondary subscriber
    init_msg, first_quotes = bus.feeds[symbol]

    msg = init_msg[symbol]
    bfqsn = msg['fqsn'].lower()

    # true fqsn
    fqsn = '.'.join([bfqsn, brokername])
    assert fqsn in first_quotes
    assert bus.feeds[bfqsn]

    # broker-ambiguous symbol (provided on cli - eg. mnq.globex.ib)
    bsym = symbol + f'.{brokername}'
    assert bsym in first_quotes

    # we use the broker-specific fqsn (bfqsn) for
    # the sampler subscription since the backend isn't (yet)
    # expected to append its own name to the fqsn, so we filter
    # on keys which *do not* include that name (e.g. .ib).
    bus._subscribers.setdefault(bfqsn, [])

    # send this even to subscribers of an existing feed?
    # deliver the initial info message and first quotes asap
    await ctx.started((
        init_msg,
        first_quotes,
    ))

    if not start_stream:
        log.warning(f'Not opening real-time stream for {fqsn}')
        await trio.sleep_forever()

    # real-time stream loop
    async with (
        ctx.open_stream() as stream,
    ):
        # re-send to trigger display loop cycle (necessary especially
        # when the mkt is closed and no real-time messages are
        # expected).
        await stream.send({fqsn: first_quotes})

        # open a bg task which receives quotes over a mem chan
        # and only pushes them to the target actor-consumer at
        # a max ``tick_throttle`` instantaneous rate.
        if tick_throttle:
            send, recv = trio.open_memory_channel(2**10)
            cs = await bus.start_task(
                uniform_rate_send,
                tick_throttle,
                recv,
                stream,
            )
            sub = (send, tick_throttle)

        else:
            sub = (stream, tick_throttle)

        subs = bus._subscribers[bfqsn]
        subs.append(sub)

        try:
            uid = ctx.chan.uid

            # ctrl protocol for start/stop of quote streams based on UI
            # state (eg. don't need a stream when a symbol isn't being
            # displayed).
            async for msg in stream:

                if msg == 'pause':
                    if sub in subs:
                        log.info(
                            f'Pausing {fqsn} feed for {uid}')
                        subs.remove(sub)

                elif msg == 'resume':
                    if sub not in subs:
                        log.info(
                            f'Resuming {fqsn} feed for {uid}')
                        subs.append(sub)
                else:
                    raise ValueError(msg)
        finally:
            log.info(
                f'Stopping {symbol}.{brokername} feed for {ctx.chan.uid}')

            if tick_throttle:
                # TODO: a one-cancels-one nursery
                # n.cancel_scope.cancel()
                cs.cancel()
            try:
                bus._subscribers[bfqsn].remove(sub)
            except ValueError:
                log.warning(f'{sub} for {symbol} was already removed?')
Example #27
async def cascade(

    ctx: tractor.Context,

    # data feed key
    fqsn: str,

    src_shm_token: dict,
    dst_shm_token: tuple[str, np.dtype],

    ns_path: NamespacePath,

    shm_registry: dict[str, _Token],

    zero_on_step: bool = False,
    loglevel: Optional[str] = None,

) -> None:
    '''
    Chain streaming signal processors and deliver output to
    destination shm array buffer.

    '''
    profiler = pg.debug.Profiler(
        delayed=False,
        disabled=False
    )

    if loglevel:
        get_console_log(loglevel)

    src = attach_shm_array(token=src_shm_token)
    dst = attach_shm_array(readonly=False, token=dst_shm_token)

    reg = _load_builtins()
    lines = '\n'.join([f'{key.rpartition(":")[2]} => {key}' for key in reg])
    log.info(
        f'Registered FSP set:\n{lines}'
    )

    # update actorlocal flows table which registers
    # readonly "instances" of this fsp for symbol/source
    # so that consumer fsps can look it up by source + fsp.
    # TODO: ugh i hate this wind/unwind to list over the wire
    # but not sure how else to do it.
    for (token, fsp_name, dst_token) in shm_registry:
        Fsp._flow_registry[
            (_Token.from_msg(token), fsp_name)
        ] = _Token.from_msg(dst_token)

    fsp: Fsp = reg.get(
        NamespacePath(ns_path)
    )
    func = fsp.func

    if not func:
        # TODO: assume it's a func target path
        raise ValueError(f'Unknown fsp target: {ns_path}')

    # open a data feed stream with requested broker
    async with data.feed.maybe_open_feed(
        [fqsn],

        # TODO throttle tick outputs from *this* daemon since
        # it'll emit tons of ticks due to the throttle only
        # limits quote arrival periods, so the consumer of *this*
        # needs to get throttled the ticks we generate.
        # tick_throttle=60,

    ) as (feed, quote_stream):
        symbol = feed.symbols[fqsn]

        profiler(f'{func}: feed up')

        assert src.token == feed.shm.token
        # last_len = new_len = len(src.array)

        func_name = func.__name__
        async with (
            trio.open_nursery() as n,
        ):

            fsp_target = partial(

                fsp_compute,
                symbol=symbol,
                feed=feed,
                quote_stream=quote_stream,

                # shm
                src=src,
                dst=dst,

                # target
                func=func
            )

            tracker, index = await n.start(fsp_target)

            if zero_on_step:
                last = dst.array[-1:]
                zeroed = np.zeros(last.shape, dtype=last.dtype)

            profiler(f'{func_name}: fsp up')

            # sync client
            await ctx.started(index)

            # XXX:  rt stream with client which we MUST
            # open here (and keep it open) in order to make
            # incremental "updates" as history prepends take
            # place.
            async with ctx.open_stream() as client_stream:

                # TODO: these likely should all become
                # methods of this ``TaskLifetime`` or wtv
                # abstraction..
                async def resync(
                    tracker: TaskTracker,

                ) -> tuple[TaskTracker, int]:
                    # TODO: adopt an incremental update engine/approach
                    # where possible here eventually!
                    log.warning(f're-syncing fsp {func_name} to source')
                    tracker.cs.cancel()
                    await tracker.complete.wait()
                    tracker, index = await n.start(fsp_target)

                    # always trigger UI refresh after history update,
                    # see ``piker.ui._fsp.FspAdmin.open_chain()`` and
                    # ``piker.ui._display.trigger_update()``.
                    await client_stream.send('update')
                    return tracker, index

                def is_synced(
                    src: ShmArray,
                    dst: ShmArray
                ) -> tuple[bool, int, int]:
                    '''Predicate to determine if a destination FSP
                    output array is aligned to its source array.

                    '''
                    step_diff = src.index - dst.index
                    len_diff = abs(len(src.array) - len(dst.array))
                    return not (
                        # the source is likely backfilling and we must
                        # sync history calculations
                        len_diff > 2 or

                        # we aren't step synced to the source and may be
                        # leading/lagging by a step
                        step_diff > 1 or
                        step_diff < 0
                    ), step_diff, len_diff

                async def poll_and_sync_to_step(

                    tracker: TaskTracker,
                    src: ShmArray,
                    dst: ShmArray,

                ) -> tuple[TaskTracker, int]:

                    synced, step_diff, _ = is_synced(src, dst)
                    while not synced:
                        tracker, index = await resync(tracker)
                        synced, step_diff, _ = is_synced(src, dst)

                    return tracker, step_diff

                s, step, ld = is_synced(src, dst)

                # detect sample period step for subscription to increment
                # signal
                times = src.array['time']
                delay_s = times[-1] - times[times != times[-1]][-1]

                # Increment the underlying shared memory buffer on every
                # "increment" msg received from the underlying data feed.
                async with feed.index_stream(
                    int(delay_s)
                ) as istream:

                    profiler(f'{func_name}: sample stream up')
                    profiler.finish()

                    async for _ in istream:

                        # respawn the compute task if the source
                        # array has been updated such that we compute
                        # new history from the (prepended) source.
                        synced, step_diff, _ = is_synced(src, dst)
                        if not synced:
                            tracker, step_diff = await poll_and_sync_to_step(
                                tracker,
                                src,
                                dst,
                            )

                            # skip adding a last bar since we should
                            # already be step aligned
                            if step_diff == 0:
                                continue

                        # read out last shm row, copy and write new row
                        array = dst.array

                        # some metrics like vlm should be reset
                        # to zero every step.
                        if zero_on_step:
                            last = zeroed
                        else:
                            last = array[-1:].copy()

                        dst.push(last)