Code example #1
    async def job(self) -> None:
        LONG_RUNNING_THRESHOLD = 0.1
        CANCEL_THRESHOLD = 10

        async with self._lock:
            job = None  # TODO identify the job
            nursery = None
            manually_cancelled = False
            begin = trio.current_time()
            try:
                async with trio.open_nursery() as nursery:
                    nursery.cancel_scope.deadline = begin + CANCEL_THRESHOLD

                    @nursery.start_soon
                    async def warn_long_running():
                        await trio.sleep(LONG_RUNNING_THRESHOLD)
                        logger.warning("Long running job on server loop: %s", job)

                    yield

                    # cancel the warning task
                    manually_cancelled = True
                    nursery.cancel_scope.cancel()
            finally:
                assert nursery is not None

                end = trio.current_time()
                if nursery.cancel_scope.cancelled_caught and not manually_cancelled:
                    logger.error("Long running job cancelled after %.1f ms: %s", (end - begin) * 1000, job)
                    raise trio.TooSlowError
                elif end - begin > LONG_RUNNING_THRESHOLD:
                    logger.warning("Long running job finished after %.1f ms: %s", (end - begin) * 1000, job)
Code example #2
 async def _result(self):
     begin = trio.current_time()
     try:
         exit, value = False, await self.stream.__anext__()
     except StopAsyncIteration:
         exit, value = True, None
     end = trio.current_time()
     self.index += 1
     return self.index, exit, end - begin, value
Code example #3
File: monitor.py Project: ziirish/burp-ui
 async def get_mon(self, ident) -> Monitor:
     self.logger.info(f'{ident} - Waiting for a monitor...')
     t1 = trio.current_time()
     mon = await self.pool.get()  # type: Monitor
     t2 = trio.current_time()
     t = t2 - t1
     self.logger.info(f'{ident} - Waited {t:.3f}s')
     yield mon
     self.logger.info(f'{ident} - Releasing monitor')
     await self.pool.put(mon)
Code example #4
async def task2():
    start = trio.current_time()

    print("task2: sleeping for 5 years")
    await trio.sleep(5 * YEAR)

    duration = trio.current_time() - start
    print("task2: woke up; clock says I've slept {} years"
          .format(duration / YEAR))

    print("task2: sleeping for 500 years")
    await trio.sleep(500 * YEAR)

    duration = trio.current_time() - start
    print("task2: slept {} years total".format(duration / YEAR))
Code example #5
File: rpc.py Project: M-o-a-T/qbroker
    async def run(self, msg):
        if self.call_conv == CC_DICT:
            a = ()
            k = msg.data
            if not isinstance(k, Mapping):
                assert k is None, k
                k = {}
        elif self.call_conv == CC_DATA:
            a = (msg.data,)
            k = {}
        else:
            a = (msg,)
            k = {}

        if self.call_conv == CC_TASK:
            await msg.conn.nursery.start(self._run, self.fn, msg)
        else:
            try:
                res = await coro_wrapper(self.fn, *a, **k)
                if res is not None:
                    await msg.reply(res)
            except Exception as exc:
                await msg.error(exc, _exit=self.debug)
            finally:
                with trio.CancelScope(shield=True, deadline=trio.current_time() + 1):  # trio.open_cancel_scope() in older trio releases
                    with suppress(AmqpClosedConnection):
                        await msg.aclose()
Code example #6
async def task1():
    start = trio.current_time()

    print("task1: sleeping for 1 year")
    await trio.sleep(YEAR)

    duration = trio.current_time() - start
    print("task1: woke up; clock says I've slept {} years"
          .format(duration / YEAR))

    print("task1: sleeping for 1 year, 100 times")
    for _ in range(100):
        await trio.sleep(YEAR)

    duration = trio.current_time() - start
    print("task1: slept {} years total".format(duration / YEAR))
Code example #7
async def test_multi_success(autojump_clock):
    sock, scenario = await run_scenario(
        80,
        [
            ("1.1.1.1", 0.5, "error"),
            ("2.2.2.2", 10, "success"),
            ("3.3.3.3", 10 - 1, "success"),
            ("4.4.4.4", 10 - 2, "success"),
            ("5.5.5.5", 0.5, "error"),
        ],
        happy_eyeballs_delay=1,
    )
    assert not scenario.sockets["1.1.1.1"].succeeded
    assert scenario.sockets["2.2.2.2"].succeeded
    assert scenario.sockets["3.3.3.3"].succeeded
    assert scenario.sockets["4.4.4.4"].succeeded
    assert not scenario.sockets["5.5.5.5"].succeeded
    assert sock.ip in ["2.2.2.2", "3.3.3.3", "4.4.4.4"]
    assert trio.current_time() == (0.5 + 10)
    assert scenario.connect_times == {
        "1.1.1.1": 0,
        "2.2.2.2": 0.5,
        "3.3.3.3": 1.5,
        "4.4.4.4": 2.5,
        "5.5.5.5": 3.5,
    }
Code example #8
File: mocked.py Project: PRIArobotics/HedgehogServer
    def get(self, time: float = None, default: T = None) -> T:
        if time is None:
            time = trio.current_time()

        i = bisect.bisect_right(self._times, time)
        if i == 0:
            return default
        return self._values[i - 1]
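A hedged sketch of the container that `get` above implies: parallel sorted lists of timestamps and values, where `get(t)` returns the value that was in effect at time `t`. The class name and the `add` method are hypothetical:

import bisect
import trio

class TimedValues:
    def __init__(self) -> None:
        self._times = []   # sorted timestamps
        self._values = []  # value that took effect at the matching timestamp

    def add(self, value, time=None):
        # assumes values are recorded in time order, keeping _times sorted
        if time is None:
            time = trio.current_time()
        self._times.append(time)
        self._values.append(value)

    def get(self, time=None, default=None):
        if time is None:
            time = trio.current_time()
        i = bisect.bisect_right(self._times, time)
        if i == 0:
            return default  # time precedes the first recorded value
        return self._values[i - 1]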
Code example #9
 async def connect(self, sockaddr):
     self.ip = sockaddr[0]
     self.port = sockaddr[1]
     assert self.ip not in self.scenario.sockets
     self.scenario.sockets[self.ip] = self
     self.scenario.connect_times[self.ip] = trio.current_time()
     delay, result = self.scenario.ip_dict[self.ip]
     await trio.sleep(delay)
     if result == "error":
         raise OSError("sorry")
     self.succeeded = True
Code example #10
File: rpc.py Project: M-o-a-T/qbroker
 async def _run(self, fn, msg, task_status=trio.TASK_STATUS_IGNORED):
     task_status.started()
     try:
         res = await fn(msg)
     except Exception as exc:
         await msg.error(exc, _exit=self.debug)
     else:
         if res is not None:
             await msg.reply(res)
     finally:
         with trio.CancelScope(shield=True, deadline=trio.current_time() + 1):  # trio.open_cancel_scope() in older trio releases
             with suppress(AmqpClosedConnection):
                 await msg.aclose()
Code example #11
async def test_early_success(autojump_clock):
    sock, scenario = await run_scenario(
        80,
        [
            ("1.1.1.1", 1, "success"),
            ("2.2.2.2", 0.1, "success"),
            ("3.3.3.3", 0.2, "success"),
        ],
    )
    assert sock.ip == "2.2.2.2"
    assert trio.current_time() == (0.300 + 0.1)
    assert scenario.connect_times == {
        "1.1.1.1": 0,
        "2.2.2.2": 0.300,
        # 3.3.3.3 was never even started
    }
Code example #12
async def test_basic_fallthrough(autojump_clock):
    sock, scenario = await run_scenario(
        80,
        [
            ("1.1.1.1", 1, "success"),
            ("2.2.2.2", 1, "success"),
            ("3.3.3.3", 0.2, "success"),
        ],
    )
    assert sock.ip == "3.3.3.3"
    assert trio.current_time() == (0.300 + 0.300 + 0.2)
    assert scenario.connect_times == {
        "1.1.1.1": 0,
        "2.2.2.2": 0.300,
        "3.3.3.3": 0.600,
    }
Code example #13
async def test_custom_delay(autojump_clock):
    sock, scenario = await run_scenario(
        80,
        [
            ("1.1.1.1", 1, "success"),
            ("2.2.2.2", 1, "success"),
            ("3.3.3.3", 0.2, "success"),
        ],
        happy_eyeballs_delay=0.450,
    )
    assert sock.ip == "1.1.1.1"
    assert trio.current_time() == 1
    assert scenario.connect_times == {
        "1.1.1.1": 0,
        "2.2.2.2": 0.450,
        "3.3.3.3": 0.900,
    }
Code example #14
async def test_does_reorder(autojump_clock):
    sock, scenario = await run_scenario(
        80,
        [
            ("1.1.1.1", 10, "error"),
            # This would win if we tried it first...
            ("2.2.2.2", 1, "success"),
            # But in fact we try this first, because of section 5.4
            ("::3", 0.5, "success"),
        ],
        happy_eyeballs_delay=1,
    )
    assert sock.ip == "::3"
    assert trio.current_time() == 1 + 0.5
    assert scenario.connect_times == {
        "1.1.1.1": 0,
        "::3": 1,
    }
Code example #15
async def test_custom_errors_expedite(autojump_clock):
    sock, scenario = await run_scenario(
        80,
        [
            ("1.1.1.1", 0.1, "error"),
            ("2.2.2.2", 0.2, "error"),
            ("3.3.3.3", 10, "success"),
            ("4.4.4.4", 0.3, "success"),
        ],
    )
    assert sock.ip == "4.4.4.4"
    assert trio.current_time() == (0.1 + 0.2 + 0.3 + 0.3)
    assert scenario.connect_times == {
        "1.1.1.1": 0,
        "2.2.2.2": 0.1,
        "3.3.3.3": 0.1 + 0.2,
        "4.4.4.4": 0.1 + 0.2 + 0.3,
    }
Code example #16
async def test_handles_no_ipv6(autojump_clock):
    sock, scenario = await run_scenario(
        80,
        # Here the ipv6 addresses fail at socket creation time, so the connect
        # configuration doesn't matter
        [
            ("::1", 0, "success"),
            ("2.2.2.2", 10, "success"),
            ("::3", 0, "success"),
            ("4.4.4.4", 0.1, "success"),
        ],
        happy_eyeballs_delay=1,
        ipv6_supported=False,
    )
    assert sock.ip == "4.4.4.4"
    assert trio.current_time() == 1 + 0.1
    assert scenario.connect_times == {
        "2.2.2.2": 0,
        "4.4.4.4": 1.0,
    }
Code example #17
async def test_all_fail(autojump_clock):
    exc, scenario = await run_scenario(
        80,
        [
            ("1.1.1.1", 0.1, "error"),
            ("2.2.2.2", 0.2, "error"),
            ("3.3.3.3", 10, "error"),
            ("4.4.4.4", 0.3, "error"),
        ],
        expect_error=OSError,
    )
    assert isinstance(exc, OSError)
    assert isinstance(exc.__cause__, trio.MultiError)
    assert len(exc.__cause__.exceptions) == 4
    assert trio.current_time() == (0.1 + 0.2 + 10)
    assert scenario.connect_times == {
        "1.1.1.1": 0,
        "2.2.2.2": 0.1,
        "3.3.3.3": 0.1 + 0.2,
        "4.4.4.4": 0.1 + 0.2 + 0.3,
    }
Code example #18
async def test_cancel(autojump_clock):
    with trio.move_on_after(5) as cancel_scope:
        exc, scenario = await run_scenario(
            80,
            [
                ("1.1.1.1", 10, "success"),
                ("2.2.2.2", 10, "success"),
                ("3.3.3.3", 10, "success"),
                ("4.4.4.4", 10, "success"),
            ],
            expect_error=trio.MultiError,
        )
        # What comes out should be 1 or more Cancelled errors that all belong
        # to this cancel_scope; this is the easiest way to check that
        raise exc
    assert cancel_scope.cancelled_caught

    assert trio.current_time() == 5

    # This should have been called already, but just to make sure, since the
    # exception-handling logic in run_scenario is a bit complicated and the
    # main thing we care about here is that all the sockets were cleaned up.
    scenario.check(succeeded=False)
Code example #19
File: broker.py Project: M-o-a-T/qbroker
    async def _keep_connected(self, task_status=trio.TASK_STATUS_IGNORED):
        """Task which keeps a connection going"""

        class TODOexception(Exception):
            pass

        self.restarting = None
        while not self._stop.is_set():
            try:
                self._reg_endpoints = set()
                async with Connection(self.cfg, self.uuid).connect(self) as conn:
                    self.restarting = False
                    self.conn = conn
                    self._connected.set()
                    if self._idle is not None:
                        self._idle.cancel()
                        self._idle = None
                    await self._do_regs()
                    task_status.started()
                    await conn.is_disconnected.wait()
            except TODOexception:
                self._connected.clear()
                logger.exception("Error. TODO Reconnecting after a while.")
            finally:
                c, self.conn = self.conn, None
                if c is not None:
                    with trio.CancelScope(shield=True, deadline=trio.current_time() + 1):  # trio.open_cancel_scope() in older trio releases
                        await c.aclose()

            self.restarting = True
            if self._stop.is_set():
                break
            if self.idle_proc is not None:
                await self.nursery.start(self._run_idle)

            with trio.move_on_after(10):
                await self._stop.wait()
Code example #20
File: conn.py Project: M-o-a-T/qbroker
 async def _on_dead_rpc(self, channel, body, envelope, properties):
     """
     This handler is responsible for receiving dead-lettered messages.
     It builds an error reply and sends it to the client, ensuring that
     the error is discovered instantly, instead of waiting for a timeout.
     """
     try:
         codec = get_codec(properties.content_type)
         msg = codec.decode(body)
         msg = BaseMsg.load(
             msg,
             envelope,
             properties,
             conn=self,
             type="server",
             reply_channel=self._ch_reply.channel,
             reply_exchange=self._ch_reply.exchange
         )
         reply = msg.make_response(self)
         reply_to = getattr(msg, 'reply_to', None)
         exc = envelope.exchange_name
         if exc.startswith("dead"):
             exc = properties.headers['x-death'][0]['exchange']
         exc = DeadLettered(exc, envelope.routing_key)
         if reply_to is None:
             # usually, this is no big deal: call debug(), not exception().
             logger.debug("Undeliverable one-way message", exc_info=exc)
             return
         reply.set_error(exc, envelope.routing_key)
         reply, props = reply.dump(self, codec=self.codec)
         logger.debug("DeadLetter %s to %s", envelope.routing_key, self._ch_reply.exchange)
         await self._ch_reply.channel.publish(
             reply, self._ch_reply.exchange, reply_to, properties=props
         )
     finally:
         with trio.CancelScope(shield=True, deadline=trio.current_time() + 1):  # trio.open_cancel_scope() in older trio releases
             await channel.basic_client_ack(envelope.delivery_tag)
Code example #21
async def current_time():
    return trio.current_time()
Code example #22
File: network.py Project: Tubbz-alt/ddht
 async def _track_last_pong(self) -> None:
     async with self.dispatcher.subscribe(PongMessage) as subscription:
         async for message in subscription:
             self._last_pong_at[
                 message.sender_node_id] = trio.current_time()
Code example #23
async def test_one_host_slow_success(autojump_clock):
    sock, scenario = await run_scenario(81, [("1.2.3.4", 100, "success")])
    assert sock.ip == "1.2.3.4"
    assert trio.current_time() == 100
Code example #24
async def test_client_works(autojump_clock, sample_bls_key_pairs,
                            seconds_per_slot, slots_per_epoch):
    """
    This test constructs a ``Client`` with enough known inputs to compute an expected set of
    signatures after running for a given amount of time. The test fails if the expected signatures
    are not observed as outputs of the client.
    """
    slots_per_epoch = 4
    # NOTE: start 2 epochs ahead of genesis to emulate the client
    # waiting until the time it can start polling the beacon node
    # and getting duties for the first epoch in the epoch prior to genesis
    total_epochs_to_run = 4
    epochs_before_genesis_to_start = 2
    epochs_after_genesis_to_end = total_epochs_to_run - epochs_before_genesis_to_start
    # Set genesis so that we aren't aligned with a slot, which could hide some
    # bugs we will otherwise see...
    non_aligned_time = trio.current_time() + seconds_per_slot / 3
    seconds_per_epoch = seconds_per_slot * slots_per_epoch
    genesis_time = int(non_aligned_time +
                       epochs_before_genesis_to_start * seconds_per_epoch)

    public_key = tuple(sample_bls_key_pairs.keys())[0]
    key_store = KeyStore(sample_bls_key_pairs)
    beacon_node = MockBeaconNode(
        slots_per_epoch,
        seconds_per_slot,
        duty_fetcher=_mk_duty_fetcher(public_key, slots_per_epoch,
                                      seconds_per_slot),
    )

    clock = Clock(
        seconds_per_slot,
        genesis_time,
        slots_per_epoch,
        seconds_per_epoch,
        trio.current_time,
    )
    client = Client(key_store, clock, beacon_node)

    try:
        async with background_trio_service(client):
            await trio.sleep(total_epochs_to_run * seconds_per_epoch)
    except DaemonTaskExit:
        # NOTE: there is a race condition in ``async_service`` that will
        # trigger ``DaemonTaskExit`` when the test would otherwise pass.
        # See: https://github.com/ethereum/async-service/issues/54
        pass

    fulfilled_duties = tuple(
        filter(
            lambda duty: duty.tick_for_execution.epoch <
            epochs_after_genesis_to_end,
            beacon_node.given_duties,
        ))
    assert len(beacon_node.published_signatures) == len(fulfilled_duties)
    randao_provider = mk_randao_provider(key_store.private_key_for)
    for duty in fulfilled_duties:
        if duty.duty_type == DutyType.Attestation:
            operation = await beacon_node.fetch_attestation(
                duty.validator_public_key,
                duty.tick_for_execution.slot,
                duty.committee_index,
            )
        else:
            randao_reveal = randao_provider(duty.validator_public_key,
                                            duty.tick_for_execution.epoch)
            operation = await beacon_node.fetch_block_proposal(
                duty.tick_for_execution.slot, randao_reveal)

        observed_signature = beacon_node.published_signatures[duty]
        expected_signature = sign(duty, operation, key_store.private_key_for)
        assert observed_signature == expected_signature
Code example #25
File: worker_context.py Project: pgjones/hypercorn
 def time() -> float:
     return trio.current_time()
Code example #26
    async def _periodically_advertise_content(self) -> None:
        await self._network.routing_table_ready()

        send_channel, receive_channel = trio.open_memory_channel[ContentKey](
            self._concurrency)

        for _ in range(self._concurrency):
            self.manager.run_daemon_task(self._broadcast_worker,
                                         receive_channel)

        async for _ in every(30 * 60):
            start_at = trio.current_time()

            total_keys = len(self.content_storage)
            if not total_keys:
                continue

            first_key = first(
                self.content_storage.iter_closest(
                    NodeID(secrets.token_bytes(32))))

            self.logger.info(
                "content-processing-starting: total=%d  start=%s",
                total_keys,
                first_key.hex(),
            )

            processed_keys = 0

            last_key = first_key
            has_wrapped_around = False

            while self.manager.is_running:
                elapsed = trio.current_time() - start_at
                content_keys = tuple(
                    take(
                        self._concurrency * 2,
                        self.content_storage.enumerate_keys(
                            start_key=last_key),
                    ))

                # TODO: We need to adjust the
                # `ContentStorageAPI.enumerate_keys` to allow a
                # non-inclusive left bound so we can query all the keys
                # **after** the last key we processed.
                if content_keys and content_keys[0] == last_key:
                    content_keys = content_keys[1:]

                if not content_keys:
                    last_key = None
                    has_wrapped_around = True
                    continue

                for content_key in content_keys:
                    await send_channel.send(content_key)

                last_key = content_keys[-1]
                if has_wrapped_around and last_key >= first_key:
                    break

                processed_keys += len(content_keys)
                progress = processed_keys * 100 / total_keys

                self.logger.debug(
                    "content-processing: progress=%0.1f  processed=%d  "
                    "total=%d  at=%s  elapsed=%s",
                    progress,
                    processed_keys,
                    total_keys,
                    "None" if last_key is None else last_key.hex(),
                    humanize_seconds(int(elapsed)),
                )

            self.logger.info(
                "content-processing-finished: processed=%d/%d  elapsed=%s",
                processed_keys,
                total_keys,
                humanize_seconds(int(elapsed)),
            )
Code example #27
 def _update(self):
     now = trio.current_time()
     elapsed = now - self._last_update_time
     self._tokens += elapsed * self._max_per_second
     # clamp with min(): max() here would pin the bucket at full and defeat the rate limit
     self._tokens = min(self._tokens, self._max_burst)
     self._last_update_time = now
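A self-contained sketch of the token-bucket limiter that `_update` above appears to belong to; the constructor fields are inferred from the method, and `acquire` is a hypothetical addition:

import trio

class TokenBucket:
    def __init__(self, max_per_second: float, max_burst: float) -> None:
        self._max_per_second = max_per_second
        self._max_burst = max_burst
        self._tokens = max_burst
        self._last_update_time = trio.current_time()

    def _update(self) -> None:
        now = trio.current_time()
        elapsed = now - self._last_update_time
        self._tokens = min(self._tokens + elapsed * self._max_per_second,
                           self._max_burst)  # refill, capped at the burst size
        self._last_update_time = now

    async def acquire(self) -> None:
        # sleep until a whole token is available, then spend it
        while True:
            self._update()
            if self._tokens >= 1:
                self._tokens -= 1
                return
            await trio.sleep((1 - self._tokens) / self._max_per_second)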
Code example #28
async def test_autoawait():
    await ensure_portal()
    sync_sleep(1)
    await trio.sleep(2)
    sync_sleep(3)
    assert trio.current_time() == 6
Code example #29
File: test_initiator.py Project: math2001/nine43
async def test_quitter() -> None:
    # left is a net.JSONStream, right just a trio.abc.Stream

    async def spawn(
        connch: SendCh[trio.abc.Stream],
        playerch: RecvCh[Player],
        quitch: SendCh[Player],
    ) -> None:

        left, right = new_half_stream_pair()

        await connch.send(right)
        assert await left.read() == {"type": "log in"}
        await left.write({"type": "log in", "username": "******"})
        assert await left.read() == {
            "type": "log in update",
            "state": "accepted"
        }

        # the initiator should spit the player out
        player = await playerch.receive()

        assert player.username == "first"
        assert player.stream == net.JSONStream(right)

        # player quits from the lobby or a sub, or anything that isn't the
        # initiator
        await quitch.send(player)

        left, right = new_half_stream_pair()

        await connch.send(right)
        assert await left.read() == {"type": "log in"}
        # notice how we use the same username. It shouldn't block, because
        # the other player quit
        await left.write({"type": "log in", "username": "first"})
        assert await left.read() == {
            "type": "log in update",
            "state": "accepted"
        }

        player = await playerch.receive()

        assert player.username == "first"
        assert player.stream == net.JSONStream(right)

        await conn_sendch.aclose()
        await quit_sendch.aclose()

    conn_sendch, conn_recvch = trio.open_memory_channel[trio.abc.Stream](0)
    player_sendch, player_recvch = trio.open_memory_channel[Player](0)
    quit_sendch, quit_recvch = trio.open_memory_channel[Player](0)

    async with trio.open_nursery() as nursery:
        nursery.cancel_scope.deadline = trio.current_time() + 2
        nursery.start_soon(spawn, conn_sendch, player_recvch, quit_sendch)
        nursery.start_soon(initiator.initiator, conn_recvch, player_sendch,
                           quit_recvch)

    assert not nursery.cancel_scope.cancelled_caught, "spawn timed out after 2 seconds"
Code example #32
File: monitor.py Project: ziirish/burp-ui
    async def handle(self, server_stream: trio.abc.Stream):
        try:
            ident = next(CONNECTION_COUNTER)
            self.logger.info(f'{ident} - handle_request: started')
            t0 = trio.current_time()
            lengthbuf = await server_stream.receive_some(8)
            if not lengthbuf:
                return
            length, = struct.unpack('!Q', lengthbuf)
            data = await self.receive_all(server_stream, length)
            self.logger.info(f'{ident} - recv: {data!r}')
            txt = to_unicode(data)
            if txt == 'RE':
                return
            req = json.loads(txt)
            if req['password'] != self.password:
                self.logger.warning(f'{ident} -----> Wrong Password <-----')
                await server_stream.send_all(b'KO')
                return
            try:
                func = req.get('func')
                if func == 'monitor_version':
                    response = __version__
                elif func in ['client_version', 'server_version', 'batch_list_supported']:
                    async with self.get_mon(ident) as mon:
                        response = getattr(mon, func, '')
                        if func in ['batch_list_supported']:
                            response = json.dumps(response)
                elif func == 'statistics':
                    tmp = []
                    res = {
                        'alive': False,
                        'server_version': 'unknown',
                        'client_version': 'unknown'
                    }
                    while not res['alive'] and len(tmp) < self.pool.size:
                        mon = await self.pool.get()
                        tmp.append(mon)
                        if mon.alive:
                            res = {
                                'alive': True,
                                'server_version': getattr(mon, 'server_version', ''),
                                'client_version': getattr(mon, 'client_version', '')
                            }
                            break
                        await trio.sleep(0.5)
                    for mon in tmp:
                        await self.pool.put(mon)
                    response = json.dumps(res)
                else:
                    query = req['query']
                    cache = req.get('cache', True)

                    self._cleanup_cache()
                    # return cached results
                    if cache and query in self._status_cache:
                        response = self._status_cache[query]
                    else:
                        async with self.get_mon(ident) as mon:
                            response = mon.status(query, timeout=self.timeout, cache=False, raw=True)

                        if cache:
                            self._status_cache[query] = response
                self.logger.debug(f'{ident} - Sending: {response}')
                if response:
                    await server_stream.send_all(b'OK')
                else:
                    await server_stream.send_all(b'KO')
            except BUIserverException as exc:
                await server_stream.send_all(b'ER')
                response = str(exc)
                self.logger.error(response, exc_info=exc)
                self.logger.warning(f'Forwarding Exception: {response}')

            if response:
                response = to_bytes(response)
                await server_stream.send_all(struct.pack('!Q', len(response)))
                await server_stream.send_all(response)

            t3 = trio.current_time()
            t = t3 - t0
            self.logger.info(f'{ident} - Completed in {t:.3f}s')
        except Exception as exc:
            self.logger.error(f'Unexpected error: {exc}')
            response = str(exc)
            self.logger.error(response, exc_info=exc)
            try:
                await server_stream.send_all(b'ER')
                self.logger.warning(f'Forwarding Exception: {response}')

                response = to_bytes(response)
                await server_stream.send_all(struct.pack('!Q', len(response)))
                await server_stream.send_all(response)
            except trio.BrokenResourceError:
                # Broken Pipe, we cannot forward the error
                pass
Code example #34
File: test_buffers.py Project: jkoelker/slurry
 async def timestamp():
     yield trio.current_time()
Code example #35
File: tasks-with-trace.py Project: zed/trio
 def after_io_wait(self, timeout):
     duration = trio.current_time() - self._sleep_time
     print("### finished I/O check (took {} seconds)".format(duration))
Code example #36
async def test_network_pings_oldest_routing_table(tester, alice, bob,
                                                  autojump_clock):
    async with AsyncExitStack() as stack:
        carol = tester.node()
        dylan = tester.node()

        # startup a few peers to put in the routing table
        bob_network = await stack.enter_async_context(bob.network())
        carol_network = await stack.enter_async_context(carol.network())

        # populate the routing table and ENR database
        alice.enr_db.set_enr(bob.enr)
        alice.enr_db.set_enr(carol.enr)
        alice.enr_db.set_enr(dylan.enr)

        async with AsyncExitStack() as handshakes_done:
            await handshakes_done.enter_async_context(
                bob.events.session_handshake_complete.subscribe_and_wait())
            await handshakes_done.enter_async_context(
                carol.events.session_handshake_complete.subscribe_and_wait())

            alice_network = await stack.enter_async_context(
                alice.network(bootnodes=[bob.enr, carol.enr, dylan.enr]), )
        alice_network.routing_table.update(dylan.node_id)
        alice_network.routing_table.update(bob.node_id)
        alice_network.routing_table.update(carol.node_id)

        assert alice_network.routing_table._contains(bob.node_id, False)
        assert alice_network.routing_table._contains(carol.node_id, False)
        assert alice_network.routing_table._contains(dylan.node_id, False)

        # run through a few checkpoints which should remove dylan from the routing table
        await trio.sleep(ROUTING_TABLE_KEEP_ALIVE)
        await trio.sleep(ROUTING_TABLE_KEEP_ALIVE)

        assert alice_network.routing_table._contains(bob.node_id, False)
        assert alice_network.routing_table._contains(carol.node_id, False)
        assert not alice_network.routing_table._contains(dylan.node_id, False)

        # now take carol offline and let her be removed
        carol_network.get_manager().cancel()
        alice_network._last_pong_at[carol.node_id] = (
            trio.current_time() - ROUTING_TABLE_KEEP_ALIVE - 1)
        await trio.sleep(ROUTING_TABLE_KEEP_ALIVE)
        await trio.sleep(ROUTING_TABLE_KEEP_ALIVE)

        assert alice_network.routing_table._contains(bob.node_id, False)
        assert not alice_network.routing_table._contains(carol.node_id, False)
        assert not alice_network.routing_table._contains(dylan.node_id, False)

        # now take bob offline and let him be removed
        bob_network.get_manager().cancel()
        alice_network._last_pong_at[bob.node_id] = (trio.current_time() -
                                                    ROUTING_TABLE_KEEP_ALIVE -
                                                    1)
        await trio.sleep(ROUTING_TABLE_KEEP_ALIVE)
        await trio.sleep(ROUTING_TABLE_KEEP_ALIVE)

        assert not alice_network.routing_table._contains(bob.node_id, False)
        assert not alice_network.routing_table._contains(carol.node_id, False)
        assert not alice_network.routing_table._contains(dylan.node_id, False)
Code example #37
File: ratelimits.py Project: A5rocks/bloom
    async def request(self,
                      req: Request[ReturnT],
                      *,
                      max_sleep: float = math.inf) -> ReturnT:
        # this code makes the assumption of only a single major param.
        major_parameter = req.args.get('channel_id') or req.args.get(
            'guild_id')

        # the routes with only webhook_id and not webhook_token are not
        # ratelimited, so this is perfectly fine.
        if 'webhook_id' in req.args and 'webhook_token' in req.args:
            # str key (cause interaction's webhook id is the app id...)
            major_parameter = str(req.args['webhook_id']) + str(
                req.args['webhook_token'])

        async with self.locks[req.route][major_parameter]:
            with trio.fail_after(max_sleep):
                async with trio.open_nursery() as nursery:
                    for bucket in self.buckets[req.route][major_parameter]:
                        nursery.start_soon(bucket.wait_for)

            # make the request
            kw_args = {}

            if req.params is not None:
                kw_args['params'] = req.params

            if req.json is not None:
                kw_args['json'] = req.json

            if req.headers is not None:
                kw_args['headers'] = req.headers

            if req.data is not None:
                kw_args['data'] = req.data

            if req.files is not None:
                kw_args['files'] = req.files

            result = await self.http.request(req.method, req.url, **kw_args)
            headers = result.headers

            # TODO: handle errors (429, ...) correctly (including decoding of error type)
            if result.status_code >= 500:
                # TODO: how to handle server errors? retry?
                result.raise_for_status()
            elif result.status_code >= 400:
                # TODO: decode returned error type, separate out 400 vs 401 vs 403 vs 429
                result.raise_for_status()
            elif result.status_code >= 300:
                # this should never happen? should I log anyways?
                result.raise_for_status()

            # process the result

            bucket_hash = headers.get('X-RateLimit-Bucket')

            if bucket_hash is not None and major_parameter in self.buckets_by_hash[
                    bucket_hash]:
                bucket = self.buckets_by_hash[bucket_hash][major_parameter]
                bucket.remaining = int(headers['X-RateLimit-Remaining'])
                # TODO: should this have a configuration option to use `X-Ratelimit-Reset` instead?
                bucket.reset_at = trio.current_time() + float(
                    headers['X-RateLimit-Reset-After'])

            elif bucket_hash is not None:
                bucket = Bucket(
                    int(headers['X-RateLimit-Remaining']),
                    trio.current_time() +
                    float(headers['X-RateLimit-Reset-After']),
                )

                self.buckets[req.route][major_parameter].append(bucket)
                self.buckets_by_hash[bucket_hash][major_parameter] = bucket

            else:
                # ?? no ratelimit?
                pass

            # runtime-only attribute :S
            # TODO: clean this up and put unsafe parts in utility class/function
            result_type = req.type_args.inner  # type: ignore[attr-defined]

            if result.status_code == 204:
                if (result_type is None or None in typing.get_args(result_type)
                        or type(None) in typing.get_args(result_type)):
                    return None  # type: ignore[return-value]  # ResultT can be None
                else:
                    raise ValueError(
                        f"No body received for {req.method} {req.url}")
            else:
                return_val: ReturnT = self.converter.structure(
                    result.json(), result_type)
                return return_val
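The `Bucket` objects used above expose `remaining`, `reset_at`, and an awaitable `wait_for`; a hedged sketch consistent with those uses (not the project's actual class):

import trio

class Bucket:
    def __init__(self, remaining: int, reset_at: float) -> None:
        self.remaining = remaining  # requests left in this rate-limit window
        self.reset_at = reset_at    # trio clock time when the window resets

    async def wait_for(self) -> None:
        # if the bucket is exhausted, sleep until the window resets
        if self.remaining <= 0:
            await trio.sleep_until(self.reset_at)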
Code example #38
async def test_one_host_quick_success(autojump_clock):
    sock, scenario = await run_scenario(80, [("1.2.3.4", 0.123, "success")])
    assert sock.ip == "1.2.3.4"
    assert trio.current_time() == 0.123
Code example #39
 async def shutdown():
     await shutdown_trigger.wait()
     nursery.cancel_scope.deadline = trio.current_time() + shutdown_timeout
     logger.debug('Giving child nursery %.3f seconds to shut down...',
                  shutdown_timeout)
Code example #40
 def __init__(self, start_at: float) -> None:
     self._ema = EMA(start_at, 0.05)
     self._last_at = trio.current_time()
Code example #41
 async def trio_main(in_host):
     assert trio.current_time() == 0
     in_host(lambda: setattr(clock, "autojump_threshold", 0))
     await trio.sleep(DURATION)
     assert trio.current_time() == DURATION
Code example #42
async def test_basic(autojump_clock: trio.testing.MockClock) -> None:
    parent = MultiCancelScope()
    finish_order = []

    async def cancel_child_before_entering() -> None:
        child = parent.open_child()
        assert not child.cancel_called
        child.cancel()
        assert child.cancel_called
        assert not child.cancelled_caught
        await trio.sleep(0.2)
        with child:
            assert not child.cancelled_caught
            await trio.sleep(1)
        assert child.cancelled_caught
        finish_order.append("cancel_child_before_entering")

    async def cancel_child_after_entering() -> None:
        with parent.open_child() as child:
            await trio.sleep(0.3)
            child.cancel()
            await trio.sleep(1)
        assert child.cancel_called
        assert child.cancelled_caught
        finish_order.append("cancel_child_after_entering")

    async def cancel_child_via_local_deadline() -> None:
        child = parent.open_child()
        child.deadline = trio.current_time() + 0.4
        deadline_before_entering = child.deadline
        with child:
            assert child.deadline == deadline_before_entering
            await trio.sleep(1)
        assert child.cancel_called
        assert child.cancelled_caught
        finish_order.append("cancel_child_via_local_deadline")

    async def cancel_child_via_local_deadline_2() -> None:
        child = parent.open_child()
        child.deadline = trio.current_time() + 1.0
        with child:
            child.deadline -= 0.9
            await trio.sleep(1)
        assert child.cancel_called
        assert child.cancelled_caught
        finish_order.append("cancel_child_via_local_deadline_2")

    async def cancel_parent_before_entering() -> None:
        child = parent.open_child()
        await trio.sleep(0.6)
        assert child.cancel_called
        assert not child.cancelled_caught
        with child:
            await trio.sleep(1)
        assert child.cancelled_caught
        finish_order.append("cancel_parent_before_entering")

    async def cancel_parent_after_entering() -> None:
        with parent.open_child() as child:
            await trio.sleep(1)
        assert child.cancel_called
        assert child.cancelled_caught
        finish_order.append("cancel_parent_after_entering")

    async with trio.open_nursery() as nursery:
        nursery.start_soon(cancel_child_before_entering)
        nursery.start_soon(cancel_child_after_entering)
        nursery.start_soon(cancel_child_via_local_deadline)
        nursery.start_soon(cancel_child_via_local_deadline_2)
        nursery.start_soon(cancel_parent_before_entering)
        nursery.start_soon(cancel_parent_after_entering)
        await trio.sleep(0.5)
        assert "MultiCancelScope cancelled" not in repr(parent)
        assert not parent.cancel_called
        parent.cancel()
        assert parent.cancel_called
        assert "MultiCancelScope cancelled" in repr(parent)
        parent.cancel()
        await trio.sleep(0.2)

        nursery.cancel_scope.deadline = trio.current_time() + 0.1
        with parent.open_child() as child:
            child.deadline = nursery.cancel_scope.deadline
            assert child.cancel_called
            assert not child.cancelled_caught
            await trio.sleep_forever()
        assert child.cancelled_caught
        finish_order.append("cancel_parent_before_creating")

    assert not nursery.cancel_scope.cancelled_caught
    assert finish_order == [
        "cancel_child_via_local_deadline_2",  # t+0.1
        "cancel_child_before_entering",  # t+0.2
        "cancel_child_after_entering",  # t+0.3
        "cancel_child_via_local_deadline",  # t+0.4
        "cancel_parent_after_entering",  # t+0.5
        "cancel_parent_before_entering",  # t+0.6
        "cancel_parent_before_creating",  # t+0.7
    ]
Code example #44
File: _buffers.py Project: jkoelker/slurry
 async def pull_task():
     async with buffer_input_channel, aclosing(source) as aiter:
         async for item in aiter:
             await buffer_input_channel.send(
                 (item, trio.current_time() + self.interval))
Code example #45
async def test_one_host_slow_fail(autojump_clock):
    exc, scenario = await run_scenario(83, [("1.2.3.4", 100, "error")],
                                       expect_error=OSError)
    assert isinstance(exc, OSError)
    assert trio.current_time() == 100
Code example #46
 def update(self) -> None:
     now = trio.current_time()
     delta = now - self._last_at
     self._ema.update(delta)
     self._last_at = now
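Examples #40 and #46 share an `EMA` helper, constructed as `EMA(initial_value, alpha)` and fed inter-arrival deltas via `update()`. A hedged sketch of a conventional exponential moving average matching those calls:

class EMA:
    def __init__(self, value: float, alpha: float) -> None:
        self.value = value  # current average
        self.alpha = alpha  # smoothing factor in (0, 1]

    def update(self, sample: float) -> None:
        # standard exponential smoothing step
        self.value = self.alpha * sample + (1 - self.alpha) * self.value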
Code example #47
 def _f():
     return trio.current_time()
Code example #49
File: trio.py Project: tweimer/plugin.video.netflix
 async def time(self) -> float:
     return trio.current_time()
Code example #50
File: conn.py Project: M-o-a-T/qbroker
    async def register(self, ep):
        assert ep.tag not in self.rpcs
        ch = getattr(self, '_ch_' + ep.type)
        cfg = self.cfg

        dn = n = ep.name
        if ep.name.endswith('.#'):
            n = n[:-2]
            dn = n + '._all_'
        if len(n) > 1 and '#' in n:
            raise RuntimeError("I won't find that")

        if ep.tag in self.rpcs:
            raise RuntimeError("multiple registration of " + ep.tag)

        self.rpcs[ep.tag] = ep
        chan = None
        try:
            ep._c_channel = chan = await self.amqp.channel()
            d = {}
            ttl = ep.ttl or cfg.ttl[ep.type]
            if ttl:
                d["x-dead-letter-exchange"] = cfg.queues['dead']
                d["x-message-ttl"] = int(1000 * ttl)

            if ep.durable:
                if isinstance(ep.durable, str):
                    dn = ep.durable
                else:
                    dn = self.cfg.queues['msg'] + ep.tag
                chan = await self.amqp.channel()
                q = await chan.queue_declare(
                    dn,
                    auto_delete=False,
                    passive=False,
                    exclusive=False,
                    durable=True,
                    arguments=d
                )
            elif ep.type == "rpc":
                chan = await self.amqp.channel()
                q = await chan.queue_declare(
                    cfg.queues[ep.type] + ep.name,
                    auto_delete=True,
                    durable=False,
                    passive=False,
                    arguments=d
                )
            else:
                chan = self._ch_alert.channel
                q = self._ch_alert.queue
            logger.debug("Chan %s: bind %s %s %s", ch.channel, ep.exchange, ep.name, q['queue'])
            await chan.queue_bind(q['queue'], ep.exchange, routing_key=ep.name)

            await chan.basic_qos(prefetch_count=1, prefetch_size=0, connection_global=False)
            logger.debug("Chan %s: read %s", ch, q['queue'])
            await chan.basic_consume(
                queue_name=q['queue'],
                callback=self._on_rpc_in if ep.type == "rpc" else self._on_alert_in
            )

            ep._c_channel = chan
            ep._c_queue = q

        except BaseException:  # pragma: no cover
            del self.rpcs[ep.tag]
            if chan is not None:
                del ep._c_channel
                with trio.CancelScope(shield=True, deadline=trio.current_time() + 1):  # trio.open_cancel_scope() in older trio releases
                    await chan.close()
            raise
Code example #51
File: tasks-with-trace.py Project: zed/trio
 def before_io_wait(self, timeout):
     if timeout:
         print("### waiting for I/O for up to {} seconds".format(timeout))
     else:
         print("### doing a quick check for I/O")
     self._sleep_time = trio.current_time()
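The `before_io_wait`/`after_io_wait` hooks in examples #35 and #51 are methods of a `trio.abc.Instrument` subclass (they come from trio's tasks-with-trace.py demo). A sketch of wiring such a tracer into a run:

import trio

class Tracer(trio.abc.Instrument):
    def before_io_wait(self, timeout):
        if timeout:
            print("### waiting for I/O for up to {} seconds".format(timeout))
        else:
            print("### doing a quick check for I/O")
        self._sleep_time = trio.current_time()

    def after_io_wait(self, timeout):
        duration = trio.current_time() - self._sleep_time
        print("### finished I/O check (took {} seconds)".format(duration))

async def main():
    await trio.sleep(0.1)  # forces at least one real I/O wait

trio.run(main, instruments=[Tracer()])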
Code example #52
async def test_sleep():
    start_time = trio.current_time()
    await trio.sleep(1)
    end_time = trio.current_time()
    assert end_time - start_time >= 1
Code example #53
async def test_rwlock(autojump_clock: trio.testing.MockClock) -> None:
    lock = RWLock()
    assert not lock.locked()

    lock.acquire_read_nowait()
    assert lock.locked() == "read"

    with pytest.raises(RuntimeError):
        lock.acquire_read_nowait()
    with pytest.raises(RuntimeError):
        lock.acquire_write_nowait()
    lock.release()
    with pytest.raises(RuntimeError):
        lock.release()

    with trio.testing.assert_checkpoints():
        await lock.acquire_write()
    assert lock.locked() == "write"
    with pytest.raises(RuntimeError):
        await lock.acquire_read()
    with pytest.raises(RuntimeError):
        await lock.acquire_write()
    lock.release()

    async with lock.read_locked():
        assert lock.locked() == "read"

    async with lock.write_locked():
        assert lock.locked() == "write"

    start_order = itertools.count()
    acquire_times: List[Optional[float]] = [None] * 10

    async def holder_task(for_write: bool,
                          task_status: TaskStatus[trio.lowlevel.Task]) -> None:
        my_slot = next(start_order)
        repr(lock)  # smoke test
        task_status.started(trio.lowlevel.current_task())
        await lock.acquire(for_write=for_write)
        acquire_times[my_slot] = trio.current_time()
        try:
            await trio.sleep(1)
        finally:
            lock.release()

    async with trio.open_nursery() as nursery:
        t0 = await nursery.start(holder_task, True)
        t1a = await nursery.start(holder_task, False)
        t1b = await nursery.start(holder_task, False)
        t1c = await nursery.start(holder_task, False)
        await nursery.start(holder_task, True)  # t2
        await nursery.start(holder_task, False)  # t3a
        await nursery.start(holder_task, False)  # t3b
        await nursery.start(holder_task, True)  # t4
        await nursery.start(holder_task, True)  # t5
        t6 = await nursery.start(holder_task, False)

        await trio.sleep(0.5)
        assert "write-locked" in repr(lock)
        assert lock.statistics().__dict__ == {
            "locked": "write",
            "readers": frozenset(),
            "writer": t0,
            "readers_waiting": 6,
            "writers_waiting": 3,
        }
        with pytest.raises(RuntimeError):
            lock.release()
        with pytest.raises(trio.WouldBlock):
            lock.acquire_read_nowait()
        with pytest.raises(trio.WouldBlock):
            lock.acquire_write_nowait()

        await trio.sleep(1)
        assert "read-locked" in repr(lock)
        assert lock.statistics().__dict__ == {
            "locked": "read",
            "readers": frozenset([t1a, t1b, t1c]),
            "writer": None,
            "readers_waiting": 3,
            "writers_waiting": 3,
        }
        with pytest.raises(RuntimeError):
            lock.release()
        with pytest.raises(trio.WouldBlock):
            # even in read state, can't acquire for read if writers are waiting
            lock.acquire_read_nowait()
        with pytest.raises(trio.WouldBlock):
            lock.acquire_write_nowait()

        await trio.sleep(5)
        assert "read-locked" in repr(lock)
        assert lock.statistics().__dict__ == {
            "locked": "read",
            "readers": frozenset([t6]),
            "writer": None,
            "readers_waiting": 0,
            "writers_waiting": 0,
        }
        lock.acquire_read_nowait()
        lock.release()
        with pytest.raises(trio.WouldBlock):
            lock.acquire_write_nowait()

    assert acquire_times == pytest.approx([0, 1, 1, 1, 2, 3, 3, 4, 5, 6])

    # test cancellation
    start_order = itertools.count()
    async with trio.open_nursery() as nursery:
        await nursery.start(holder_task, True)
        await nursery.start(holder_task, True)
        await nursery.start(holder_task, False)
        await nursery.start(holder_task, False)
        await nursery.start(holder_task, False)
        await nursery.start(holder_task, True)
        await nursery.start(holder_task, False)
        await nursery.start(holder_task, False)
        await nursery.start(holder_task, True)
        await nursery.start(holder_task, True)
        await nursery.start(holder_task, False)

        await trio.sleep(0.5)
        nursery.cancel_scope.cancel()

    assert nursery.cancel_scope.cancelled_caught
    assert trio.current_time() == pytest.approx(7.5)
    assert "unlocked" in repr(lock)
    assert lock.statistics().__dict__ == {
        "locked": "",
        "readers": frozenset(),
        "writer": None,
        "readers_waiting": 0,
        "writers_waiting": 0,
    }
Code example #54
def get_trio_time():
    return trio.current_time()
Code example #55
 def __init__(self, capacity, rate):
     self.capacity = self._last_value = capacity
     self._last_ts = trio.current_time()
     self.rate = rate
Code example #56
async def test_sleep_efficiently_and_reliably(autojump_clock):
    start_time = trio.current_time()
    await trio.sleep(1)
    end_time = trio.current_time()
    assert end_time - start_time == 1
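The `autojump_clock` fixture used throughout these tests is typically supplied by pytest-trio, or defined by hand as a one-liner around `trio.testing.MockClock`, roughly:

import pytest
import trio.testing

@pytest.fixture
def autojump_clock():
    # jump the clock forward whenever all tasks are blocked
    return trio.testing.MockClock(autojump_threshold=0)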
Code example #57
def get_current_time():
    return trio.current_time() - start_time
Code example #58
async def subscription_transform(stream: AsyncIterator[T], timeout: float = None,
        granularity: Callable[[T, T], bool] = None, granularity_timeout: float = None) -> AsyncIterator[T]:
    """\
    Implements the stream transformation described in `subscription.proto`.
    The identity transform would be `subscription_transform(stream, granularity=lambda a, b: True)`:
    no timing behavior is added, and all values are treated as distinct and thus emitted.

    If `granularity` is not given, values are compared for equality, thus from `[0, 0, 2, 1, 1, 0, 0, 1]`,
    elements 1, 4, and 6 would be discarded as being duplicates of their previous values.
    A typical example granularity measure for numbers is a lower bound on value difference,
    e.g. `lambda a, b: abs(a-b) > THRESHOLD`.

    The `timeout` parameter specifies a minimum time to pass between subsequent emitted values.
    After the timeout has passed, the most recently received value (if any) will be considered
    as if it had just arrived on the input stream,
    and then all subsequent values are considered until the next emission.
    Suppose the input is [0, 1, 0, 1, 0] and the timeout is just enough to skip one value completely.
    After emitting `0`, the first `1` is skipped, and the second `0` is not emitted because it's not a new value.
    The second `1` is emitted, because at that time no timeout is active (the last emission was too long ago).
    Immediately after the emission the timeout starts again, the last `0` arrives and the input stream ends.
    Because the `0` should be emitted, the stream awaits the timeout a final time, emits the value, and then terminates.
    Had the last value been a `1`, the output stream would have terminated immediately,
    as the value would not be emitted.

    The `granularity_timeout` parameter specifies a maximum time to pass between subsequent emitted values,
    as long as there were input values at all.
    The `granularity` may discard values of the input stream,
    leading in the most extreme case to no emitted values at all.
    If a `granularity_timeout` is given, then the most recent input value is emitted after that time,
    restarting both the ordinary and granularity timeout in the process.
    Suppose the input is [0, 0, 0, 1, 1, 0, 0] and the granularity timeout is just enough to skip one value completely.
    After emitting `0` and skipping the next one, another `0` is emitted:
    although the default granularity discarded the unchanged value, the granularity timeout forces its emission.
    Then, the first `1` and next `0` are emitted as normal, as changed values appeared before the timeout ran out.
    After the last `0`, the input ends.
    The stream waits a final time for the granularity timeout, outputs the value, and then terminates.

    Suppose the input is [0, 0] and the granularity timeout is so low that it runs out before the second zero.
    The first zero is the last value seen before the granularity timeout ran out,
    but once emitted it is out of the picture. The second zero is simply emitted as soon as it arrives.
    """
    if timeout is None:
        timeout = -math.inf
    if granularity is None:
        granularity = lambda a, b: a != b
    if granularity_timeout is None:
        granularity_timeout = math.inf

    async with _skipping_stream(stream) as anext:
        # we need a first value for our granularity checks
        value, eof = await anext()
        if eof:
            return

        while True:
            last_emit_at = trio.current_time()
            yield value
            if eof:
                return

            last_value = value
            has_value = False

            with trio.move_on_at(last_emit_at + granularity_timeout):
                with trio.move_on_at(last_emit_at + timeout):
                    while not eof:
                        # wait until there's news, save a value if there is
                        _value, eof = await anext()
                        if not eof:
                            # there's a value
                            value = _value
                            has_value = True

                # if we get here, either the timeout ran out or EOF was reached
                if eof:
                    if not has_value:
                        # no value at all
                        return
                    elif granularity(last_value, value):
                        # a good value! Wait for the timeout, then emit that value
                        await trio.sleep_until(last_emit_at + timeout)
                        continue
                    elif granularity_timeout < math.inf:
                        # there's still a chance to send the value after the granularity timeout
                        await trio.sleep_forever()
                    else:
                        # again, nothing to send
                        return
                    # note that none of the branches continue here

                # not EOF, so do regular waiting for values
                while not eof and (not has_value or not granularity(last_value, value)):
                    # wait until there's news, save a value if there is
                    _value, eof = await anext()
                    if not eof:
                        # there's a value
                        value = _value
                        has_value = True

                if eof:
                    # EOF was reached.
                    # If there is a value, we know that the granularity did not break the loop;
                    # no need to check that again.
                    if not has_value:
                        # no value at all
                        return
                    elif granularity_timeout < math.inf:
                        # there's still a chance to send the value after the granularity timeout
                        await trio.sleep_forever()
                    else:
                        # again, nothing to send
                        return
                    # note that none of the branches continue here

            # after the granularity timeout, we're fine with any value
            if has_value:
                continue

            # wait for the next event
            value, eof = await anext()
            if eof:
                return
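A hedged usage sketch for `subscription_transform`, assuming the module's helpers (such as `_skipping_stream`) are importable alongside it. With the defaults, consecutive duplicates are suppressed, so the docstring's `[0, 0, 2, 1, 1, 0, 0, 1]` example yields `0, 2, 1, 0, 1`:

import trio

async def sensor():
    # hypothetical input stream for illustration
    for x in [0, 0, 2, 1, 1, 0, 0, 1]:
        await trio.sleep(0.1)
        yield x

async def main():
    async for v in subscription_transform(sensor()):
        print(v)  # prints 0, 2, 1, 0, 1

trio.run(main)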