async def test_asyncio_subscription_api_does_not_match_inherited_classes(endpoint_pair):
    subscriber, other = endpoint_pair
    remote = _get_remote(other, subscriber.name)

    assert StreamEvent not in remote.get_subscribed_events()
    assert StreamEvent not in subscriber.get_subscribed_events()

    # trigger a `wait_for` call to run in the background and give it a moment
    # to spin up.
    task = asyncio.ensure_future(subscriber.wait_for(StreamEvent))
    await asyncio.sleep(0.01)

    # Now that we are within the wait_for, verify that the subscription is active
    # on the remote
    assert StreamEvent in remote.get_subscribed_events()
    assert StreamEvent in subscriber.get_subscribed_events()

    # Broadcast two of the inherited events and then the correct event.
    await other.broadcast(
        InheretedStreamEvent(), BroadcastConfig(require_subscriber=False)
    )
    await other.broadcast(
        InheretedStreamEvent(), BroadcastConfig(require_subscriber=False)
    )
    await other.broadcast(StreamEvent())

    # wait for the received event, which finishes the stream and
    # consequently ends the subscription
    event = await task
    assert isinstance(event, StreamEvent)
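
The two `BroadcastConfig(require_subscriber=False)` calls above are what let `other` broadcast `InheretedStreamEvent` even though no endpoint is listening for that exact type; with the default config, broadcasting an event that has no subscribers is an error (recent lahja versions raise `NoSubscribers`). A minimal, hedged sketch of the pattern, using a hypothetical event type:

from lahja import BaseEvent, BroadcastConfig


class UnwatchedEvent(BaseEvent):
    # Hypothetical event type used only for this sketch.
    pass


async def broadcast_best_effort(endpoint):
    # `require_subscriber=False` tells the endpoint to drop the event silently
    # when nobody has subscribed, instead of raising.
    await endpoint.broadcast(
        UnwatchedEvent(), BroadcastConfig(require_subscriber=False)
    )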
Example #2
    async def notify_resource_available(self) -> None:

        # We currently need this to give plugins the chance to start as soon
        # as the `PeerPool` is available. In the long term, the peer pool may become
        # a plugin itself and we can get rid of this.
        peer_pool = self.get_peer_pool()

        await self.event_bus.broadcast(
            ResourceAvailableEvent(resource=(peer_pool, self.cancel_token),
                                   resource_type=type(peer_pool)),
            BroadcastConfig(internal=True),
        )

        # This broadcasts the *local* chain, which is suited for tasks that aren't blocking
        # for too long. There may be value in also broadcasting the proxied chain.
        await self.event_bus.broadcast(
            ResourceAvailableEvent(resource=self.get_chain(),
                                   resource_type=BaseChain),
            BroadcastConfig(internal=True),
        )

        # Broadcasting the DbManager internally ensures that plugins running in the
        # networking process can reuse the existing connection instead of creating
        # additional new connections
        await self.event_bus.broadcast(
            ResourceAvailableEvent(resource=self.db_manager,
                                   resource_type=BaseManager),
            BroadcastConfig(internal=True),
        )
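
All three broadcasts above pass `BroadcastConfig(internal=True)`, which keeps the event on the broadcasting endpoint's own bus instead of shipping it to connected endpoints; that matters here because the announced resources (the peer pool, the chain, the DB manager) are only usable in-process. A rough sketch of the idea, with hypothetical names:

from lahja import BaseEvent, BroadcastConfig


class InProcessOnlyEvent(BaseEvent):
    # Hypothetical event carrying a resource that must not cross process boundaries.
    pass


async def announce_locally(endpoint):
    # internal=True restricts delivery to subscribers on this endpoint;
    # endpoints connected over IPC never see the event.
    await endpoint.broadcast(InProcessOnlyEvent(), BroadcastConfig(internal=True))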
Example #3
async def test_request_to_specific_endpoint(server_with_two_clients):
    server, client_a, client_b = server_with_two_clients

    async def handler_a():
        request = await client_a.wait_for(Request)
        await client_a.broadcast(Response("handler-a"),
                                 request.broadcast_config())

    async def handler_b():
        request = await client_b.wait_for(Request)
        await client_b.broadcast(Response("handler-b"),
                                 request.broadcast_config())

    asyncio.ensure_future(handler_a())
    asyncio.ensure_future(handler_b())

    await asyncio.wait_for(
        server.wait_until_all_endpoints_subscribed_to(Request,
                                                      include_self=False),
        timeout=0.1,
    )

    response_a = await asyncio.wait_for(
        server.request(Request("test"),
                       BroadcastConfig(filter_endpoint=client_a.name)),
        timeout=0.1,
    )
    response_b = await asyncio.wait_for(
        server.request(Request("test"),
                       BroadcastConfig(filter_endpoint=client_b.name)),
        timeout=0.1,
    )

    assert response_a.value == "handler-a"
    assert response_b.value == "handler-b"
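
Two routing mechanisms work together in this test: the server targets a single client via `BroadcastConfig(filter_endpoint=...)`, and each handler answers with `request.broadcast_config()`, which builds a config that routes the response back to the endpoint (and request id) that originated the request. A hedged sketch of a handler built on that pattern, reusing the `Request`/`Response` pair from above:

async def serve_one_request(endpoint):
    # Wait for a single request, then answer it. `request.broadcast_config()`
    # targets only the requesting endpoint, so no other listener receives the
    # response.
    request = await endpoint.wait_for(Request)
    await endpoint.broadcast(Response("served"), request.broadcast_config())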
Example #4
async def test_validator_handle_slot_tick(event_loop, event_bus, monkeypatch):
    alice = await get_validator(
        event_loop=event_loop, event_bus=event_bus, monkeypatch=monkeypatch, indices=[0]
    )

    event_first_tick_called = asyncio.Event()
    event_second_tick_called = asyncio.Event()
    event_third_tick_called = asyncio.Event()

    async def handle_first_tick(slot):
        event_first_tick_called.set()

    async def handle_second_tick(slot):
        event_second_tick_called.set()

    async def handle_third_tick(slot):
        event_third_tick_called.set()

    monkeypatch.setattr(alice, "handle_first_tick", handle_first_tick)
    monkeypatch.setattr(alice, "handle_second_tick", handle_second_tick)
    monkeypatch.setattr(alice, "handle_third_tick", handle_third_tick)

    # give the `event_bus` a moment to become ready
    await asyncio.sleep(0.01)

    # First tick
    await event_bus.broadcast(
        SlotTickEvent(slot=1, elapsed_time=2, tick_type=TickType.SLOT_START),
        BroadcastConfig(internal=True),
    )
    await asyncio.wait_for(event_first_tick_called.wait(), timeout=2, loop=event_loop)
    assert event_first_tick_called.is_set()
    assert not event_second_tick_called.is_set()
    assert not event_third_tick_called.is_set()
    event_first_tick_called.clear()

    # Second tick
    await event_bus.broadcast(
        SlotTickEvent(slot=1, elapsed_time=2, tick_type=TickType.SLOT_ONE_THIRD),
        BroadcastConfig(internal=True),
    )
    await asyncio.wait_for(event_second_tick_called.wait(), timeout=2, loop=event_loop)
    assert not event_first_tick_called.is_set()
    assert event_second_tick_called.is_set()
    assert not event_third_tick_called.is_set()
    event_second_tick_called.clear()

    # Third tick
    await event_bus.broadcast(
        SlotTickEvent(slot=1, elapsed_time=2, tick_type=TickType.SLOT_TWO_THIRD),
        BroadcastConfig(internal=True),
    )
    await asyncio.wait_for(event_third_tick_called.wait(), timeout=2, loop=event_loop)
    assert not event_first_tick_called.is_set()
    assert not event_second_tick_called.is_set()
    assert event_third_tick_called.is_set()
Example #5
async def test_validator_handle_slot_tick(event_loop, event_bus, monkeypatch):
    alice = await get_validator(event_loop=event_loop,
                                event_bus=event_bus,
                                indices=[0])

    event_first_tick_called = asyncio.Event()
    event_second_tick_called = asyncio.Event()

    async def handle_first_tick(slot):
        event_first_tick_called.set()

    async def handle_second_tick(slot):
        event_second_tick_called.set()

    monkeypatch.setattr(alice, 'handle_first_tick', handle_first_tick)
    monkeypatch.setattr(alice, 'handle_second_tick', handle_second_tick)

    # give the `event_bus` a moment to become ready
    await asyncio.sleep(0.01)

    # First tick
    await event_bus.broadcast(
        SlotTickEvent(
            slot=1,
            elapsed_time=2,
            is_second_tick=False,
        ),
        BroadcastConfig(internal=True),
    )
    await asyncio.wait_for(
        event_first_tick_called.wait(),
        timeout=2,
        loop=event_loop,
    )
    assert not event_second_tick_called.is_set()
    event_first_tick_called.clear()

    # Second tick
    await event_bus.broadcast(
        SlotTickEvent(
            slot=1,
            elapsed_time=2,
            is_second_tick=True,
        ),
        BroadcastConfig(internal=True),
    )
    await asyncio.wait_for(
        event_second_tick_called.wait(),
        timeout=2,
        loop=event_loop,
    )
    assert not event_first_tick_called.is_set()
Example #6
    async def _keep_ticking(self) -> None:
        """
        Ticker should tick twice in one slot:
        one for a new slot, one for the second half of an already ticked slot,
        e.g., if `seconds_per_slot` is `6`, for slot `49` it should tick once
        for the first 3 seconds and once for the last 3 seconds.
        """
        # `has_sent_second_half_slot_tick` is used to prevent another tick
        # for the second half of a ticked slot.
        has_sent_second_half_slot_tick = False
        while self.is_operational:
            elapsed_time = Second(int(time.time()) - self.genesis_time)
            if elapsed_time >= self.seconds_per_slot:
                slot = Slot(elapsed_time // self.seconds_per_slot +
                            self.genesis_slot)
                is_second_tick = ((elapsed_time % self.seconds_per_slot) >=
                                  (self.seconds_per_slot / 2))
                # Case 1: new slot
                if slot > self.latest_slot:
                    self.logger.debug(
                        bold_green("Tick  this_slot=%s elapsed=%s"),
                        slot,
                        elapsed_time,
                    )
                    self.latest_slot = slot
                    await self.event_bus.broadcast(
                        SlotTickEvent(
                            slot=slot,
                            elapsed_time=elapsed_time,
                            is_second_tick=is_second_tick,
                        ),
                        BroadcastConfig(internal=True),
                    )
                    has_sent_second_half_slot_tick = is_second_tick
                # Case 2: second half of an already ticked slot that hasn't ticked yet
                elif is_second_tick and not has_sent_second_half_slot_tick:
                    self.logger.debug(
                        bold_green("Tick  this_slot=%s (second-tick)"), slot)
                    await self.event_bus.broadcast(
                        SlotTickEvent(
                            slot=slot,
                            elapsed_time=elapsed_time,
                            is_second_tick=is_second_tick,
                        ),
                        BroadcastConfig(internal=True),
                    )
                    has_sent_second_half_slot_tick = True

            await asyncio.sleep(self.seconds_per_slot //
                                DEFAULT_CHECK_FREQUENCY)
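
To make the docstring's example concrete: with `seconds_per_slot = 6` and `genesis_slot = 0`, an `elapsed_time` of 297 seconds gives `slot = 297 // 6 = 49` and `297 % 6 = 3 >= 3`, so `is_second_tick` is `True` and slot 49 receives its second-half tick. A standalone sketch of the same arithmetic (ignoring the `Slot`/`Second` wrapper types):

def tick_position(elapsed_time: int, seconds_per_slot: int, genesis_slot: int = 0):
    # Mirrors the arithmetic in `_keep_ticking` above: which slot are we in,
    # and are we already past the midpoint of that slot?
    slot = elapsed_time // seconds_per_slot + genesis_slot
    is_second_tick = (elapsed_time % seconds_per_slot) >= (seconds_per_slot / 2)
    return slot, is_second_tick


# With seconds_per_slot=6: second 295 is in the first half of slot 49,
# second 297 is in the second half.
assert tick_position(295, 6) == (49, False)
assert tick_position(297, 6) == (49, True)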
Example #7
async def test_connection_tracker_server_and_client(event_loop, event_bus):
    tracker = MemoryConnectionTracker()
    remote_a = NodeFactory()
    await tracker.record_blacklist(remote_a, 60, "testing")

    assert await tracker.should_connect_to(remote_a) is False

    service = ConnectionTrackerServer(event_bus, tracker)

    # start the server
    asyncio.ensure_future(service.run(), loop=event_loop)
    await service.events.started.wait()

    config = BroadcastConfig(filter_endpoint=NETWORKING_EVENTBUS_ENDPOINT)
    bus_tracker = ConnectionTrackerClient(event_bus, config=config)

    # ensure we can read from the tracker over the event bus
    assert await bus_tracker.should_connect_to(remote_a) is False

    # ensure we can write to the tracker over the event bus
    remote_b = NodeFactory()

    assert await bus_tracker.should_connect_to(remote_b) is True

    await bus_tracker.record_blacklist(remote_b, 60, "testing")

    assert await bus_tracker.should_connect_to(remote_b) is False
    assert await tracker.should_connect_to(remote_b) is False
Example #8
    async def _run(self) -> None:
        async with AsyncioEndpoint.serve(self._connection_config) as endpoint:
            self._endpoint = endpoint
            # signal that the endpoint is now available
            self._endpoint_available.set()

            # run a background task that automatically connects to newly announced endpoints
            self.run_daemon_task(self._auto_connect_new_announced_endpoints())

            # connect to the *main* endpoint which communicates information
            # about other endpoints that come online.
            main_endpoint_config = ConnectionConfig.from_name(
                MAIN_EVENTBUS_ENDPOINT,
                self._trinity_config.ipc_dir,
            )
            await endpoint.connect_to_endpoints(main_endpoint_config)

            # announce ourselves to the event bus
            await endpoint.wait_until_endpoint_subscribed_to(
                MAIN_EVENTBUS_ENDPOINT,
                EventBusConnected,
            )
            await endpoint.broadcast(
                EventBusConnected(self._connection_config),
                BroadcastConfig(filter_endpoint=MAIN_EVENTBUS_ENDPOINT))

            # run until the endpoint exits
            await self.cancellation()
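
Note the ordering in `_run`: the endpoint first waits until the main endpoint has subscribed to `EventBusConnected` and only then broadcasts the targeted announcement; without that wait, the broadcast could race with the remote's subscription and be dropped or rejected. A reduced sketch of the same handshake, with hypothetical argument names:

from lahja import BroadcastConfig


async def announce_to(endpoint, remote_config, remote_name, event):
    # Hypothetical helper condensing the handshake above: connect, wait until
    # the named remote has subscribed to the event type, then send it a
    # targeted broadcast so no other endpoint receives the announcement.
    await endpoint.connect_to_endpoints(remote_config)
    await endpoint.wait_until_endpoint_subscribed_to(remote_name, type(event))
    await endpoint.broadcast(event, BroadcastConfig(filter_endpoint=remote_name))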
Example #9
async def test_validator_handle_slot_tick(event_loop, event_bus, monkeypatch):
    alice = await get_validator(event_loop=event_loop,
                                event_bus=event_bus,
                                index=0)

    event_new_slot_called = asyncio.Event()

    async def propose_or_skip_block(slot, is_second_tick):
        event_new_slot_called.set()

    monkeypatch.setattr(alice, 'propose_or_skip_block', propose_or_skip_block)

    # give the `event_bus` a moment to become ready
    await asyncio.sleep(0.01)

    await event_bus.broadcast(
        SlotTickEvent(
            slot=1,
            elapsed_time=2,
            is_second_tick=False,
        ),
        BroadcastConfig(internal=True),
    )
    await asyncio.wait_for(
        event_new_slot_called.wait(),
        timeout=2,
        loop=event_loop,
    )
Example #10
async def test_connection_tracker_server_and_client(event_loop, event_bus):
    tracker = MemoryConnectionTracker()
    remote_a = NodeFactory()
    tracker.record_blacklist(remote_a, 60, "testing")

    blacklisted_ids = await tracker.get_blacklisted()
    assert remote_a.id in blacklisted_ids

    service = ConnectionTrackerServer(event_bus, tracker)

    # start the server
    async with background_asyncio_service(service):
        config = BroadcastConfig(filter_endpoint=NETWORKING_EVENTBUS_ENDPOINT)
        bus_tracker = ConnectionTrackerClient(event_bus, config=config)

        # Give `bus_tracker` a moment to set up subscriptions
        await event_bus.wait_until_any_endpoint_subscribed_to(
            GetBlacklistedPeersRequest)
        # ensure we can read from the tracker over the event bus
        bus_blacklisted_ids = await bus_tracker.get_blacklisted()
        assert remote_a.id in bus_blacklisted_ids

        # ensure we can write to the tracker over the event bus
        remote_b = NodeFactory()
        bus_tracker.record_blacklist(remote_b, 60, "testing")
        # let the underlying broadcast_nowait execute
        await asyncio.sleep(0.01)

        bus_blacklisted_ids = await bus_tracker.get_blacklisted()
        blacklisted_ids = await tracker.get_blacklisted()
        assert remote_b.id in blacklisted_ids
        assert bus_blacklisted_ids == blacklisted_ids

        assert sorted(blacklisted_ids) == sorted([remote_a.id, remote_b.id])
Example #11
async def test_validator_handle_new_slot(caplog, event_loop, event_bus,
                                         monkeypatch):
    alice = await get_validator(event_loop=event_loop,
                                event_bus=event_bus,
                                index=0)

    event_new_slot_called = asyncio.Event()

    async def new_slot(slot):
        event_new_slot_called.set()

    monkeypatch.setattr(alice, 'new_slot', new_slot)

    # give the `event_bus` a moment to become ready
    await asyncio.sleep(0.01)

    event_bus.broadcast(
        NewSlotEvent(
            slot=1,
            elapsed_time=2,
        ),
        BroadcastConfig(internal=True),
    )
    await asyncio.wait_for(
        event_new_slot_called.wait(),
        timeout=2,
        loop=event_loop,
    )
Example #12
async def test_internal_propagation(endpoint_pair):
    endpoint_a, endpoint_b = endpoint_pair

    async def do_wait_for(endpoint, event):
        await endpoint.wait_for(Internal)
        event.set()

    got_by_endpoint_a = asyncio.Event()
    got_by_endpoint_b = asyncio.Event()

    asyncio.ensure_future(do_wait_for(endpoint_a, got_by_endpoint_a))
    will_not_finish = asyncio.ensure_future(do_wait_for(endpoint_b, got_by_endpoint_b))

    # give subscriptions time to update
    await endpoint_a.wait_until_all_endpoints_subscribed_to(Internal)
    await endpoint_b.wait_until_all_endpoints_subscribed_to(Internal)

    # now broadcast a few over the internal bus on `A`
    for _ in range(5):
        await endpoint_a.broadcast(Internal(), BroadcastConfig(internal=True))
    await asyncio.sleep(0.01)

    assert got_by_endpoint_a.is_set() is True
    assert got_by_endpoint_b.is_set() is False

    will_not_finish.cancel()
Example #13
async def test_connection_tracker_server_and_client(event_loop, event_bus):
    tracker = MemoryConnectionTracker()
    remote_a = NodeFactory()
    tracker.record_blacklist(remote_a, 60, "testing")

    assert await tracker.should_connect_to(remote_a) is False

    service = ConnectionTrackerServer(event_bus, tracker)

    # start the server
    asyncio.ensure_future(service.run(), loop=event_loop)
    await service.events.started.wait()

    config = BroadcastConfig(filter_endpoint=NETWORKING_EVENTBUS_ENDPOINT)
    bus_tracker = ConnectionTrackerClient(event_bus, config=config)

    # Give `bus_tracker` a moment to set up subscriptions
    await event_bus.wait_until_any_endpoint_subscribed_to(ShouldConnectToPeerRequest)
    # ensure we can read from the tracker over the event bus
    assert await bus_tracker.should_connect_to(remote_a) is False

    # ensure we can write to the tracker over the event bus
    remote_b = NodeFactory()

    assert await bus_tracker.should_connect_to(remote_b) is True

    bus_tracker.record_blacklist(remote_b, 60, "testing")
    # let the underlying broadcast_nowait execute
    await asyncio.sleep(0.01)

    assert await bus_tracker.should_connect_to(remote_b) is False
    assert await tracker.should_connect_to(remote_b) is False
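
The `bus_tracker.record_blacklist(...)` call above is not awaited because, as the comment notes, the client uses `broadcast_nowait` under the hood: it only queues the event, and the following `asyncio.sleep(0.01)` yields control so the queued broadcast actually goes out before the assertions run. A hedged sketch of that fire-and-forget pattern, with a hypothetical event type:

import asyncio

from lahja import BaseEvent, BroadcastConfig


class BlacklistCommand(BaseEvent):
    # Hypothetical stand-in for the event the tracker client emits.
    pass


async def fire_and_forget(endpoint, remote_name):
    # broadcast_nowait returns immediately; the event is sent once the loop
    # gets a chance to process the endpoint's outbound queue.
    endpoint.broadcast_nowait(
        BlacklistCommand(), BroadcastConfig(filter_endpoint=remote_name)
    )
    await asyncio.sleep(0.01)  # let the queued broadcast go out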
Example #14
    def request_shutdown(self, reason: str) -> None:
        """
        Perform a graceful shutdown of Trinity. Can be called from any process.
        """
        self.broadcast_nowait(
            ShutdownRequest(reason),
            BroadcastConfig(filter_endpoint=MAIN_EVENTBUS_ENDPOINT))
Example #15
    def shutdown_host(self, reason: str) -> None:
        """
        Shutdown ``Trinity`` by broadcasting a :class:`~trinity.events.ShutdownRequest` on the
        :class:`~lahja.eventbus.EventBus`. The actual shutdown routine is executed and coordinated
        by the main application process, which listens for this event.
        """
        self.event_bus.broadcast(
            ShutdownRequest(reason),
            BroadcastConfig(filter_endpoint=MAIN_EVENTBUS_ENDPOINT))
Example #16
    async def announce_endpoint(self) -> None:
        """
        Announce this endpoint to the :class:`~trinity.endpoint.TrinityMainEventBusEndpoint` so
        that it will be further propagated to all other endpoints, allowing them to connect to us.
        """
        await self.broadcast(
            EventBusConnected(
                ConnectionConfig(name=self.name, path=self.ipc_path)),
            BroadcastConfig(filter_endpoint=MAIN_EVENTBUS_ENDPOINT))
Example #17
async def test_ipc(
    w3,
    tester,
    num_blocks_confirmed,
    polling_period,
    endpoint_server,
    endpoint_client,
    eth1_monitor,
    func_do_deposit,
):
    func_do_deposit()
    tester.mine_blocks(num_blocks_confirmed)
    await trio.sleep(polling_period)
    await wait_all_tasks_blocked()

    broadcast_config = BroadcastConfig(endpoint_server.name)

    async def request(request_type, **kwargs):
        await endpoint_client.wait_until_any_endpoint_subscribed_to(
            request_type)
        resp = await endpoint_client.request(request_type(**kwargs),
                                             broadcast_config)
        return resp.to_data()

    # Result from IPC should be the same as the direct call with the same args.

    # Test: `get_deposit`
    # Succeeds
    get_deposit_kwargs = {"deposit_count": 1, "deposit_index": 0}
    assert eth1_monitor._get_deposit(**get_deposit_kwargs) == (await request(
        GetDepositRequest, **get_deposit_kwargs))
    # Fails
    get_deposit_fails_kwargs = {"deposit_count": 0, "deposit_index": 0}
    with pytest.raises(Eth1MonitorValidationError):
        await request(GetDepositRequest, **get_deposit_fails_kwargs)

    # Test: `get_eth1_data`
    get_eth1_data_kwargs = {
        "distance": 0,
        "eth1_voting_period_start_timestamp": w3.eth.getBlock("latest")["timestamp"],
    }
    # Succeeds
    assert eth1_monitor._get_eth1_data(
        **get_eth1_data_kwargs) == (await request(GetEth1DataRequest,
                                                  **get_eth1_data_kwargs))
    # Fails
    get_eth1_data_kwargs_fails = {
        "distance": 1,
        "eth1_voting_period_start_timestamp": w3.eth.getBlock("latest")["timestamp"],
    }
    with pytest.raises(Eth1MonitorValidationError):
        await request(GetEth1DataRequest, **get_eth1_data_kwargs_fails)
Example #18
async def test_trio_endpoint_subscribe(endpoint_pair):
    alice, bob = endpoint_pair

    results = []

    alice.subscribe(EventTest, results.append)

    await bob.wait_until_endpoint_subscribed_to(alice.name, EventTest)

    await bob.broadcast(EventTest())
    await bob.broadcast(EventUnexpected(), BroadcastConfig(require_subscriber=False))
    await bob.broadcast(EventInherited(), BroadcastConfig(require_subscriber=False))
    await bob.broadcast(EventTest())

    # enough cycles to allow alice to process the event
    await trio.sleep(0.05)

    assert len(results) == 2
    assert all(type(event) is EventTest for event in results)
Example #19
async def test_request_can_get_cancelled(endpoint_pair):
    alice, bob = endpoint_pair

    item = Request("test")
    with pytest.raises(asyncio.TimeoutError):
        await asyncio.wait_for(
            alice.request(item, BroadcastConfig(require_subscriber=False)), 0.0001
        )
    await asyncio.sleep(0.01)
    # Ensure the registration was cleaned up
    assert item._id not in alice._futures
Example #20
    def notify_resource_available(self) -> None:

        # We currently need this to give plugins the chance to start as soon
        # as the `PeerPool` is available. In the long term, the peer pool may become
        # a plugin itself and we can get rid of this.
        peer_pool = self.get_peer_pool()

        self.event_bus.broadcast(
            ResourceAvailableEvent(resource=(peer_pool, self.cancel_token),
                                   resource_type=type(peer_pool)),
            BroadcastConfig(internal=True),
        )

        # This broadcasts the *local* chain, which is suited for tasks that aren't blocking
        # for too long. There may be value in also broadcasting the proxied chain.
        self.event_bus.broadcast(
            ResourceAvailableEvent(resource=self.get_chain(),
                                   resource_type=BaseChain),
            BroadcastConfig(internal=True),
        )
Example #21
    async def resetToGenesisFixture(self, chain_info: Any) -> ChainAPI:
        """
        This method is a special case. It returns a new chain object
        which is then replaced inside :class:`~trinity.rpc.main.RPCServer`
        for all future calls.
        """
        chain = new_chain_from_fixture(chain_info, type(self.chain))

        await self.event_bus.broadcast(ChainReplacementEvent(chain),
                                       BroadcastConfig(internal=True))

        return chain
Example #22
async def test_trio_endpoint_request_and_response(endpoint_pair):
    alice, bob = endpoint_pair

    async with trio.open_nursery() as nursery:
        nursery.start_soon(_handle_double_request, alice)

        config = BroadcastConfig(alice.name)

        await bob.wait_until_endpoint_subscribed_to(alice.name, DoubleRequest)

        response = await bob.request(DoubleRequest(7), config)
        assert isinstance(response, DoubleResponse)
        assert response.result == 14
Example #23
    async def worker(cls, name: str, config: ConsumerConfig) -> None:
        conn_config = ConnectionConfig.from_name(name)
        async with config.backend.Endpoint.serve(conn_config) as event_bus:
            await event_bus.connect_to_endpoints(
                ConnectionConfig.from_name(REPORTER_ENDPOINT))
            await event_bus.wait_until_connected_to(DRIVER_ENDPOINT)
            stats = await cls.do_consumer(event_bus, config)

            await event_bus.wait_until_endpoint_subscribed_to(
                REPORTER_ENDPOINT, TotalRecordedEvent)

            await event_bus.broadcast(
                TotalRecordedEvent(stats.crunch(event_bus.name)),
                BroadcastConfig(filter_endpoint=REPORTER_ENDPOINT),
            )
Example #24
def test_broadcast_without_listeners_explicitly_allowed(ipc_base_path, runner):
    server_config = ConnectionConfig.from_name("server", base_path=ipc_base_path)
    server_done, client_done = d.checkpoint("done")

    server = d.driver(d.serve_endpoint(server_config), server_done)

    client = d.driver(
        d.run_endpoint("client"),
        d.connect_to_endpoints(server_config),
        d.wait_until_connected_to("server"),
        d.broadcast(Event(), BroadcastConfig(require_subscriber=False)),
        client_done,
    )

    runner(server, client)
Example #25
    async def _broadcast_slot_tick_event(self, slot: Slot,
                                         elapsed_time: Second,
                                         tick_type: TickType) -> None:
        self.logger.debug(
            bold_white("[%s] tick at %ss of slot #%s, total elapsed %ds"),
            tick_type,
            elapsed_time % self.seconds_per_slot,
            slot,
            elapsed_time,
        )
        await self.event_bus.broadcast(
            SlotTickEvent(slot=slot,
                          elapsed_time=elapsed_time,
                          tick_type=tick_type),
            BroadcastConfig(internal=True),
        )
Example #26
    async def worker(backend: BaseBackend, name: str, num_events: int) -> None:
        config = ConnectionConfig.from_name(name)
        async with backend.Endpoint.serve(config) as event_bus:
            await event_bus.connect_to_endpoints(
                ConnectionConfig.from_name(REPORTER_ENDPOINT)
            )
            await event_bus.wait_until_all_remotes_subscribed_to(TotalRecordedEvent)

            stats = LocalStatistic()
            events = event_bus.stream(PerfMeasureEvent, num_events=num_events)
            async for event in events:
                stats.add(
                    RawMeasureEntry(sent_at=event.sent_at, received_at=time.time())
                )

            await event_bus.broadcast(
                TotalRecordedEvent(stats.crunch(event_bus.name)),
                BroadcastConfig(filter_endpoint=REPORTER_ENDPOINT),
            )
Example #27
    async def _keep_ticking(self) -> None:
        while self.is_operational:
            elapsed_time = Second(int(time.time()) - self.genesis_time)
            if elapsed_time >= self.seconds_per_slot:
                slot = Slot(elapsed_time // self.seconds_per_slot +
                            self.genesis_slot)
                if slot > self.latest_slot:
                    self.logger.debug(
                        bold_green(
                            f"New slot: {slot}\tElapsed time: {elapsed_time}"))
                    self.latest_slot = slot
                    self.event_bus.broadcast(
                        NewSlotEvent(
                            slot=slot,
                            elapsed_time=elapsed_time,
                        ),
                        BroadcastConfig(internal=True),
                    )
            await asyncio.sleep(self.seconds_per_slot //
                                DEFAULT_CHECK_FREQUENCY)
Example #28
async def test_broadcasts_to_specific_endpoint(triplet_of_endpoints):

    endpoint1, endpoint2, endpoint3 = triplet_of_endpoints

    tracker = Tracker()

    await endpoint1.subscribe(DummyRequestPair,
                              tracker.track_and_broadcast_dummy(1, endpoint1))

    await endpoint2.subscribe(DummyRequestPair,
                              tracker.track_and_broadcast_dummy(2, endpoint1))

    await endpoint3.wait_until_all_remotes_subscribed_to(DummyRequestPair)

    item = DummyRequestPair()
    response = await endpoint3.request(
        item, BroadcastConfig(filter_endpoint=endpoint1.name))
    assert isinstance(response, DummyResponse)
    assert tracker.exists(1)
    assert not tracker.exists(2)
    # Ensure the registration was cleaned up
    assert item._id not in endpoint3._futures
Example #29
    async def worker(logger: logging.Logger,
                     config: ReportingProcessConfig) -> None:
        conn_config = ConnectionConfig.from_name(REPORTER_ENDPOINT)
        async with config.backend.Endpoint.serve(conn_config) as event_bus:
            await event_bus.connect_to_endpoints(
                ConnectionConfig.from_name(ROOT_ENDPOINT))

            global_statistic = GlobalStatistic()
            events = event_bus.stream(TotalRecordedEvent,
                                      num_events=config.num_processes)
            async for event in events:
                global_statistic.add(event.total)

            print_full_report(
                logger,
                config.backend,
                config.num_processes,
                config.num_events,
                global_statistic,
            )
            await event_bus.broadcast(
                ShutdownEvent(),
                BroadcastConfig(filter_endpoint=ROOT_ENDPOINT))
Example #30
async def test_broadcasts_to_specific_endpoint(server_with_two_clients):

    server, client_a, client_b = server_with_two_clients

    return_queue = asyncio.Queue()

    client_a.subscribe(BroadcastEvent, return_queue.put_nowait)
    client_b.subscribe(BroadcastEvent, return_queue.put_nowait)
    client_a.subscribe(TailEvent, return_queue.put_nowait)
    client_b.subscribe(TailEvent, return_queue.put_nowait)

    await server.wait_until_all_endpoints_subscribed_to(BroadcastEvent,
                                                        include_self=False)
    await server.wait_until_all_endpoints_subscribed_to(TailEvent,
                                                        include_self=False)

    # broadcast once targeted at a specific endpoint
    await server.broadcast(BroadcastEvent(),
                           BroadcastConfig(filter_endpoint=client_a.name))
    # broadcast again to all endpoints
    await server.broadcast(TailEvent())

    # Get what should be all items out of the queue. Because the tail events
    # go through both receiving endpoints and were broadcast after the
    # targeted event, receiving them proves the first event isn't sitting
    # somewhere waiting to be processed.
    result = await return_queue.get()
    tail_event_a = await return_queue.get()
    tail_event_b = await return_queue.get()

    assert isinstance(result, BroadcastEvent)
    assert isinstance(tail_event_a, TailEvent)
    assert isinstance(tail_event_b, TailEvent)

    # ensure the queues are empty as they should be.
    assert return_queue.qsize() == 0
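
Taken together, the examples on this page exercise the three `BroadcastConfig` options that control routing: `filter_endpoint` targets a single named endpoint, `internal=True` keeps the event on the local bus, and `require_subscriber=False` allows broadcasting an event type nobody has subscribed to. A compact reference sketch (the endpoint name is hypothetical):

from lahja import BroadcastConfig

# Deliver only to the endpoint named "networking" (see the filter_endpoint examples above).
to_networking = BroadcastConfig(filter_endpoint="networking")

# Deliver only to subscribers on the broadcasting endpoint itself (see the
# ResourceAvailableEvent and SlotTickEvent examples above).
local_only = BroadcastConfig(internal=True)

# Don't treat the absence of subscribers as an error (see the require_subscriber examples above).
best_effort = BroadcastConfig(require_subscriber=False)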