def _run(self):
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)  # Related to the current thread only.
        self._loop = loop

        async def enter_stack(stack):
            manager = await stack.enter_async_context(
                AgileMeshNetworkManager(**self._amn_kwargs)
            )
            return manager

        stack = AsyncExitStack()
        try:
            self._manager = loop.run_until_complete(enter_stack(stack))
            self._thread_started_event.set()
            self._start_initialization_event.wait()
            self._manager.start_initialization()
            loop.run_forever()
        except Exception as e:
            logger.error("Uncaught exception in the AMN event loop", exc_info=True)
            # TODO shutdown the app? It's harmful to keep it alive at this point.
            # Obviously, everything is horribly broken: there's no manager
            # controlling the app.
        finally:
            logger.info("Shutting down AMN event loop")
            loop.run_until_complete(stack.aclose())
            loop.run_until_complete(loop.shutdown_asyncgens())
            pending = asyncio.all_tasks(loop)  # Task.all_tasks() was removed in Python 3.9
            loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
            loop.close()
Example 2
    def setUp(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)

        self.server = MockupDB(auto_ismaster={"maxWireVersion": 6})
        self.server.run()
        self.server.autoresponds(
            Command("find", "switch_collection",
                    namespace="topology_database"),
            {
                "cursor": {
                    "id":
                    0,
                    "firstBatch": [{
                        **d, "_id": i
                    } for i, d in enumerate(TOPOLOGY_DATABASE_DATA)],
                }
            },
        )

        self._stack = AsyncExitStack()

        td = self._stack.enter_context(tempfile.TemporaryDirectory())
        self.rpc_unix_sock = os.path.join(td, "l.sock")

        self._stack.enter_context(
            patch.object(settings, "REMOTE_DATABASE_MONGO_URI",
                         self.server.uri))
        self._stack.enter_context(
            patch.object(settings, "NEGOTIATOR_RPC_UNIX_SOCK_PATH",
                         self.rpc_unix_sock))
        self._stack.enter_context(
            patch("agile_mesh_network.ryu.amn_manager.OVSManager",
                  DummyOVSManager))
        self._stack.enter_context(
            # To avoid automatic connection to a relay.
            patch.object(settings, "IS_RELAY", True))

        self._stack.enter_context(
            patch.object(events_scheduler, "RyuAppEventLoopScheduler"))
        self.ryu_ev_loop_scheduler = events_scheduler.RyuAppEventLoopScheduler()
        self._stack.enter_context(self.ryu_ev_loop_scheduler)

        async def command_cb(session, msg):
            assert isinstance(msg, RPCCommand)
            await self._rpc_command_cb(msg)

        self.rpc_server = self.loop.run_until_complete(
            self._stack.enter_async_context(
                RPCUnixServer(self.rpc_unix_sock, command_cb)))
Example 3
    async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
        trinity_config = boot_info.trinity_config

        chain = chain_for_config(trinity_config, event_bus)

        if trinity_config.has_app_config(Eth1AppConfig):
            modules = initialize_eth1_modules(chain, event_bus)
        elif trinity_config.has_app_config(BeaconAppConfig):
            modules = initialize_beacon_modules(chain, event_bus)
        else:
            raise Exception("Unsupported Node Type")

        rpc = RPCServer(modules, chain, event_bus)

        # Run IPC Server
        ipc_server = IPCServer(rpc, boot_info.trinity_config.jsonrpc_ipc_path)
        services_to_exit: Tuple[BaseService, ...] = (ipc_server, )

        # Run HTTP Server
        if boot_info.args.enable_http:
            http_server = HTTPServer(rpc, port=boot_info.args.rpcport)
            services_to_exit += (http_server, )

        async with AsyncExitStack() as stack:
            for service in services_to_exit:
                await stack.enter_async_context(run_service(service))
            await ipc_server.cancellation()
Example 4
async def amain(args, group_config, loop):  # pragma: no cover
    logger = logging.getLogger('MAIN')
    exit_event = asyncio.Event()
    sig_handler = partial(utils.exit_handler, exit_event)
    signal.signal(signal.SIGTERM, sig_handler)
    signal.signal(signal.SIGINT, sig_handler)
    beat = asyncio.ensure_future(utils.heartbeat())

    mixer = crypto.StatefulHKDFEntropyMixer()
    nodes = [
        net.Identity(I['Address'], I['Key'], I['TLS'])
        for I in group_config["Nodes"]
    ]
    with ProcessPoolExecutor() as pool_executor:
        sources = [
            net.DrandRESTSource(identity, args.timeout, pool=pool_executor)
            for identity in nodes
        ]
        async with AsyncExitStack() as stack:
            await asyncio.gather(*(stack.enter_async_context(source)
                                   for source in sources))
            async with net.PollingSource(sources,
                                         mixer,
                                         quorum=args.quorum,
                                         period=args.period,
                                         backoff=args.backoff) as aggregate:
                async with args.output.value(aggregate) as output:
                    async with AsyncSystemdNotifier() as notifier:
                        await notifier.notify(b"READY=1")
                        await exit_event.wait()

                        logger.debug(
                            "Eventloop interrupted. Shutting down server...")
                        await notifier.notify(b"STOPPING=1")
    beat.cancel()
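The asyncio.gather() over stack.enter_async_context(source) above opens every drand source concurrently while the stack still guarantees teardown. The same pattern in isolation, as a minimal sketch using only the standard library (slow_source is an illustrative stand-in, not part of the example):

import asyncio
from contextlib import AsyncExitStack, asynccontextmanager

@asynccontextmanager
async def slow_source(i):
    await asyncio.sleep(0.1)  # stand-in for a slow __aenter__ (e.g. a network handshake)
    try:
        yield i
    finally:
        print("closed", i)

async def main(n=3):
    async with AsyncExitStack() as stack:
        # All __aenter__ calls run concurrently; the stack still closes every
        # entered context (in LIFO order) when the block exits.
        handles = await asyncio.gather(
            *(stack.enter_async_context(slow_source(i)) for i in range(n)))
        print("opened:", handles)

asyncio.run(main())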
Example 5
    async def _run(self) -> None:
        async with AsyncExitStack() as stack:
            await stack.enter_async_context(P2PAPI().as_behavior().apply(
                self.connection))
            self.p2p_api = self.connection.get_logic('p2p', P2PAPI)

            for behavior in self.get_behaviors():
                if behavior.should_apply_to(self.connection):
                    await stack.enter_async_context(
                        behavior.apply(self.connection))

            # setup handler for protocol messages to pass messages to subscribers
            for protocol in self.connection.get_protocols():
                self.connection.add_protocol_handler(
                    type(protocol),
                    self._handle_subscriber_message,
                )

            self.setup_protocol_handlers()

            # The `boot` process is run in the background to allow the `run` loop
            # to continue so that all of the Peer APIs can be used within the
            # `boot` task.
            self.run_child_service(self.boot_manager)

            # Trigger the connection to start feeding messages though the handlers
            self.connection.start_protocol_streams()
            self.ready.set()

            await self.cancellation()
Example 6
async def test_proxy_peer_requests(request, event_bus, other_event_bus,
                                   event_loop, chaindb_20, client_and_server):
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    client_peer_pool = MockPeerPoolWithConnectedPeers(
        [client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers(
        [server_peer], event_bus=server_event_bus)

    async with AsyncExitStack() as stack:
        await stack.enter_async_context(
            run_peer_pool_event_server(client_event_bus,
                                       client_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        await stack.enter_async_context(
            run_peer_pool_event_server(server_event_bus,
                                       server_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        await stack.enter_async_context(
            background_asyncio_service(
                ETHRequestServer(server_event_bus,
                                 TO_NETWORKING_BROADCAST_CONFIG,
                                 AsyncChainDB(chaindb_20.db))))

        client_proxy_peer_pool = ETHProxyPeerPool(
            client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(client_proxy_peer_pool))

        proxy_peer_pool = ETHProxyPeerPool(server_event_bus,
                                           TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(
            client_peer.session)

        headers = await proxy_peer.eth_api.get_block_headers(0, 1, 0, False)

        assert len(headers) == 1
        block_header = headers[0]
        assert block_header.block_number == 0

        receipts = await proxy_peer.eth_api.get_receipts(headers)
        assert len(receipts) == 1
        receipt = receipts[0]
        assert receipt[1][0] == block_header.receipt_root

        block_bundles = await proxy_peer.eth_api.get_block_bodies(headers)
        assert len(block_bundles) == 1
        first_bundle = block_bundles[0]
        assert first_bundle[1][0] == block_header.transaction_root

        node_data = await proxy_peer.eth_api.get_node_data(
            (block_header.state_root, ))
        assert node_data[0][0] == block_header.state_root
Example 7
async def p2pds(
    num_p2pds,
    security_protocol,
    is_gossipsub,
    is_pubsub_signing,
    is_pubsub_signing_strict,
):
    async with AsyncExitStack() as stack:
        p2pds = [
            await stack.enter_async_context(
                make_p2pd(
                    get_unused_tcp_port(),
                    get_unused_tcp_port(),
                    security_protocol,
                    is_gossipsub=is_gossipsub,
                    is_pubsub_signing=is_pubsub_signing,
                    is_pubsub_signing_strict=is_pubsub_signing_strict,
                )
            )
            for _ in range(num_p2pds)
        ]
        try:
            yield p2pds
        finally:
            for p2pd in p2pds:
                await p2pd.close()
Example 8
    async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:

        metrics_service = MetricsService(
            influx_server=boot_info.args.metrics_influx_server,
            influx_user=boot_info.args.metrics_influx_user,
            influx_password=boot_info.args.metrics_influx_password,
            influx_database=boot_info.args.metrics_influx_database,
            host=boot_info.args.metrics_host,
            reporting_frequency=boot_info.args.metrics_reporting_frequency,
        )

        # types ignored due to https://github.com/ethereum/async-service/issues/5
        system_metrics_collector = collect_process_metrics(  # type: ignore
            metrics_service.registry,
            frequency_seconds=boot_info.args.metrics_system_collector_frequency
        )

        services_to_exit = (metrics_service, system_metrics_collector,)

        async with AsyncExitStack() as stack:
            managers = tuple([
                await stack.enter_async_context(background_trio_service(service))
                for service in services_to_exit
            ])
            await managers[0].wait_finished()
Example 9
    async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
        trinity_config = boot_info.trinity_config

        with chain_for_config(trinity_config, event_bus) as chain:
            if trinity_config.has_app_config(Eth1AppConfig):
                modules = initialize_eth1_modules(chain, event_bus)
            elif trinity_config.has_app_config(BeaconAppConfig):
                modules = initialize_beacon_modules(chain, event_bus)
            else:
                raise Exception("Unsupported Node Type")

            rpc = RPCServer(modules, chain, event_bus)

            # Run IPC Server
            ipc_server = IPCServer(rpc,
                                   boot_info.trinity_config.jsonrpc_ipc_path)
            services_to_exit: Tuple[Service, ...] = (ipc_server, )

            # Run HTTP Server
            if boot_info.args.enable_http:
                http_server = HTTPServer(
                    handler=RPCHandler.handle(rpc.execute),
                    port=boot_info.args.rpcport,
                )
                services_to_exit += (http_server, )

            async with AsyncExitStack() as stack:
                managers = tuple([
                    await stack.enter_async_context(
                        background_asyncio_service(service))
                    for service in services_to_exit
                ])
                await managers[0].wait_finished()
Example 10
    async def start_loading(self):
        if self.is_loading:
            return
        if self.is_completed:
            assert await self.is_cached_locally(), (
                "Cache file %s is gone." % self.local_path)
            return

        await self.aclose()  # ensure old Future was properly awaited first
        async with AsyncExitStack() as stack:
            await stack.enter_async_context(self.state_manager())

            if await self.is_cached_locally():
                self.state = DownloadState.DONE
                return
            await self.validate_headers()

            aiofile = await stack.enter_async_context(
                aiofiles.open(self.local_path, "wb", buffering=0))
            await aiofile.truncate(self.total_length)
            await aiofile.seek(0, os.SEEK_SET)

            resp = await stack.enter_async_context(
                self.http_session.get(self.url, chunked=True))
            resp.raise_for_status()

            self.future = asyncio.ensure_future(
                self.run_in_background(stack.pop_all(), aiofile, resp),
                # After stack.pop_all(), the former exit stack will be contained in the self.future/run_in_background() 'closure'
                # and reliably cleaned up by self.aclose() right after the background Task completed.
                loop=self.loop)
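The stack.pop_all() handoff above is what keeps the download's resources open after the async with block returns. A minimal, self-contained sketch of that handoff, with illustrative names rather than the classes from this example:

import asyncio
from contextlib import AsyncExitStack, asynccontextmanager

@asynccontextmanager
async def resource(name):
    print("open", name)
    try:
        yield name
    finally:
        print("close", name)

async def background(detached: AsyncExitStack, res: str) -> None:
    try:
        print("using", res)
    finally:
        await detached.aclose()  # releases everything entered before pop_all()

async def main() -> None:
    async with AsyncExitStack() as stack:
        res = await stack.enter_async_context(resource("r1"))
        task = asyncio.create_task(background(stack.pop_all(), res))
    # Leaving the `async with` block closes nothing: ownership moved to the task.
    await task

asyncio.run(main())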
Example 11
    async def test_async_callback(self):
        expected = [
            ((), {}),
            ((1,), {}),
            ((1,2), {}),
            ((), dict(example=1)),
            ((1,), dict(example=1)),
            ((1,2), dict(example=1)),
        ]
        result = []
        async def _exit(*args, **kwds):
            """Test metadata propagation"""
            result.append((args, kwds))

        async with AsyncExitStack() as stack:
            for args, kwds in reversed(expected):
                if args and kwds:
                    f = stack.push_async_callback(_exit, *args, **kwds)
                elif args:
                    f = stack.push_async_callback(_exit, *args)
                elif kwds:
                    f = stack.push_async_callback(_exit, **kwds)
                else:
                    f = stack.push_async_callback(_exit)
                self.assertIs(f, _exit)
            for wrapper in stack._exit_callbacks:
                self.assertIs(wrapper[1].__wrapped__, _exit)
                self.assertNotEqual(wrapper[1].__name__, _exit.__name__)
                self.assertIsNone(wrapper[1].__doc__, _exit.__doc__)

        self.assertEqual(result, expected)
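The reversed(expected) above reflects that AsyncExitStack invokes registered callbacks in last-in, first-out order on exit. A minimal sketch of that ordering, using only the standard library:

import asyncio
from contextlib import AsyncExitStack

async def demo():
    order = []

    async def record(label):
        order.append(label)

    async with AsyncExitStack() as stack:
        stack.push_async_callback(record, "registered first")
        stack.push_async_callback(record, "registered second")
    # Callbacks run last-in, first-out when the block exits.
    assert order == ["registered second", "registered first"]

asyncio.run(demo())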
Example 12
async def test_proxy_peer_requests_with_timeouts(request, event_bus,
                                                 other_event_bus, event_loop,
                                                 client_and_server):

    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    client_peer_pool = MockPeerPoolWithConnectedPeers(
        [client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers(
        [server_peer], event_bus=server_event_bus)

    async with AsyncExitStack() as stack:
        await stack.enter_async_context(
            run_peer_pool_event_server(client_event_bus,
                                       client_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))
        await stack.enter_async_context(
            run_peer_pool_event_server(server_event_bus,
                                       server_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        # We just want an ETHRequestServer that doesn't answer us but we still have to run
        # *something* to at least subscribe to the events. Otherwise Lahja's safety check will yell
        # at us for sending requests into the void.
        for event_type in ETHRequestServer(None, None,
                                           None)._subscribed_events:
            server_event_bus.subscribe(event_type, lambda _: None)

        client_proxy_peer_pool = ETHProxyPeerPool(
            client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(client_proxy_peer_pool))

        server_proxy_peer_pool = ETHProxyPeerPool(
            server_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(server_proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(
            client_peer.session)

        with pytest.raises(asyncio.TimeoutError):
            await proxy_peer.eth_api.get_block_headers(0,
                                                       1,
                                                       0,
                                                       False,
                                                       timeout=0.01)

        with pytest.raises(asyncio.TimeoutError):
            await proxy_peer.eth_api.get_receipts((), timeout=0.01)

        with pytest.raises(asyncio.TimeoutError):
            await proxy_peer.eth_api.get_block_bodies((), timeout=0.01)

        with pytest.raises(asyncio.TimeoutError):
            await proxy_peer.eth_api.get_node_data((), timeout=0.01)
Example 13
async def main_async_exit_stack(*, tcp_port, socket_path):
    loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()
    stack = AsyncExitStack()
    tunnels_state = await stack.enter_async_context(TunnelsState(loop=loop))
    await stack.enter_async_context(
        RPCResponder(tunnels_state, socket_path=socket_path))
    await stack.enter_async_context(
        TCPExteriorServer(tunnels_state, tcp_port=tcp_port))
    return stack
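Because main_async_exit_stack() returns the stack instead of wrapping it in async with, the caller owns the teardown. A hedged usage sketch; serve_forever() and the run-until-cancelled wait are illustrative, not part of the original code:

import asyncio

async def serve_forever(*, tcp_port, socket_path):
    stack = await main_async_exit_stack(tcp_port=tcp_port, socket_path=socket_path)
    try:
        await asyncio.Event().wait()  # run until this coroutine is cancelled
    finally:
        # Unwinds TCPExteriorServer, RPCResponder and TunnelsState in LIFO order.
        await stack.aclose()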
Example 14
    def __init__(self, *, ryu_ev_loop_scheduler: RyuAppEventLoopScheduler) -> None:
        self.ryu_ev_loop_scheduler = ryu_ev_loop_scheduler
        self.topology_database = TopologyDatabase()
        self.negotiator_rpc = NegotiatorRPC(settings.NEGOTIATOR_RPC_UNIX_SOCK_PATH)
        self.ovs_manager = OVSManager(
            datapath_id=settings.OVS_DATAPATH_ID,
            # TODO ryu_app.CONF?
        )

        self._stack = AsyncExitStack()
        self._initialization_task = None
        self._tunnel_creation_tasks: Mapping[asyncio.Future, asyncio.Future] = {}

        self.topology_database.add_local_db_synced_callback(self._event_db_synced)

        self.negotiator_rpc.add_tunnels_changed_callback(
            self._event_negotiator_tunnels_update
        )
Example 15
    def __init__(self, *, tmpdir: Optional[str] = None):
        self.__exit_stack = AsyncExitStack()

        # Directory for temporary files created by this provider
        self._temp_dir: Optional[Path] = Path(tmpdir) if tmpdir else None

        # Mapping of URLs to info on files published with this URL
        self._published_sources: Dict[str, GftpProvider.URLInfo] = dict()

        # Lock used to synchronize access to self._published_sources
        self._lock: asyncio.Lock = asyncio.Lock()

        # Flag indicating if this `GftpProvider` will close unpublished URLs.
        # See this class' docstring for more details.
        self._close_urls: Optional[bool] = read_use_gftp_close_env_var()

        # Reference to an external process running the `gftp server` command
        self._process: Optional["__Process"] = None
Example 16
    async def setUp(self):
        async with AsyncExitStack() as stack:
            self._db = await stack.enter_async_context(
                wd.connect('sqlite:///:memory:'))
            self._stack = stack.pop_all()

        async with wd.ReadWrite(self._db) as query:
            for sql in u.SQL_CREATE_TABLE:
                await query.execute(sql)
Example 17
    async def create_batch_with_gossipsub(
        cls,
        number: int,
        *,
        cache_size: int = None,
        strict_signing: bool = False,
        protocols: Sequence[TProtocol] = None,
        degree: int = GOSSIPSUB_PARAMS.degree,
        degree_low: int = GOSSIPSUB_PARAMS.degree_low,
        degree_high: int = GOSSIPSUB_PARAMS.degree_high,
        time_to_live: int = GOSSIPSUB_PARAMS.time_to_live,
        gossip_window: int = GOSSIPSUB_PARAMS.gossip_window,
        gossip_history: int = GOSSIPSUB_PARAMS.gossip_history,
        heartbeat_interval: float = GOSSIPSUB_PARAMS.heartbeat_interval,
        heartbeat_initial_delay: float = GOSSIPSUB_PARAMS.heartbeat_initial_delay,
        security_protocol: TProtocol = None,
        muxer_opt: TMuxerOptions = None,
        msg_id_constructor: Callable[[rpc_pb2.Message],
                                     bytes] = get_peer_and_seqno_msg_id,
    ) -> AsyncIterator[Tuple[Pubsub, ...]]:
        if protocols is not None:
            gossipsubs = GossipsubFactory.create_batch(
                number,
                protocols=protocols,
                degree=degree,
                degree_low=degree_low,
                degree_high=degree_high,
                time_to_live=time_to_live,
                gossip_window=gossip_window,
                heartbeat_interval=heartbeat_interval,
            )
        else:
            gossipsubs = GossipsubFactory.create_batch(
                number,
                degree=degree,
                degree_low=degree_low,
                degree_high=degree_high,
                time_to_live=time_to_live,
                gossip_window=gossip_window,
                heartbeat_interval=heartbeat_interval,
            )

        async with cls._create_batch_with_router(
                number,
                gossipsubs,
                cache_size,
                strict_signing,
                security_protocol=security_protocol,
                muxer_opt=muxer_opt,
                msg_id_constructor=msg_id_constructor,
        ) as pubsubs:
            async with AsyncExitStack() as stack:
                for router in gossipsubs:
                    await stack.enter_async_context(
                        background_trio_service(router))
                yield pubsubs
Example 18
async def test_requests_when_peer_in_client_vanishs(request, event_bus,
                                                    other_event_bus,
                                                    event_loop, chaindb_20,
                                                    client_and_server):

    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    client_peer_pool = MockPeerPoolWithConnectedPeers(
        [client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers(
        [server_peer], event_bus=server_event_bus)

    async with AsyncExitStack() as stack:
        await stack.enter_async_context(
            run_peer_pool_event_server(client_event_bus,
                                       client_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))
        await stack.enter_async_context(
            run_peer_pool_event_server(server_event_bus,
                                       server_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        await stack.enter_async_context(
            background_asyncio_service(
                ETHRequestServer(server_event_bus,
                                 TO_NETWORKING_BROADCAST_CONFIG,
                                 AsyncChainDB(chaindb_20.db))))
        client_proxy_peer_pool = ETHProxyPeerPool(
            client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(client_proxy_peer_pool))

        server_proxy_peer_pool = ETHProxyPeerPool(
            server_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(server_proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(
            client_peer.session)

        # We remove the peer from the client and expect PeerConnectionLost exceptions to be raised
        client_peer_pool.connected_nodes.pop(client_peer.session)

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.eth_api.get_block_headers(0, 1, 0, False)

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.eth_api.get_receipts(())

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.eth_api.get_block_bodies(())

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.eth_api.get_node_data(())
Example 19
async def two_connected_tx_pools(event_bus, other_event_bus, event_loop,
                                 funded_address_private_key,
                                 chain_with_block_validation, tx_validator,
                                 client_and_server):

    alice_event_bus = event_bus
    bob_event_bus = other_event_bus
    bob, alice = client_and_server

    bob_peer_pool = MockPeerPoolWithConnectedPeers([bob],
                                                   event_bus=bob_event_bus)
    alice_peer_pool = MockPeerPoolWithConnectedPeers([alice],
                                                     event_bus=alice_event_bus)

    async with AsyncExitStack() as stack:
        await stack.enter_async_context(
            run_peer_pool_event_server(bob_event_bus,
                                       bob_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        await stack.enter_async_context(
            run_peer_pool_event_server(alice_event_bus,
                                       alice_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        bob_proxy_peer_pool = ETHProxyPeerPool(bob_event_bus,
                                               TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(bob_proxy_peer_pool))

        alice_proxy_peer_pool = ETHProxyPeerPool(
            alice_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(alice_proxy_peer_pool))

        alice_tx_pool = TxPool(
            alice_event_bus,
            alice_proxy_peer_pool,
            tx_validator,
        )
        await stack.enter_async_context(
            background_asyncio_service(alice_tx_pool))

        bob_tx_pool = TxPool(
            bob_event_bus,
            bob_proxy_peer_pool,
            tx_validator,
        )
        await stack.enter_async_context(background_asyncio_service(bob_tx_pool))

        yield (
            alice,
            alice_event_bus,
            alice_tx_pool,
        ), (bob, bob_event_bus, bob_tx_pool)
Example 20
    async def _start_container(self, config: Any):
        # noinspection PyTupleAssignmentBalance
        impls, container_config = self._before_start(config)

        async with AsyncExitStack() as exit_stack:
            yield AsyncContainer(
                impls=impls,
                config=container_config,
                exit_stack=exit_stack,
            )
Example 21
    async def run(self) -> None:
        connection_config = ConnectionConfig.from_name(
            MAIN_EVENTBUS_ENDPOINT,
            self._boot_info.trinity_config.ipc_dir
        )

        async with AsyncioEndpoint.serve(connection_config) as endpoint:
            self._endpoint = endpoint

            # start the background process that tracks and propagates available
            # endpoints to the other connected endpoints
            self.manager.run_daemon_task(self._track_and_propagate_available_endpoints)
            self.manager.run_daemon_task(self._handle_shutdown_request)

            await endpoint.wait_until_any_endpoint_subscribed_to(ShutdownRequest)
            await endpoint.wait_until_any_endpoint_subscribed_to(EventBusConnected)

            # signal the endpoint is up and running and available
            self._endpoint_available.set()

            # instantiate all of the components
            all_components = tuple(
                component_cls(self._boot_info)
                for component_cls
                in self._component_types
            )
            # filter out any components that should not be enabled.
            enabled_components = tuple(
                component
                for component in all_components
                if component.is_enabled
            )

            # a little bit of extra try/finally structure here to produce good
            # logging messages about the component lifecycle.
            try:
                async with AsyncExitStack() as stack:
                    self.logger.info(
                        "Starting components: %s",
                        '/'.join(component.name for component in enabled_components),
                    )
                    # Concurrently start the components.
                    await asyncio.gather(*(
                        stack.enter_async_context(run_component(component))
                        for component in enabled_components
                    ))
                    self.logger.info("Components started")
                    try:
                        await self._trigger_component_exit.wait()
                    finally:
                        self.logger.info("Stopping components")
            finally:
                self.logger.info("Components stopped.")
                self.manager.cancel()
Example 22
    async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
        try:
            tracker_services = cls._get_services(boot_info, event_bus)
        except BadDatabaseError as err:
            cls.logger.exception(
                f"Unrecoverable error in Network Component: {err}")
            return  # without the tracker services there is nothing to run

        async with AsyncExitStack() as stack:
            for service in tracker_services:
                await stack.enter_async_context(run_service(service))
            await asyncio.gather(*(service.cancellation()
                                   for service in tracker_services))
Example 23
    async def apply(self, connection: ConnectionAPI) -> AsyncIterator[None]:
        self.connection = connection

        async with AsyncExitStack() as stack:
            # First apply all the child behaviors
            for behavior in self._behaviors:
                if behavior.should_apply_to(connection):
                    await stack.enter_async_context(behavior.apply(connection))

            # Now register ourselves with the connection.
            with connection.add_logic(self.name, self):
                yield
Example 24
    async def wait_signals(self, signals):
        __tracebackhide__ = True
        try:
            async with real_clock_timeout():
                async with AsyncExitStack() as stack:
                    for signal in signals:
                        await stack.enter_async_context(
                            qtrio._core.wait_signal_context(signal))
                    yield
        # Suppress context in order to simplify the tracebacks in pytest
        except trio.TooSlowError:
            raise trio.TooSlowError from None
Example 25
        async def f():
            async with AsyncExitStack() as stack:
                tunnels_state = await stack.enter_async_context(
                    TunnelsState(loop=loop))
                await stack.enter_async_context(
                    RPCResponder(tunnels_state, rpc_sock_path))
                rpc_c = await stack.enter_async_context(
                    RPCUnixClient(rpc_sock_path, command_cb))

                rpc = rpc_c.session
                resp = await rpc.issue_command("dump_tunnels_state")
                self.assertDictEqual(resp, {"tunnels": []})
Example 26
async def test_get_pooled_transactions_request(request, event_bus,
                                               other_event_bus, event_loop,
                                               chaindb_20, client_and_server):
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    if get_highest_eth_protocol_version(client_peer) < ETHProtocolV65.version:
        pytest.skip("Test not applicable below eth/65")

    client_peer_pool = MockPeerPoolWithConnectedPeers(
        [client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers(
        [server_peer], event_bus=server_event_bus)

    async with AsyncExitStack() as stack:
        await stack.enter_async_context(
            run_peer_pool_event_server(client_event_bus,
                                       client_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        await stack.enter_async_context(
            run_peer_pool_event_server(server_event_bus,
                                       server_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        client_proxy_peer_pool = ETHProxyPeerPool(
            client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(client_proxy_peer_pool))

        proxy_peer_pool = ETHProxyPeerPool(server_event_bus,
                                           TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(
            client_peer.session)

        # The reason we run this test separately from the other request tests is because
        # GetPooledTransactions requests should be answered from the tx pool which the previous
        # test does not depend on.
        await stack.enter_async_context(
            background_asyncio_service(
                TxPool(server_event_bus, proxy_peer_pool, lambda _: True)))

        # The tx pool always answers these with an empty response
        txs = await proxy_peer.eth_api.get_pooled_transactions((decode_hex(
            '0x9ea39df6210064648ecbc465cd628fe52f69af53792e1c2f27840133435159d4'
        ), ))
        assert len(txs) == 0
Example 27
    async def apply(self, connection: ConnectionAPI) -> AsyncIterator[None]:
        if hasattr(self, '_connection'):
            raise Exception("Reentrance!")

        self._connection = connection

        async with AsyncExitStack() as stack:
            for behavior in self.get_behaviors():
                if behavior.applies_to(connection):
                    await stack.enter_async_context(behavior.apply(connection))

            with connection.add_behavior(self.name, self):
                self.on_apply(connection)
                yield
Example 28
    async def create_batch_and_listen(
        cls,
        number: int,
        security_protocol: TProtocol = None,
        muxer_opt: TMuxerOptions = None,
    ) -> AsyncIterator[Tuple[Swarm, ...]]:
        async with AsyncExitStack() as stack:
            ctx_mgrs = [
                await stack.enter_async_context(
                    cls.create_and_listen(security_protocol=security_protocol,
                                          muxer_opt=muxer_opt))
                for _ in range(number)
            ]
            yield tuple(ctx_mgrs)
Example 29
    async def create(cls, number: int) -> AsyncIterator[Tuple["DummyAccountNode", ...]]:
        """
        Create a new DummyAccountNode and attach a libp2p node, a floodsub, and
        a pubsub instance to this new node.

        We use create as this serves as a factory function and allows us
        to use async await, unlike the init function
        """
        async with PubsubFactory.create_batch_with_floodsub(number) as pubsubs:
            async with AsyncExitStack() as stack:
                dummy_account_nodes = tuple(cls(pubsub) for pubsub in pubsubs)
                for node in dummy_account_nodes:
                    await stack.enter_async_context(background_trio_service(node))
                yield dummy_account_nodes
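The docstring above explains why an async create() factory is used instead of __init__. A generic sketch of that idea, assuming nothing beyond the standard library (Worker and open_backend are illustrative stand-ins, not from the example above):

import asyncio
from contextlib import AsyncExitStack, asynccontextmanager

@asynccontextmanager
async def open_backend(i):
    await asyncio.sleep(0)  # stand-in for async setup that __init__ cannot await
    try:
        yield f"backend-{i}"
    finally:
        print("torn down", i)

class Worker:
    def __init__(self, backend):
        self.backend = backend  # __init__ stays synchronous and trivial

    @classmethod
    @asynccontextmanager
    async def create(cls, number):
        # The factory does the awaiting and keeps every backend alive while
        # callers use the yielded workers.
        async with AsyncExitStack() as stack:
            workers = [
                cls(await stack.enter_async_context(open_backend(i)))
                for i in range(number)
            ]
            yield tuple(workers)

async def main():
    async with Worker.create(2) as workers:
        print([w.backend for w in workers])

asyncio.run(main())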
Example 30
async def test_proxy_peer_requests_with_timeouts(request, event_bus,
                                                 other_event_bus, event_loop,
                                                 client_and_server):

    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    client_peer_pool = MockPeerPoolWithConnectedPeers(
        [client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers(
        [server_peer], event_bus=server_event_bus)

    async with AsyncExitStack() as stack:
        await stack.enter_async_context(
            run_peer_pool_event_server(client_event_bus,
                                       client_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))
        await stack.enter_async_context(
            run_peer_pool_event_server(server_event_bus,
                                       server_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        client_proxy_peer_pool = ETHProxyPeerPool(
            client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(run_service(client_proxy_peer_pool))

        server_proxy_peer_pool = ETHProxyPeerPool(
            server_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(run_service(server_proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(
            client_peer.session)

        with pytest.raises(asyncio.TimeoutError):
            await proxy_peer.eth_api.get_block_headers(0,
                                                       1,
                                                       0,
                                                       False,
                                                       timeout=0.01)

        with pytest.raises(asyncio.TimeoutError):
            await proxy_peer.eth_api.get_receipts((), timeout=0.01)

        with pytest.raises(asyncio.TimeoutError):
            await proxy_peer.eth_api.get_block_bodies((), timeout=0.01)

        with pytest.raises(asyncio.TimeoutError):
            await proxy_peer.eth_api.get_node_data((), timeout=0.01)