async def test_removes_peers(event_bus):
    """A peer that leaves must disappear from the proxy pool's ``get_peers()``."""
    mock = mock_request_response(
        GetConnectedPeersRequest,
        GetConnectedPeersResponseFactory.from_sessions(TEST_NODES[:2]),
        event_bus,
    )
    async with mock:
        pool = ETHProxyPeerPool(event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        async with run_service(pool):
            # Both mocked peers are visible initially.
            assert len(await pool.get_peers()) == 2

            await event_bus.broadcast(PeerLeftEvent(TEST_NODES[0]))

            # Give the pool a moment to process the departure event.
            await asyncio.sleep(0.01)

            remaining = await pool.get_peers()
            assert len(remaining) == 1
            assert remaining[0].session == TEST_NODES[1]
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    """Serve block-import requests over the event bus until cancelled."""
    config = boot_info.trinity_config
    eth1_chain_config = config.get_app_config(Eth1AppConfig).get_chain_config()
    database = DBClient.connect(config.database_ipc_path)
    # Close the database client when the server shuts down.
    with database:
        chain = make_pausing_beam_chain(
            eth1_chain_config.vm_configuration,
            eth1_chain_config.chain_id,
            database,
            event_bus,
            loop=asyncio.get_event_loop(),
        )
        server = BlockImportServer(event_bus, chain)
        async with run_service(server):
            await server.cancellation()
async def test_run_service_context_manager_lifecycle_with_exception():
    """``run_service`` must cancel the service even when the body raises."""
    service = WaitService()

    def check_state(operational, cancelled, running):
        # Compare truthiness explicitly so the helper reads as a spec.
        assert bool(service.is_operational) is operational
        assert bool(service.is_cancelled) is cancelled
        assert bool(service.is_running) is running

    # Fresh service: nothing has started yet.
    check_state(operational=False, cancelled=False, running=False)

    with pytest.raises(BlowUp):
        async with run_service(service) as started:
            assert started is service
            # Inside the context the service is live.
            check_state(operational=True, cancelled=False, running=True)
            raise BlowUp

    # The exception must have triggered cancellation on exit.
    check_state(operational=False, cancelled=True, running=False)
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    """Run the ethstats reporting service until it is cancelled."""
    args = boot_info.args
    # Fall back to the network's default endpoint when no explicit URL is given.
    server_url = args.ethstats_server_url or get_default_server_url(
        boot_info.trinity_config.network_id,
    )
    stats_service = EthstatsService(
        boot_info,
        event_bus,
        server_url,
        args.ethstats_server_secret,
        args.ethstats_node_id,
        args.ethstats_node_contact,
        args.ethstats_interval,
    )
    async with run_service(stats_service):
        await stats_service.cancellation()
async def run_exchange(self, connection: ConnectionAPI) -> AsyncIterator[None]:
    """Run a single exchange session over *connection*.

    Installs an ``ExchangeManager`` for the duration of the context and
    tears it down again on exit, whether or not the body raised.
    """
    request_protocol = connection.get_protocol_for_command_type(
        self.get_request_cmd_type(),
    )
    stream: ResponseCandidateStream[TRequestPayload, TResponsePayload] = (
        ResponseCandidateStream(
            connection,
            request_protocol,
            self.get_response_cmd_type(),
        )
    )
    try:
        self._manager = ExchangeManager(connection, stream)
        async with run_service(stream):
            yield
    finally:
        # Drop the manager so stale exchange state cannot outlive the stream.
        del self._manager
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    """Run a ``BlockPreviewServer`` for this class's shard until cancelled.

    Mirrors the ``BlockImportServer`` runner, but the chain is configured
    with ``urgent=False`` so preview executions yield to the primary import.
    """
    trinity_config = boot_info.trinity_config
    app_config = trinity_config.get_app_config(Eth1AppConfig)
    chain_config = app_config.get_chain_config()
    base_db = DBClient.connect(trinity_config.database_ipc_path)
    # Fix: close the database client on exit, matching the sibling
    # BlockImportServer runner; previously the connection was leaked.
    with base_db:
        loop = asyncio.get_event_loop()
        beam_chain = make_pausing_beam_chain(
            chain_config.vm_configuration,
            chain_config.chain_id,
            base_db,
            event_bus,
            # these preview executions are lower priority than the primary block import
            loop=loop,
            urgent=False,
        )
        preview_server = BlockPreviewServer(event_bus, beam_chain, cls.shard_num)
        async with run_service(preview_server):
            await preview_server.cancellation()
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    """Run the peer-count reporter until it is cancelled."""
    reporter = PeerCountReporter(event_bus)
    async with run_service(reporter):
        await reporter.cancellation()
async def test_tx_propagation(event_bus,
                              funded_address_private_key,
                              chain_with_block_validation,
                              tx_validator):
    """End-to-end check that the tx pool relays transactions between peers
    exactly once: new txs are forwarded to the *other* peer, repeats and
    echoes are suppressed, and mixed old+new batches forward only the new tx.
    """
    initial_two_peers = TEST_NODES[:2]
    node_one = initial_two_peers[0]
    node_two = initial_two_peers[1]

    async with run_proxy_peer_pool(event_bus) as peer_pool:
        tx_pool = TxPool(event_bus, peer_pool, tx_validator)
        async with run_service(tx_pool):
            # Answer the pool's "who is connected?" request with our two peers.
            run_mock_request_response(
                GetConnectedPeersRequest,
                GetConnectedPeersResponse(initial_two_peers),
                event_bus)

            # Let the pool pick up the mocked peer set.
            await asyncio.sleep(0.01)

            txs_broadcasted_by_peer1 = [
                create_random_tx(chain_with_block_validation, funded_address_private_key)
            ]

            # this needs to go here to ensure that the subscription is *after*
            # the one installed by the transaction pool so that the got_txns
            # event will get set after the other handlers have been called.
            outgoing_tx, got_txns = observe_outgoing_transactions(event_bus)

            # Peer1 sends some txs
            await event_bus.broadcast(
                TransactionsEvent(session=node_one,
                                  command=Transactions(txs_broadcasted_by_peer1))
            )
            await asyncio.wait_for(got_txns.wait(), timeout=0.1)

            # The pool forwards peer1's txs only to peer2.
            assert outgoing_tx == [
                (node_two, tuple(txs_broadcasted_by_peer1)),
            ]
            # Clear the recording, we asserted all we want and would like to have a fresh start
            outgoing_tx.clear()

            # Peer1 sends same txs again
            await event_bus.broadcast(
                TransactionsEvent(session=node_one,
                                  command=Transactions(txs_broadcasted_by_peer1))
            )
            await asyncio.wait_for(got_txns.wait(), timeout=0.1)

            # Check that Peer2 doesn't receive them again
            assert len(outgoing_tx) == 0

            # Peer2 sends exact same txs back
            await event_bus.broadcast(
                TransactionsEvent(session=node_two,
                                  command=Transactions(txs_broadcasted_by_peer1))
            )
            await asyncio.wait_for(got_txns.wait(), timeout=0.1)

            # Check that Peer1 won't get them as that is where they originally came from
            assert len(outgoing_tx) == 0

            txs_broadcasted_by_peer2 = [
                create_random_tx(chain_with_block_validation, funded_address_private_key),
                txs_broadcasted_by_peer1[0]
            ]

            # Peer2 sends old + new tx
            await event_bus.broadcast(
                TransactionsEvent(session=node_two,
                                  command=Transactions(txs_broadcasted_by_peer2))
            )
            await asyncio.wait_for(got_txns.wait(), timeout=0.1)
            # Not sure why this sleep is needed....
            await asyncio.sleep(0.01)

            # Check that Peer1 receives only the one tx that it didn't know about
            assert outgoing_tx == [
                (node_one, (txs_broadcasted_by_peer2[0],)),
            ]
async def ConnectionPairFactory(*,
                                alice_handshakers: Tuple[HandshakerAPI, ...] = (),
                                bob_handshakers: Tuple[HandshakerAPI, ...] = (),
                                alice_remote: NodeAPI = None,
                                alice_private_key: keys.PrivateKey = None,
                                alice_client_version: str = 'alice',
                                alice_p2p_version: int = DEVP2P_V5,
                                bob_remote: NodeAPI = None,
                                bob_private_key: keys.PrivateKey = None,
                                bob_client_version: str = 'bob',
                                bob_p2p_version: int = DEVP2P_V5,
                                cancel_token: CancelToken = None,
                                start_streams: bool = True,
                                ) -> AsyncIterator[Tuple[ConnectionAPI, ConnectionAPI]]:
    """Yield a connected ``(alice_connection, bob_connection)`` pair for tests.

    When protocol handshakers are supplied, a full handshake is negotiated
    over an in-memory transport pair; otherwise a bare multiplexer pair with
    only the base p2p protocol is constructed and the receipts are filled in
    by hand. Both connections run as services for the duration of the context.
    """
    if alice_handshakers or bob_handshakers:
        # We only leverage `negotiate_protocol_handshakes` if we have actual
        # protocol handshakers since it raises `NoMatchingPeerCapabilities` if
        # there are no matching capabilities.
        alice_transport, bob_transport = MemoryTransportPairFactory(
            alice_remote=alice_remote,
            alice_private_key=alice_private_key,
            bob_remote=bob_remote,
            bob_private_key=bob_private_key,
        )
        alice_devp2p_params = DevP2PHandshakeParamsFactory(
            client_version_string=alice_client_version,
            listen_port=alice_transport.remote.address.tcp_port,
            version=alice_p2p_version,
        )
        bob_devp2p_params = DevP2PHandshakeParamsFactory(
            client_version_string=bob_client_version,
            listen_port=bob_transport.remote.address.tcp_port,
            version=bob_p2p_version,
        )
        if cancel_token is None:
            cancel_token = CancelTokenFactory()
        # Both sides must handshake concurrently — each one blocks waiting
        # for the other's messages.
        (
            (alice_multiplexer, alice_p2p_receipt, alice_protocol_receipts),
            (bob_multiplexer, bob_p2p_receipt, bob_protocol_receipts),
        ) = await asyncio.gather(
            negotiate_protocol_handshakes(
                alice_transport,
                alice_devp2p_params,
                alice_handshakers,
                cancel_token,
            ),
            negotiate_protocol_handshakes(
                bob_transport,
                bob_devp2p_params,
                bob_handshakers,
                cancel_token,
            ),
        )
    else:
        # This path is just for testing to allow us to establish a `Connection`
        # without any protocols beyond the base p2p protocol.
        alice_multiplexer, bob_multiplexer = MultiplexerPairFactory(
            alice_remote=alice_remote,
            alice_private_key=alice_private_key,
            alice_p2p_version=alice_p2p_version,
            bob_remote=bob_remote,
            bob_private_key=bob_private_key,
            bob_p2p_version=bob_p2p_version,
            cancel_token=cancel_token,
        )
        # Hand-build the receipts: each side's receipt describes the *peer*,
        # hence alice's receipt carries bob's version/port/pubkey and vice versa.
        alice_p2p_receipt = DevP2PReceipt(
            protocol=alice_multiplexer.get_base_protocol(),
            version=bob_p2p_version,
            client_version_string=bob_client_version,
            capabilities=(),
            listen_port=bob_multiplexer.remote.address.tcp_port,
            remote_public_key=bob_multiplexer.remote.pubkey,
        )
        bob_p2p_receipt = DevP2PReceipt(
            protocol=bob_multiplexer.get_base_protocol(),
            version=alice_p2p_version,
            client_version_string=alice_client_version,
            capabilities=(),
            listen_port=alice_multiplexer.remote.address.tcp_port,
            remote_public_key=alice_multiplexer.remote.pubkey,
        )
        alice_protocol_receipts = ()
        bob_protocol_receipts = ()
    alice_connection = Connection(
        multiplexer=alice_multiplexer,
        devp2p_receipt=alice_p2p_receipt,
        protocol_receipts=alice_protocol_receipts,
        is_dial_out=True,
    )
    bob_connection = Connection(
        multiplexer=bob_multiplexer,
        devp2p_receipt=bob_p2p_receipt,
        protocol_receipts=bob_protocol_receipts,
        is_dial_out=False,
    )
    async with run_service(alice_connection), run_service(bob_connection):
        if start_streams:
            alice_connection.start_protocol_streams()
            bob_connection.start_protocol_streams()
        yield alice_connection, bob_connection
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    """Assemble and run the beacon-node service stack until all services
    are cancelled: libp2p node, receive server, slot ticker, validator and
    chain syncer, plus optional metrics/API HTTP servers.
    """
    trinity_config = boot_info.trinity_config
    key_pair = cls._load_or_create_node_key(boot_info)
    beacon_app_config = trinity_config.get_app_config(BeaconAppConfig)
    base_db = DBClient.connect(trinity_config.database_ipc_path)
    if boot_info.args.debug_libp2p:
        logging.getLogger("libp2p").setLevel(logging.DEBUG)
    else:
        logging.getLogger("libp2p").setLevel(logging.INFO)
    # Close the database client when the node shuts down.
    with base_db:
        chain_config = beacon_app_config.get_chain_config()
        chain = chain_config.beacon_chain_class(
            base_db, chain_config.genesis_config)

        # TODO: To simplify, subscribe to all subnets
        subnets: Set[SubnetId] = set(
            SubnetId(subnet_id) for subnet_id in range(ATTESTATION_SUBNET_COUNT))

        # TODO: Handle `bootstrap_nodes`.
        libp2p_node = Node(
            key_pair=key_pair,
            listen_ip="0.0.0.0",
            listen_port=boot_info.args.port,
            preferred_nodes=trinity_config.preferred_nodes,
            chain=chain,
            subnets=subnets,
            event_bus=event_bus,
        )

        receive_server = BCCReceiveServer(
            chain=chain,
            p2p_node=libp2p_node,
            topic_msg_queues=libp2p_node.pubsub.my_topics,
            subnets=subnets,
            cancel_token=libp2p_node.cancel_token,
        )

        # Map each configured validator pubkey to its index in the genesis
        # validator registry so privkeys can be keyed by ValidatorIndex.
        state = chain.get_state_by_slot(
            chain_config.genesis_config.GENESIS_SLOT)
        registry_pubkeys = [
            v_record.pubkey for v_record in state.validators
        ]

        validator_privkeys = {}
        validator_keymap = chain_config.genesis_data.validator_keymap
        for pubkey in validator_keymap:
            try:
                validator_index = cast(ValidatorIndex,
                                       registry_pubkeys.index(pubkey))
            except ValueError:
                cls.logger.error(
                    f'Could not find pubkey {pubkey.hex()} in genesis state'
                )
                raise
            validator_privkeys[validator_index] = validator_keymap[pubkey]

        validator = Validator(
            chain=chain,
            p2p_node=libp2p_node,
            validator_privkeys=validator_privkeys,
            event_bus=event_bus,
            token=libp2p_node.cancel_token,
            get_ready_attestations_fn=receive_server.get_ready_attestations,
            get_aggregatable_attestations_fn=receive_server.get_aggregatable_attestations,
            import_attestation_fn=receive_server.import_attestation,
        )

        slot_ticker = SlotTicker(
            genesis_slot=chain_config.genesis_config.GENESIS_SLOT,
            genesis_time=chain_config.genesis_data.genesis_time,
            seconds_per_slot=chain_config.genesis_config.SECONDS_PER_SLOT,
            event_bus=event_bus,
            token=libp2p_node.cancel_token,
        )

        syncer = BeaconChainSyncer(
            chain_db=AsyncBeaconChainDB(
                base_db,
                chain_config.genesis_config,
            ),
            peer_pool=libp2p_node.handshaked_peers,
            block_importer=SyncBlockImporter(chain),
            genesis_config=chain_config.genesis_config,
            event_bus=event_bus,
            token=libp2p_node.cancel_token,
        )
        metrics_server = HTTPServer(
            handler=MetricsHandler.handle(chain)(event_bus),
            port=boot_info.args.metrics_port,
        )
        api_server = HTTPServer(
            handler=APIHandler.handle(chain)(event_bus),
            port=boot_info.args.api_port,
        )

        # Metrics and API servers only run when enabled via CLI flags.
        services: Tuple[BaseService, ...] = (libp2p_node, receive_server,
                                             slot_ticker, validator, syncer)
        if boot_info.args.enable_metrics:
            services += (metrics_server, )
        if boot_info.args.enable_api:
            services += (api_server, )

        # Start every service, then block until they have all been cancelled;
        # the exit stack unwinds them in reverse start order.
        async with AsyncExitStack() as stack:
            for service in services:
                await stack.enter_async_context(run_service(service))
            await asyncio.gather(*(service.cancellation()
                                   for service in services))
async def test_tx_propagation(event_bus,
                              chain_with_block_validation,
                              tx_validator):
    """Check that the tx pool relays transactions between two peers exactly
    once: new txs go to the *other* peer, repeats and echoes are suppressed,
    and a mixed old+new batch forwards only the new tx.

    This variant relies on fixed ``asyncio.sleep`` pauses for event delivery
    rather than waiting on an explicit event.
    """
    initial_two_peers = TEST_NODES[:2]
    node_one = initial_two_peers[0]
    node_two = initial_two_peers[1]

    async with run_proxy_peer_pool(event_bus) as peer_pool:
        # Record every outgoing Transactions broadcast for assertions below.
        outgoing_tx = observe_outgoing_transactions(event_bus)

        tx_pool = TxPool(event_bus, peer_pool, tx_validator)
        async with run_service(tx_pool):
            # Answer the pool's "who is connected?" request with our two peers.
            run_mock_request_response(
                GetConnectedPeersRequest,
                GetConnectedPeersResponse(initial_two_peers),
                event_bus)

            await asyncio.sleep(0.01)

            txs_broadcasted_by_peer1 = [
                create_random_tx(chain_with_block_validation)
            ]

            # Peer1 sends some txs
            await event_bus.broadcast(
                TransactionsEvent(session=node_one, msg=txs_broadcasted_by_peer1, cmd=Transactions)  # noqa: E501
            )
            await asyncio.sleep(0.01)

            # The pool forwards peer1's txs only to peer2.
            assert outgoing_tx == [
                (node_two, tuple(txs_broadcasted_by_peer1)),
            ]
            # Clear the recording, we asserted all we want and would like to have a fresh start
            outgoing_tx.clear()

            # Peer1 sends same txs again
            await event_bus.broadcast(
                TransactionsEvent(session=node_one, msg=txs_broadcasted_by_peer1, cmd=Transactions)  # noqa: E501
            )
            await asyncio.sleep(0.01)

            # Check that Peer2 doesn't receive them again
            assert len(outgoing_tx) == 0

            # Peer2 sends exact same txs back
            await event_bus.broadcast(
                TransactionsEvent(session=node_two, msg=txs_broadcasted_by_peer1, cmd=Transactions)  # noqa: E501
            )
            await asyncio.sleep(0.01)

            # Check that Peer1 won't get them as that is where they originally came from
            assert len(outgoing_tx) == 0

            txs_broadcasted_by_peer2 = [
                create_random_tx(chain_with_block_validation),
                txs_broadcasted_by_peer1[0]
            ]

            # Peer2 sends old + new tx
            await event_bus.broadcast(
                TransactionsEvent(session=node_two, msg=txs_broadcasted_by_peer2, cmd=Transactions)  # noqa: E501
            )
            await asyncio.sleep(0.01)

            # Check that Peer1 receives only the one tx that it didn't know about
            assert outgoing_tx == [
                (node_one, (txs_broadcasted_by_peer2[0], )),
            ]
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    """Run the UPnP port-mapping service until it is cancelled."""
    upnp = UPnPService(boot_info.trinity_config.port)
    async with run_service(upnp):
        await upnp.cancellation()
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    """Assemble and run the beacon-node service stack until all services
    are cancelled: libp2p node, receive server, slot ticker and syncer, a
    validator-client API server, plus optional metrics/API HTTP servers and
    (in ``bn_only`` mode) the chain maintainer and validator handler.
    """
    trinity_config = boot_info.trinity_config
    key_pair = _load_secp256k1_key_pair_from(trinity_config)
    beacon_app_config = trinity_config.get_app_config(BeaconAppConfig)
    base_db = DBClient.connect(trinity_config.database_ipc_path)
    if boot_info.args.debug_libp2p:
        logging.getLogger("libp2p").setLevel(logging.DEBUG)
    else:
        logging.getLogger("libp2p").setLevel(logging.INFO)
    # Close the database client when the node shuts down.
    with base_db:
        chain_config = beacon_app_config.get_chain_config()
        chain = chain_config.beacon_chain_class(
            base_db, chain_config.genesis_config)

        # TODO: To simplify, subscribe to all subnets
        subnets: Set[SubnetId] = set(
            SubnetId(subnet_id) for subnet_id in range(ATTESTATION_SUBNET_COUNT))

        # TODO: Handle `bootstrap_nodes`.
        libp2p_node = Node(
            key_pair=key_pair,
            listen_ip="0.0.0.0",
            listen_port=boot_info.args.port,
            preferred_nodes=trinity_config.preferred_nodes,
            chain=chain,
            subnets=subnets,
            event_bus=event_bus,
        )

        receive_server = BCCReceiveServer(
            chain=chain,
            p2p_node=libp2p_node,
            topic_msg_queues=libp2p_node.pubsub.my_topics,
            subnets=subnets,
            cancel_token=libp2p_node.cancel_token,
        )

        chain_maintainer = ChainMaintainer(chain=chain,
                                           event_bus=event_bus,
                                           token=libp2p_node.cancel_token)

        validator_handler = ValidatorHandler(
            chain=chain,
            p2p_node=libp2p_node,
            event_bus=event_bus,
            get_ready_attestations_fn=receive_server.get_ready_attestations,
            get_aggregatable_attestations_fn=receive_server.get_aggregatable_attestations,
            import_attestation_fn=receive_server.import_attestation,
            token=libp2p_node.cancel_token,
        )

        slot_ticker = SlotTicker(
            genesis_slot=chain_config.genesis_config.GENESIS_SLOT,
            genesis_time=chain_config.genesis_time,
            seconds_per_slot=chain_config.genesis_config.SECONDS_PER_SLOT,
            event_bus=event_bus,
            token=libp2p_node.cancel_token,
        )

        syncer = BeaconChainSyncer(
            chain_db=AsyncBeaconChainDB(base_db, chain_config.genesis_config),
            peer_pool=libp2p_node.handshaked_peers,
            block_importer=SyncBlockImporter(chain),
            genesis_config=chain_config.genesis_config,
            event_bus=event_bus,
            token=libp2p_node.cancel_token,
        )
        metrics_server = HTTPServer(
            handler=MetricsHandler.handle(chain)(event_bus),
            port=boot_info.args.metrics_port,
        )
        # NOTE: this API server provides an interface into the beacon node
        api_server = HTTPServer(
            handler=APIHandler.handle(chain)(event_bus),
            port=boot_info.args.api_port,
        )
        # NOTE: this API server provides an interface between the beacon node and
        # any connected validator clients.
        # NOTE(review): port 30303 is hard-coded here, unlike the other
        # servers which read their ports from CLI args — confirm intended.
        validator_api_handler = ValidatorAPIHandler(
            chain, event_bus, chain_config.genesis_time)
        validator_api_server = HTTPAppServer(
            routes=validator_api_handler.make_routes(),
            port=30303)

        # Optional services are appended based on CLI flags.
        services: Tuple[BaseService, ...] = (
            libp2p_node,
            receive_server,
            slot_ticker,
            syncer,
            validator_api_server,
        )
        if boot_info.args.enable_metrics:
            services += (metrics_server, )
        if boot_info.args.enable_api:
            services += (api_server, )
        if boot_info.args.bn_only:
            services += (chain_maintainer, validator_handler)

        # Start every service, then block until they have all been cancelled;
        # the exit stack unwinds them in reverse start order.
        async with AsyncExitStack() as stack:
            for service in services:
                await stack.enter_async_context(run_service(service))
            await asyncio.gather(*(service.cancellation()
                                   for service in services))
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    """Assemble and run the beacon-node service stack until all services
    are cancelled: libp2p node, receive server, slot ticker, validator and
    chain syncer.
    """
    trinity_config = boot_info.trinity_config
    key_pair = cls._load_or_create_node_key(boot_info)
    beacon_app_config = trinity_config.get_app_config(BeaconAppConfig)
    base_db = DBClient.connect(trinity_config.database_ipc_path)
    # Fix: close the database client on shutdown (the sibling beacon-node
    # runners wrap the body in `with base_db:`; this one leaked it).
    with base_db:
        chain_config = beacon_app_config.get_chain_config()
        chain = chain_config.beacon_chain_class(base_db, chain_config.genesis_config)

        # TODO: Handle `bootstrap_nodes`.
        libp2p_node = Node(
            key_pair=key_pair,
            listen_ip="0.0.0.0",
            listen_port=boot_info.args.port,
            preferred_nodes=trinity_config.preferred_nodes,
            chain=chain,
        )

        receive_server = BCCReceiveServer(
            chain=chain,
            p2p_node=libp2p_node,
            topic_msg_queues=libp2p_node.pubsub.my_topics,
            cancel_token=libp2p_node.cancel_token,
        )

        # Map each configured validator pubkey to its index in the genesis
        # validator registry so privkeys can be keyed by ValidatorIndex.
        state = chain.get_state_by_slot(
            chain_config.genesis_config.GENESIS_SLOT)
        registry_pubkeys = [v_record.pubkey for v_record in state.validators]

        validator_privkeys = {}
        validator_keymap = chain_config.genesis_data.validator_keymap
        for pubkey in validator_keymap:
            try:
                validator_index = cast(ValidatorIndex,
                                       registry_pubkeys.index(pubkey))
            except ValueError:
                cls.logger.error(
                    f'Could not find pubkey {pubkey.hex()} in genesis state')
                raise
            validator_privkeys[validator_index] = validator_keymap[pubkey]

        validator = Validator(
            chain=chain,
            p2p_node=libp2p_node,
            validator_privkeys=validator_privkeys,
            event_bus=event_bus,
            token=libp2p_node.cancel_token,
            get_ready_attestations_fn=receive_server.get_ready_attestations,
        )

        slot_ticker = SlotTicker(
            genesis_slot=chain_config.genesis_config.GENESIS_SLOT,
            genesis_time=chain_config.genesis_data.genesis_time,
            seconds_per_slot=chain_config.genesis_config.SECONDS_PER_SLOT,
            event_bus=event_bus,
            token=libp2p_node.cancel_token,
        )

        syncer = BeaconChainSyncer(
            chain_db=AsyncBeaconChainDB(
                base_db,
                chain_config.genesis_config,
            ),
            peer_pool=libp2p_node.handshaked_peers,
            block_importer=SyncBlockImporter(chain),
            genesis_config=chain_config.genesis_config,
            token=libp2p_node.cancel_token,
        )

        services = (libp2p_node, receive_server, slot_ticker, validator, syncer)
        # Start every service, then block until they have all been cancelled;
        # the exit stack unwinds them in reverse start order.
        async with AsyncExitStack() as stack:
            for service in services:
                await stack.enter_async_context(run_service(service))
            await asyncio.gather(*(service.cancellation() for service in services))