async def _run(self) -> None: self.logger.info("Running server...") mapped_external_ip = await self.upnp_service.add_nat_portmap() if mapped_external_ip is None: external_ip = '0.0.0.0' else: external_ip = mapped_external_ip await self._start_tcp_listener() self.logger.info( "enode://%s@%s:%s", self.privkey.public_key.to_hex()[2:], external_ip, self.port, ) self.logger.info('network: %s', self.network_id) self.logger.info('peers: max_peers=%s', self.max_peers) addr = Address(external_ip, self.port, self.port) discovery_proto = PreferredNodeDiscoveryProtocol( self.privkey, addr, self.bootstrap_nodes, self.preferred_nodes) await self._start_udp_listener(discovery_proto) self.discovery = DiscoveryService(discovery_proto, self.peer_pool) asyncio.ensure_future(self.peer_pool.run()) asyncio.ensure_future(self.discovery.run()) asyncio.ensure_future(self.upnp_service.run()) self.syncer = self._make_syncer(self.peer_pool) await self.syncer.run()
async def _run(self) -> None: external_ip = "0.0.0.0" address = Address(external_ip, self.trinity_config.port, self.trinity_config.port) if self.trinity_config.use_discv5: protocol = get_protocol(self.trinity_config) topic = get_discv5_topic(self.trinity_config, protocol) discovery_protocol: DiscoveryProtocol = DiscoveryByTopicProtocol( topic, self.trinity_config.nodekey, address, self.trinity_config.bootstrap_nodes, self.cancel_token, ) else: discovery_protocol = PreferredNodeDiscoveryProtocol( self.trinity_config.nodekey, address, self.trinity_config.bootstrap_nodes, self.trinity_config.preferred_nodes, self.cancel_token, ) discovery_service = DiscoveryService( discovery_protocol, self.trinity_config.port, self.event_bus, self.cancel_token, ) await discovery_service.run()
async def _run(self) -> None: self.logger.info("Running server...") mapped_external_ip = await self.upnp_service.add_nat_portmap() if mapped_external_ip is None: external_ip = '0.0.0.0' else: external_ip = mapped_external_ip await self._start_tcp_listener() self.logger.info( "enode://%s@%s:%s", self.privkey.public_key.to_hex()[2:], external_ip, self.port, ) self.logger.info('network: %s', self.network_id) self.logger.info('peers: max_peers=%s', self.max_peers) addr = Address(external_ip, self.port, self.port) discovery_proto = PreferredNodeDiscoveryProtocol( self.privkey, addr, self.bootstrap_nodes, self.preferred_nodes, self.cancel_token) self.discovery = DiscoveryService(discovery_proto, self.peer_pool, self.port, self.cancel_token) self.run_daemon(self.peer_pool) self.run_daemon(self.discovery) # UPNP service is still experimental and not essential, so we don't use run_daemon() for # it as that means if it crashes we'd be terminated as well. self.run_child_service(self.upnp_service) self.syncer = self._make_syncer(self.peer_pool) await self.syncer.run()
async def _run(self) -> None: external_ip = "0.0.0.0" address = Address(external_ip, self.trinity_config.port, self.trinity_config.port) discovery_protocol = PreferredNodeDiscoveryProtocol( self.trinity_config.nodekey, address, self.trinity_config.bootstrap_nodes, self.trinity_config.preferred_nodes, self.cancel_token, ) if self.is_discovery_disabled: discovery_service: BaseService = StaticDiscoveryService( self.event_bus, self.trinity_config.preferred_nodes, self.cancel_token, ) else: discovery_service = DiscoveryService( discovery_protocol, self.trinity_config.port, self.event_bus, self.cancel_token, ) try: await discovery_service.run() except Exception: await self.event_bus.broadcast( ShutdownRequest("Discovery ended unexpectedly"))
def __init__(self, chain_config: ChainConfig) -> None:
    super().__init__(chain_config)
    self.network_id = chain_config.network_id
    self.nodekey = chain_config.nodekey
    self._port = chain_config.port
    self._discovery_proto = PreferredNodeDiscoveryProtocol(
        chain_config.nodekey,
        Address('0.0.0.0', chain_config.port, chain_config.port),
        bootstrap_nodes=chain_config.bootstrap_nodes,
        preferred_nodes=chain_config.preferred_nodes,
    )
    self._peer_pool = self._create_peer_pool(chain_config)
    self._discovery = DiscoveryService(self._discovery_proto, self._peer_pool)
    self.add_service(self._peer_pool)
    self.create_and_add_tx_pool()
async def run() -> None:
    socket = trio.socket.socket(family=trio.socket.AF_INET, type=trio.socket.SOCK_DGRAM)
    await socket.bind(('0.0.0.0', listen_port))
    async with TrioEndpoint.serve(networking_connection_config) as endpoint:
        service = DiscoveryService(privkey, addr, bootstrap_nodes, endpoint, socket)
        await TrioManager.run_service(service)
async def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('-bootnode', type=str, help="The enode to use as bootnode")
    parser.add_argument('-networkid', type=int, choices=[ROPSTEN_NETWORK_ID, MAINNET_NETWORK_ID],
                        default=ROPSTEN_NETWORK_ID, help="1 for mainnet, 3 for testnet")
    parser.add_argument('-l', type=str, help="Log level", default="info")
    args = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%H:%M:%S')
    if args.l == "debug2":  # noqa: E741
        log_level = DEBUG2_LEVEL_NUM
    else:
        log_level = getattr(logging, args.l.upper())
    logging.getLogger('p2p').setLevel(log_level)

    network_cfg = PRECONFIGURED_NETWORKS[args.networkid]
    # Listen on a port other than 30303 so that we can test against a local geth instance
    # running on that port.
    listen_port = 30304
    # Use a hard-coded privkey so that our enode is always the same.
    privkey = keys.PrivateKey(
        b'~\x054{4\r\xd64\x0f\x98\x1e\x85;\xcc\x08\x1eQ\x10t\x16\xc0\xb0\x7f)=\xc4\x1b\xb7/\x8b&\x83'  # noqa: E501
    )
    addr = kademlia.Address('127.0.0.1', listen_port, listen_port)
    if args.bootnode:
        bootstrap_nodes = tuple([kademlia.Node.from_uri(args.bootnode)])
    else:
        bootstrap_nodes = tuple(
            kademlia.Node.from_uri(enode) for enode in network_cfg.bootnodes)

    ipc_path = Path(f"networking-{uuid.uuid4()}.ipc")
    networking_connection_config = ConnectionConfig(
        name=NETWORKING_EVENTBUS_ENDPOINT,
        path=ipc_path,
    )

    headerdb = TrioHeaderDB(AtomicDB(MemoryDB()))
    headerdb.persist_header(network_cfg.genesis_header)
    vm_config = network_cfg.vm_configuration
    enr_field_providers = (functools.partial(generate_eth_cap_enr_field, vm_config, headerdb),)

    socket = trio.socket.socket(family=trio.socket.AF_INET, type=trio.socket.SOCK_DGRAM)
    await socket.bind(('0.0.0.0', listen_port))
    async with TrioEndpoint.serve(networking_connection_config) as endpoint:
        service = DiscoveryService(
            privkey, addr, bootstrap_nodes, endpoint, socket, enr_field_providers)
        service.logger.info("Enode: %s", service.this_node.uri())
        async with background_trio_service(service):
            await service.manager.wait_finished()
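# Usage sketch (not part of the original snippet): the script above is a trio program,
# so its entry point would be driven with trio.run(). The file name in the example
# invocation below is hypothetical.
#
#     python discovery_demo.py -networkid 3 -l debug2
#
import trio

if __name__ == "__main__":
    trio.run(main)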
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    config = boot_info.trinity_config
    db = DBClient.connect(config.database_ipc_path)
    if boot_info.args.disable_discovery:
        discovery_service: async_service.Service = StaticDiscoveryService(
            event_bus,
            config.preferred_nodes,
        )
    else:
        vm_config = config.get_app_config(Eth1AppConfig).get_chain_config().vm_configuration
        headerdb = TrioHeaderDB(db)
        eth_cap_provider = functools.partial(generate_eth_cap_enr_field, vm_config, headerdb)
        socket = trio.socket.socket(family=trio.socket.AF_INET, type=trio.socket.SOCK_DGRAM)
        await socket.bind(("0.0.0.0", config.port))
        base_db = LevelDB(config.node_db_dir)
        node_db = NodeDB(default_identity_scheme_registry, base_db)
        # discovery_service = PreferredNodeDiscoveryService(
        #     config.nodekey,
        #     config.port,
        #     config.port,
        #     config.bootstrap_nodes,
        #     config.preferred_nodes,
        #     event_bus,
        #     socket,
        #     node_db,
        #     (eth_cap_provider,),
        # )
        discovery_service = DiscoveryService(
            config.nodekey,
            config.port,
            config.port,
            config.bootstrap_nodes,
            event_bus,
            socket,
            node_db,
            (eth_cap_provider,),
        )

    try:
        with db:
            await async_service.run_trio_service(discovery_service)
    except Exception:
        event_bus.broadcast_nowait(ShutdownRequest("Discovery ended unexpectedly"))
        raise
def __init__(self, plugin_manager: PluginManager, chain_config: ChainConfig) -> None:
    super().__init__(plugin_manager, chain_config)
    self.network_id = chain_config.network_id
    self.nodekey = chain_config.nodekey
    self._port = chain_config.port
    self._discovery_proto = PreferredNodeDiscoveryProtocol(
        chain_config.nodekey,
        Address('0.0.0.0', chain_config.port, chain_config.port),
        bootstrap_nodes=chain_config.bootstrap_nodes,
        preferred_nodes=chain_config.preferred_nodes,
    )
    self._peer_pool = self._create_peer_pool(chain_config)
    self._discovery = DiscoveryService(
        self._discovery_proto, self._peer_pool, self.cancel_token)
    self._peer_chain = LightPeerChain(self.headerdb, self._peer_pool, self.cancel_token)
    self.notify_resource_available()
async def _run(self) -> None: external_ip = "0.0.0.0" address = Address(external_ip, self.trinity_config.port, self.trinity_config.port) if self.trinity_config.use_discv5: protocol = get_protocol(self.trinity_config) topic = get_discv5_topic(self.trinity_config, protocol) discovery_protocol: DiscoveryProtocol = DiscoveryByTopicProtocol( topic, self.trinity_config.nodekey, address, self.trinity_config.bootstrap_nodes, self.cancel_token, ) else: discovery_protocol = PreferredNodeDiscoveryProtocol( self.trinity_config.nodekey, address, self.trinity_config.bootstrap_nodes, self.trinity_config.preferred_nodes, self.cancel_token, ) if self.is_discovery_disabled: discovery_service: BaseService = StaticDiscoveryService( self.event_bus, self.trinity_config.preferred_nodes, self.cancel_token, ) else: discovery_service = DiscoveryService( discovery_protocol, self.trinity_config.port, self.event_bus, self.cancel_token, ) try: await discovery_service.run() except Exception: self.event_bus.request_shutdown("Discovery ended unexpectedly")
async def test_get_max_neighbours_per_packet(nursery):
    # This test is just a safeguard against changes that inadvertently modify the behaviour of
    # _get_max_neighbours_per_packet().
    assert DiscoveryService._get_max_neighbours_per_packet() == 12
class LightNode(Node):
    chain_class: Type[LightDispatchChain] = None

    _chain: LightDispatchChain = None
    _p2p_server: LightPeerChain = None

    network_id: int = None
    nodekey: PrivateKey = None

    def __init__(self, chain_config: ChainConfig) -> None:
        super().__init__(chain_config)
        self.network_id = chain_config.network_id
        self.nodekey = chain_config.nodekey
        self._port = chain_config.port
        self._discovery_proto = PreferredNodeDiscoveryProtocol(
            chain_config.nodekey,
            Address('0.0.0.0', chain_config.port, chain_config.port),
            bootstrap_nodes=chain_config.bootstrap_nodes,
            preferred_nodes=chain_config.preferred_nodes,
        )
        self._peer_pool = self._create_peer_pool(chain_config)
        self._discovery = DiscoveryService(self._discovery_proto, self._peer_pool)
        self.add_service(self._peer_pool)
        self.create_and_add_tx_pool()

    async def _run(self) -> None:
        # TODO add a datagram endpoint service that can be added with self.add_service
        self.logger.info(
            "enode://%s@%s:%s",
            self.nodekey.public_key.to_hex()[2:],
            '0.0.0.0',
            self._port,
        )
        self.logger.info('network: %s', self.network_id)
        self.logger.info('peers: max_peers=%s', self._peer_pool.max_peers)
        transport, _ = await asyncio.get_event_loop().create_datagram_endpoint(
            lambda: self._discovery_proto,
            local_addr=('0.0.0.0', self._port))
        asyncio.ensure_future(self._discovery.run())
        try:
            await super()._run()
        finally:
            await self._discovery.cancel()

    def get_chain(self) -> LightDispatchChain:
        if self._chain is None:
            if self.chain_class is None:
                raise AttributeError("LightNode subclass must set chain_class")
            self._chain = self.chain_class(self._headerdb, peer_chain=self.get_p2p_server())
        return self._chain

    def get_p2p_server(self) -> LightPeerChain:
        if self._p2p_server is None:
            if self.chain_class is None:
                raise AttributeError("LightNode subclass must set chain_class")
            self._p2p_server = LightPeerChain(self.headerdb, self._peer_pool, self.chain_class)
        return self._p2p_server

    def get_peer_pool(self) -> PeerPool:
        return self._peer_pool

    def _create_peer_pool(self, chain_config: ChainConfig) -> PeerPool:
        return PeerPool(
            LESPeer,
            self.headerdb,
            chain_config.network_id,
            chain_config.nodekey,
            self.chain_class.vm_configuration,
        )
class Server(BaseService):
    """Server listening for incoming connections"""
    logger = logging.getLogger("p2p.server.Server")
    _tcp_listener = None
    _udp_listener = None
    peer_pool: PeerPool = None

    def __init__(self,
                 privkey: datatypes.PrivateKey,
                 port: int,
                 chain: AsyncChain,
                 chaindb: AsyncChainDB,
                 headerdb: 'BaseAsyncHeaderDB',
                 base_db: BaseDB,
                 network_id: int,
                 max_peers: int = DEFAULT_MAX_PEERS,
                 peer_class: Type[BasePeer] = ETHPeer,
                 bootstrap_nodes: Tuple[Node, ...] = None,
                 preferred_nodes: Sequence[Node] = None,
                 token: CancelToken = None,
                 ) -> None:
        super().__init__(token)
        self.headerdb = headerdb
        self.chaindb = chaindb
        self.chain = chain
        self.base_db = base_db
        self.privkey = privkey
        self.port = port
        self.network_id = network_id
        self.peer_class = peer_class
        self.max_peers = max_peers
        self.bootstrap_nodes = bootstrap_nodes
        self.preferred_nodes = preferred_nodes
        if self.preferred_nodes is None and network_id in DEFAULT_PREFERRED_NODES:
            self.preferred_nodes = DEFAULT_PREFERRED_NODES[self.network_id]
        self.upnp_service = UPnPService(port, token=self.cancel_token)
        self.peer_pool = self._make_peer_pool()

        if not bootstrap_nodes:
            self.logger.warn("Running with no bootstrap nodes")

    async def _start_tcp_listener(self) -> None:
        # TODO: Support IPv6 addresses as well.
        self._tcp_listener = await asyncio.start_server(
            self.receive_handshake,
            host='0.0.0.0',
            port=self.port,
        )

    async def _close_tcp_listener(self) -> None:
        self._tcp_listener.close()
        await self._tcp_listener.wait_closed()

    async def _start_udp_listener(self, discovery: DiscoveryProtocol) -> None:
        loop = asyncio.get_event_loop()
        # TODO: Support IPv6 addresses as well.
        self._udp_transport, _ = await loop.create_datagram_endpoint(
            lambda: discovery,
            local_addr=('0.0.0.0', self.port),
            family=socket.AF_INET)

    async def _close_udp_listener(self) -> None:
        cast(asyncio.DatagramTransport, self._udp_transport).abort()

    async def _close(self) -> None:
        await asyncio.gather(
            self._close_tcp_listener(), self._close_udp_listener())

    def _make_syncer(self, peer_pool: PeerPool) -> BaseService:
        # This method exists only so that ShardSyncer can provide a different implementation.
        return FullNodeSyncer(
            self.chain, self.chaindb, self.base_db, peer_pool, self.cancel_token)

    def _make_peer_pool(self) -> PeerPool:
        # This method exists only so that ShardSyncer can provide a different implementation.
        return PeerPool(
            self.peer_class,
            self.headerdb,
            self.network_id,
            self.privkey,
            self.chain.vm_configuration,
            max_peers=self.max_peers,
        )

    async def _run(self) -> None:
        self.logger.info("Running server...")
        mapped_external_ip = await self.upnp_service.add_nat_portmap()
        if mapped_external_ip is None:
            external_ip = '0.0.0.0'
        else:
            external_ip = mapped_external_ip
        await self._start_tcp_listener()
        self.logger.info(
            "enode://%s@%s:%s",
            self.privkey.public_key.to_hex()[2:],
            external_ip,
            self.port,
        )
        self.logger.info('network: %s', self.network_id)
        self.logger.info('peers: max_peers=%s', self.max_peers)
        addr = Address(external_ip, self.port, self.port)
        discovery_proto = PreferredNodeDiscoveryProtocol(
            self.privkey, addr, self.bootstrap_nodes, self.preferred_nodes)
        await self._start_udp_listener(discovery_proto)
        self.discovery = DiscoveryService(discovery_proto, self.peer_pool)
        asyncio.ensure_future(self.peer_pool.run())
        asyncio.ensure_future(self.discovery.run())
        asyncio.ensure_future(self.upnp_service.run())
        self.syncer = self._make_syncer(self.peer_pool)
        await self.syncer.run()

    async def _cleanup(self) -> None:
        self.logger.info("Closing server...")
        await asyncio.gather(
            self.peer_pool.cancel(),
            self.discovery.cancel(),
        )
        await self._close()

    async def receive_handshake(
            self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
        expected_exceptions = (
            TimeoutError,
            PeerConnectionLost,
            HandshakeFailure,
            asyncio.IncompleteReadError,
        )
        try:
            await self._receive_handshake(reader, writer)
        except expected_exceptions as e:
            self.logger.debug("Could not complete handshake: %s", e)
        except OperationCancelled:
            pass
        except Exception as e:
            self.logger.exception("Unexpected error handling handshake")

    async def _receive_handshake(
            self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:
        msg = await self.wait(
            reader.read(ENCRYPTED_AUTH_MSG_LEN),
            timeout=REPLY_TIMEOUT)

        ip, socket, *_ = writer.get_extra_info("peername")
        remote_address = Address(ip, socket)
        self.logger.debug("Receiving handshake from %s", remote_address)
        got_eip8 = False
        try:
            ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                msg, self.privkey)
        except DecryptionError:
            # Try to decode as EIP8
            got_eip8 = True
            msg_size = big_endian_to_int(msg[:2])
            remaining_bytes = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2
            msg += await self.wait(
                reader.read(remaining_bytes),
                timeout=REPLY_TIMEOUT)
            try:
                ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                    msg, self.privkey)
            except DecryptionError as e:
                self.logger.debug("Failed to decrypt handshake: %s", e)
                return

        initiator_remote = Node(initiator_pubkey, remote_address)
        responder = HandshakeResponder(initiator_remote, self.privkey, got_eip8, self.cancel_token)

        responder_nonce = secrets.token_bytes(HASH_LEN)
        auth_ack_msg = responder.create_auth_ack_message(responder_nonce)
        auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)

        # Use the `writer` to send the reply to the remote
        writer.write(auth_ack_ciphertext)
        await self.wait(writer.drain())

        # Call `HandshakeResponder.derive_shared_secrets()` and use return values to create `Peer`
        aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
            initiator_nonce=initiator_nonce,
            responder_nonce=responder_nonce,
            remote_ephemeral_pubkey=ephem_pubkey,
            auth_init_ciphertext=msg,
            auth_ack_ciphertext=auth_ack_ciphertext,
        )

        # Create and register peer in peer_pool
        peer = self.peer_class(
            remote=initiator_remote,
            privkey=self.privkey,
            reader=reader,
            writer=writer,
            aes_secret=aes_secret,
            mac_secret=mac_secret,
            egress_mac=egress_mac,
            ingress_mac=ingress_mac,
            headerdb=self.headerdb,
            network_id=self.network_id,
            inbound=True,
        )

        if self.peer_pool.is_full:
            peer.disconnect(DisconnectReason.too_many_peers)
        elif not self.peer_pool.is_valid_connection_candidate(peer.remote):
            peer.disconnect(DisconnectReason.useless_peer)

        total_peers = len(self.peer_pool.connected_nodes)
        inbound_peer_count = len([
            peer
            for peer
            in self.peer_pool.connected_nodes.values()
            if peer.inbound
        ])
        if total_peers > 1 and inbound_peer_count / total_peers > DIAL_IN_OUT_RATIO:
            # make sure to have at least 1/4 outbound connections
            peer.disconnect(DisconnectReason.too_many_peers)
        else:
            # We use self.wait() here as a workaround for
            # https://github.com/ethereum/py-evm/issues/670.
            await self.wait(self.do_handshake(peer))

    async def do_handshake(self, peer: BasePeer) -> None:
        try:
            await peer.do_p2p_handshake()
        except MalformedMessage as e:
            raise HandshakeFailure() from e
        await peer.do_sub_proto_handshake()
        await peer.ensure_same_side_on_dao_fork(self.chain.vm_configuration)
        self._start_peer(peer)

    def _start_peer(self, peer: BasePeer) -> None:
        # This method exists only so that we can monkey-patch it in tests.
        self.peer_pool.start_peer(peer)