async def get_peer_candidates(self, num_requested: int, num_connected_peers: int) -> Iterable[Node]:
    """Ask the discovery service, via the event bus, for up to ``num_requested``
    peer candidates and return whatever it sends back.

    ``num_connected_peers`` is accepted but not used in this implementation;
    it is kept so the caller-facing signature stays stable.
    """
    request = PeerCandidatesRequest(num_requested)
    response = await self.event_bus.request(request, TO_DISCOVERY_BROADCAST_CONFIG)
    return response.candidates
async def get_peer_candidates(
        self, max_candidates: int, should_skip_fn: Callable[[NodeAPI], bool]) -> Tuple[NodeAPI, ...]:
    """Request up to ``max_candidates`` connection candidates over the event bus.

    ``should_skip_fn`` travels with the request; presumably the responder uses
    it to filter candidates — confirm against the discovery service.
    """
    event = PeerCandidatesRequest(max_candidates, should_skip_fn)
    response = await self.event_bus.request(event, self.config)
    return response.candidates
async def get_peer_candidates(
        self, max_candidates: int, should_skip_fn: Callable[[NodeAPI], bool]) -> Tuple[NodeAPI, ...]:
    """Request up to ``max_candidates`` connection candidates from discovery.

    Blocks until some endpoint is subscribed to ``PeerCandidatesRequest`` so
    the broadcast request is not fired into the void.  ``should_skip_fn`` is
    forwarded as part of the request event.
    """
    await self.event_bus.wait_until_any_endpoint_subscribed_to(PeerCandidatesRequest)
    event = PeerCandidatesRequest(max_candidates, should_skip_fn)
    response = await self.event_bus.request(event, TO_DISCOVERY_BROADCAST_CONFIG)
    return response.candidates
async def get_peer_candidates(
        self, num_requested: int, connected_remotes: Set[NodeAPI]) -> Tuple[NodeAPI, ...]:
    """Request up to ``num_requested`` peer candidates from the discovery service,
    excluding any node we are already connected to.

    Waits until some endpoint is subscribed to ``PeerCandidatesRequest``
    before broadcasting the request.
    """
    await self.event_bus.wait_until_any_endpoint_subscribed_to(PeerCandidatesRequest)
    response = await self.event_bus.request(
        PeerCandidatesRequest(num_requested),
        TO_DISCOVERY_BROADCAST_CONFIG,
    )
    # Drop candidates we already hold a connection to.
    fresh_candidates = []
    for candidate in response.candidates:
        if candidate not in connected_remotes:
            fresh_candidates.append(candidate)
    return tuple(fresh_candidates)
async def get_peer_candidates(self, num_requested: int, connected_remotes: Set[Node]) -> Tuple[Node, ...]:
    """Broadcast a ``PeerCandidatesRequest`` for ``num_requested`` candidates
    and return the response's candidates, minus nodes in ``connected_remotes``.
    """
    response = await self.event_bus.request(
        PeerCandidatesRequest(num_requested),
        TO_DISCOVERY_BROADCAST_CONFIG,
    )
    # Filter out remotes we are already connected to before handing back.
    not_connected = lambda candidate: candidate not in connected_remotes
    return tuple(filter(not_connected, response.candidates))
async def maybe_connect_more_peers(self) -> None:
    """Periodically top up our peer connections from discovery candidates.

    Loops while the service is operational: every DISOVERY_INTERVAL it checks
    for free peer slots and, if any, asks the discovery service for that many
    candidates, falling back to a random bootnode when we have no peers at
    all.  Each request is bounded by REQUEST_PEER_CANDIDATE_TIMEOUT; a timeout
    skips this round rather than crashing the loop.
    """
    while self.is_operational:
        await self.sleep(DISOVERY_INTERVAL)
        # How many more peers we can accept right now.
        available_peer_slots = self.max_peers - len(self)
        if available_peer_slots > 0:
            try:
                response = await self.wait(
                    # TODO: This should use a BroadcastConfig to send the request to discovery
                    # only as soon as we have cut a new Lahja release.
                    self.event_bus.request(PeerCandidatesRequest(available_peer_slots)),
                    timeout=REQUEST_PEER_CANDIDATE_TIMEOUT)
            except TimeoutError:
                # Best effort: log and retry on the next interval.
                self.logger.warning("Discovery did not answer PeerCandidateRequest in time")
                continue

            # In some cases (e.g ROPSTEN or private testnets), the discovery table might be
            # full of bad peers so if we can't connect to any peers we try a random bootstrap
            # node as well.
            if not len(self):
                try:
                    # NOTE: rebinds `response`, replacing the candidate list above.
                    response = await self.wait(
                        # TODO: This should use a BroadcastConfig to send the request to
                        # discovery only as soon as we have cut a new Lahja release.
                        self.event_bus.request(RandomBootnodeRequest()),
                        timeout=REQUEST_PEER_CANDIDATE_TIMEOUT)
                except TimeoutError:
                    self.logger.warning("Discovery did not answer RandomBootnodeRequest in time")
                    continue

            self.logger.debug2("Received candidates to connect to (%s)", response.candidates)
            await self.connect_to_nodes(from_uris(response.candidates))
async def main() -> None:
    """Connect to a running DiscoveryService over its IPC event bus and poll it
    for peer candidates, accumulating candidates without a ForkID in a skip
    list so subsequent requests keep triggering new discovery lookups.
    """
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(levelname)s: %(message)s',
        datefmt='%H:%M:%S')
    logger = logging.getLogger()
    parser = argparse.ArgumentParser()
    parser.add_argument('--ipc', type=str, help="The path to DiscoveryService's IPC file")
    args = parser.parse_args()

    # XXX: This is an ugly hack, but it's the easiest way to ensure we use the same network as the
    # DiscoveryService instance we connect to.
    for n_id, network_cfg in PRECONFIGURED_NETWORKS.items():
        # Detect the network by matching its data-dir name against the IPC path.
        if network_cfg.data_dir_name in args.ipc:
            network_id = n_id
            break
    else:
        raise AssertionError("Failed to detect network_id")

    logger.info(f"Asking DiscoveryService for peers on {network_cfg.chain_name}")
    connection_config = ConnectionConfig(DISCOVERY_EVENTBUS_ENDPOINT, args.ipc)
    # NOTE(review): redundant lookup — the loop above already left `network_cfg`
    # bound to this same entry.
    network_cfg = PRECONFIGURED_NETWORKS[network_id]
    vm_config = network_cfg.vm_configuration
    fork_blocks = extract_fork_blocks(vm_config)
    MAX_PEERS = 260
    skip_list: Set[NodeID] = set()
    async with TrioEndpoint(f"discv4-driver-{uuid.uuid4()}").run() as client:
        # Fail fast if we can't reach the service's endpoint.
        with trio.fail_after(2):
            await client.connect_to_endpoints(connection_config)
            await client.wait_until_any_endpoint_subscribed_to(PeerCandidatesRequest)
        while True:
            logger.info("Skip list has %d peers", len(skip_list))
            # Skip candidates already on the skip list or whose ForkID mismatches
            # this network's genesis/fork configuration.
            should_skip = functools.partial(
                skip_candidate_if_on_list_or_fork_mismatch,
                network_cfg.genesis_header.hash,
                network_cfg.genesis_header.block_number,
                fork_blocks,
                skip_list,
            )
            with trio.fail_after(1):
                response = await client.request(PeerCandidatesRequest(MAX_PEERS, should_skip))
            candidates = response.candidates
            missing_forkid = []
            for candidate in candidates:
                try:
                    extract_forkid(candidate.enr)
                except ENRMissingForkID:
                    # Candidate's ENR carries no ForkID at all.
                    missing_forkid.append(candidate.id)
            logger.info(
                "Got %d connection candidates, %d of those with a matching ForkID",
                len(candidates),
                len(candidates) - len(missing_forkid),
            )
            # Add candidates with no forkid to the skip list, just so that we keep triggering
            # random discovery lookups and hopefully come across more candidates with
            # compatible forkids
            logger.info("Adding %d candidates with no ForkID to skip list", len(missing_forkid))
            skip_list.update(missing_forkid)
            await trio.sleep(10)