def enrToMultiAddress(_enr):
    """Describe the node behind an ENR record as a JSON-friendly dict.

    The result contains:
      * ``enode``     -- the node's enode URI,
      * ``enrdata``   -- its decoded address, pubkey and id,
      * ``enritems``  -- the raw key/value items of the ENR record,
      * ``multiaddr`` -- the enode URI converted to multiaddr form.
    """
    node = KNode.from_enr_repr(_enr)
    decoded = {
        "address": node.address,
        "pubkey": node.pubkey,
        "id": node.id,
    }
    return {
        "enode": node.uri(),
        "enrdata": decoded,
        "enritems": ENR.from_repr(_enr).items(),
        "multiaddr": Handler.enodeToMultiAddress(node.uri()),
    }
def _get_peer_candidates(
        self,
        max_candidates: int,
        should_skip_fn: Callable[[NodeAPI], bool]) -> Iterable[NodeAPI]:
    """
    Yield candidates sourced from peers we have historically connected to
    which match the following criteria:

    * Matches all of: network_id, protocol, genesis_hash, protocol_version
    * Either has no blacklist record or existing blacklist record is expired.
    * Not skipped by ``should_skip_fn`` (e.g. remotes we are already
      connected to).

    NOTE(review): ``max_candidates`` is not enforced here -- the consumer is
    expected to stop iterating once it has enough; confirm callers rely on
    that before changing it.
    """
    cutoff = datetime.datetime.utcnow()
    metadata_filters = self._get_candidate_filter_query()
    # Query the database for peers that match our criteria.
    query = self.session.query(Remote).outerjoin(  # type: ignore
        # Join against the blacklist records with matching node ID
        Remote.blacklist,
    ).filter(
        # XXX: This is no longer necessary as the should_skip_fn() function
        # now takes care of skipping blacklisted peers, but not sure we want
        # to get rid of this?
        # Either they have no blacklist record or the record is expired.
        # (SQLAlchemy requires `== None`, not `is None`, to build the clause.)
        ((Remote.blacklist == None) | (BlacklistRecord.expires_at <= cutoff)),  # noqa: E711
        # They match our filters for network metadata
        *metadata_filters,
    ).order_by(
        # Most recently successfully-connected peers come first.
        Remote.last_connected_at.desc(),  # type: ignore
    )
    # Yield lazily so the consuming process decides how many records to fetch.
    for remote in query:
        candidate = Node.from_enr_repr(remote.enr)
        if not should_skip_fn(candidate):
            yield candidate