Example #1
def test_forkid_validation(local_head, remote_forkid, expected_error):
    fork_blocks = extract_fork_blocks(MAINNET_VM_CONFIGURATION)
    if expected_error:
        with pytest.raises(expected_error):
            validate_forkid(remote_forkid, MAINNET_GENESIS_HASH, local_head, fork_blocks)
    else:
        validate_forkid(remote_forkid, MAINNET_GENESIS_HASH, local_head, fork_blocks)
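The local_head, remote_forkid and expected_error arguments are supplied by a @pytest.mark.parametrize decorator that this snippet omits. A hypothetical row is sketched below, using the EIP-2124 mainnet test vector (fork hash 0xfc64ec04 at the genesis head, next fork at Homestead); the ForkID keyword names are assumed from the .hash/.next attributes used in Example #10.

# Hypothetical parametrization sketch; ForkID field names are assumed.
@pytest.mark.parametrize(
    'local_head, remote_forkid, expected_error',
    [
        # Remote on the same pre-Homestead mainnet segment: no error expected.
        (0, ForkID(hash=bytes.fromhex('fc64ec04'), next=1150000), None),
        # Remote fork hash that matches no point in mainnet history: rejected.
        (0, ForkID(hash=b'\x00\x00\x00\x00', next=0), BaseForkIDValidationError),
    ],
)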
Example #2
async def generate_eth_cap_enr_field(
        vm_config: Tuple[Tuple[BlockNumber, Type[VirtualMachineAPI]], ...],
        headerdb: BaseAsyncHeaderDB,
) -> Tuple[Literal[b'eth'], Tuple[bytes, bytes]]:
    head = await headerdb.coro_get_canonical_head()
    genesis_hash = await headerdb.coro_get_canonical_block_hash(GENESIS_BLOCK_NUMBER)
    fork_blocks = forkid.extract_fork_blocks(vm_config)
    our_forkid = forkid.make_forkid(genesis_hash, head.block_number, fork_blocks)
    return (b'eth', sedes.List([forkid.ForkID]).serialize([our_forkid]))
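The returned pair is the key and value of the ENR's eth entry. A peer can recover the fork id by reversing the same sedes; presumably this is what forkid.extract_forkid (used in Examples #7 and #9) does internally. A minimal decoding sketch, assuming enr[b'eth'] yields the value serialized above:

# Decoding sketch: rlp.sedes.List deserializes back to a one-element list.
remote_forkid = sedes.List([forkid.ForkID]).deserialize(enr[b'eth'])[0]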
Example #3
def _make_node_with_enr_and_forkid(genesis_hash, head, vm_config):
    fork_blocks = forkid.extract_fork_blocks(vm_config)
    node_forkid = forkid.make_forkid(genesis_hash, head, fork_blocks)
    ip = socket.inet_aton(IPAddressFactory.generate())
    udp_port = 30304
    enr = ENRFactory(
        custom_kv_pairs={
            b'eth': sedes.List([forkid.ForkID]).serialize([node_forkid]),
            IP_V4_ADDRESS_ENR_KEY: ip,
            UDP_PORT_ENR_KEY: udp_port,
            TCP_PORT_ENR_KEY: udp_port,
        })
    return Node(enr)
Example #4
def test_skip_candidate_if_on_list_or_fork_mismatch_is_pickleable():
    fork_blocks = forkid.extract_fork_blocks(MAINNET_VM_CONFIGURATION)
    skip_list = [NodeID(b'')]
    partial_fn = functools.partial(
        skip_candidate_if_on_list_or_fork_mismatch,
        MAINNET_GENESIS_HEADER.hash,
        MAINNET_GENESIS_HEADER.block_number,
        fork_blocks,
        skip_list,
    )
    unpickled = pickle.loads(pickle.dumps(partial_fn))
    assert unpickled.func == partial_fn.func
    assert unpickled.args == partial_fn.args
Example #5
def test_extract_fork_blocks():
    fork_blocks = extract_fork_blocks(MAINNET_VM_CONFIGURATION)
    assert fork_blocks == (
        1150000,   # Homestead
        1920000,   # DAO fork
        2463000,   # Tangerine Whistle
        2675000,   # Spurious Dragon
        4370000,   # Byzantium
        7280000,   # Constantinople / Petersburg
        9069000,   # Istanbul
        9200000,   # Muir Glacier
        12244000,  # Berlin
    )
Example #6
    async def maybe_connect_more_peers(self) -> None:
        rate_limiter = TokenBucket(
            rate=1 / PEER_CONNECT_INTERVAL,
            capacity=MAX_SEQUENTIAL_PEER_CONNECT,
        )

        # We set this to 0 so that upon startup (when our RoutingTable will have only a few
        # entries) we use the less restrictive filter function and get as many connection
        # candidates as possible.
        last_candidates_count = 0
        while self.manager.is_running:
            if self.is_full:
                await asyncio.sleep(PEER_CONNECT_INTERVAL)
                continue

            await rate_limiter.take()

            if last_candidates_count >= self.available_slots:
                head = await self.get_chain_head()
                genesis_hash = await self.get_genesis_hash()
                fork_blocks = extract_fork_blocks(self.vm_configuration)
                should_skip = functools.partial(
                    skip_candidate_if_on_list_or_fork_mismatch,
                    genesis_hash,
                    head.block_number,
                    fork_blocks,
                )
            else:
                self.logger.debug(
                    "Didn't get enough candidates last time, falling back to skipping "
                    "only peers that are blacklisted or already connected to")
                should_skip = skip_candidate_if_on_list  # type: ignore

            try:
                candidate_counts = await asyncio.gather(
                    *(self._add_peers_from_backend(backend, should_skip)
                      for backend in self.peer_backends))
                last_candidates_count = sum(candidate_counts)
            except OperationCancelled:
                # FIXME: We may no longer need this; need to confirm that none of the tasks we
                # create use BaseService.
                break
            except asyncio.CancelledError:
                # no need to log this exception, this is expected
                raise
            except Exception:
                self.logger.exception(
                    "unexpected error during peer connection")
                # Continue trying to connect to peers, even if there was a
                # surprising failure during one of the attempts.
                continue
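The TokenBucket arguments encode the connection policy: rate=1 / PEER_CONNECT_INTERVAL averages out to one connection attempt per PEER_CONNECT_INTERVAL seconds, while capacity=MAX_SEQUENTIAL_PEER_CONNECT allows a burst of up to that many back-to-back attempts after a quiet period.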
Example #7
def test_skip_candidate_if_on_list_or_fork_mismatch():
    mainnet_fork_blocks = forkid.extract_fork_blocks(MAINNET_VM_CONFIGURATION)
    should_skip_fn = functools.partial(
        skip_candidate_if_on_list_or_fork_mismatch,
        MAINNET_GENESIS_HEADER.hash,
        MAINNET_GENESIS_HEADER.block_number,
        mainnet_fork_blocks,
    )
    no_forkid_nodes = NodeFactory.create_batch(10)
    compatible_node = _make_node_with_enr_and_forkid(
        MAINNET_GENESIS_HEADER.hash, MAINNET_GENESIS_HEADER.block_number,
        MAINNET_VM_CONFIGURATION)
    # Ensure compatible_node's forkid is compatible with our current chain state.
    forkid.validate_forkid(
        forkid.extract_forkid(compatible_node.enr),
        MAINNET_GENESIS_HEADER.hash,
        MAINNET_GENESIS_HEADER.block_number,
        mainnet_fork_blocks,
    )

    # It returns True for candidates on the skip list, even if they are fork-id compatible.
    skip_list = [no_forkid_nodes[1].id, compatible_node.id]
    assert functools.partial(should_skip_fn, skip_list)(compatible_node) is True
    assert functools.partial(should_skip_fn, skip_list)(no_forkid_nodes[1]) is True

    # It returns False for candidates with no fork-id that are not on the skip list.
    with pytest.raises(ENRMissingForkID):
        forkid.extract_forkid(no_forkid_nodes[0].enr)
    assert functools.partial(should_skip_fn, skip_list)(no_forkid_nodes[0]) is False

    # It returns False for candidates with compatible fork-ids that are not on the skip list.
    assert functools.partial(should_skip_fn, [])(compatible_node) is False

    # It returns True for candidates with incompatible fork-ids.
    incompatible_node = _make_node_with_enr_and_forkid(
        ROPSTEN_GENESIS_HEADER.hash, ROPSTEN_GENESIS_HEADER.block_number,
        ROPSTEN_VM_CONFIGURATION)
    with pytest.raises(BaseForkIDValidationError):
        forkid.validate_forkid(
            forkid.extract_forkid(incompatible_node.enr),
            MAINNET_GENESIS_HEADER.hash,
            MAINNET_GENESIS_HEADER.block_number,
            mainnet_fork_blocks,
        )
    assert functools.partial(should_skip_fn, [])(incompatible_node) is True
Example #8
    async def maybe_connect_more_peers(self) -> None:
        rate_limiter = TokenBucket(
            rate=1 / PEER_CONNECT_INTERVAL,
            capacity=MAX_SEQUENTIAL_PEER_CONNECT,
        )

        # We set this to 0 so that upon startup (when our RoutingTable will have only a few
        # entries) we use the less restrictive filter function and get as many connection
        # candidates as possible.
        last_candidates_count = 0
        while self.manager.is_running:
            if self.is_full:
                await asyncio.sleep(PEER_CONNECT_INTERVAL)
                continue

            await rate_limiter.take()

            if last_candidates_count >= self.available_slots:
                head = await self.get_chain_head()
                genesis_hash = await self.get_genesis_hash()
                fork_blocks = extract_fork_blocks(self.vm_configuration)
                should_skip = functools.partial(
                    skip_candidate_if_on_list_or_fork_mismatch,
                    genesis_hash,
                    head.block_number,
                    fork_blocks,
                )
            else:
                self.logger.debug(
                    "Didn't get enough candidates last time, falling back to skipping "
                    "only peers that are blacklisted or already connected to")
                should_skip = skip_candidate_if_on_list  # type: ignore

            candidate_counts = await asyncio.gather(*(
                self._add_peers_from_backend(backend, should_skip)
                for backend in self.peer_backends
            ))
            last_candidates_count = sum(candidate_counts)
Example #9
async def main() -> None:
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    logger = logging.getLogger()

    parser = argparse.ArgumentParser()
    parser.add_argument('--ipc',
                        type=str,
                        help="The path to DiscoveryService's IPC file")
    args = parser.parse_args()

    # XXX: This is an ugly hack, but it's the easiest way to ensure we use the same network as the
    # DiscoveryService instance we connect to.
    for n_id, network_cfg in PRECONFIGURED_NETWORKS.items():
        if network_cfg.data_dir_name in args.ipc:
            network_id = n_id
            break
    else:
        raise AssertionError("Failed to detect network_id")

    network_cfg = PRECONFIGURED_NETWORKS[network_id]
    logger.info(
        f"Asking DiscoveryService for peers on {network_cfg.chain_name}")
    connection_config = ConnectionConfig(DISCOVERY_EVENTBUS_ENDPOINT, args.ipc)
    vm_config = network_cfg.vm_configuration
    fork_blocks = extract_fork_blocks(vm_config)
    MAX_PEERS = 260
    skip_list: Set[NodeID] = set()
    async with TrioEndpoint(f"discv4-driver-{uuid.uuid4()}").run() as client:
        with trio.fail_after(2):
            await client.connect_to_endpoints(connection_config)
            await client.wait_until_any_endpoint_subscribed_to(
                PeerCandidatesRequest)

        while True:
            logger.info("Skip list has %d peers", len(skip_list))
            should_skip = functools.partial(
                skip_candidate_if_on_list_or_fork_mismatch,
                network_cfg.genesis_header.hash,
                network_cfg.genesis_header.block_number,
                fork_blocks,
                skip_list,
            )
            with trio.fail_after(1):
                response = await client.request(
                    PeerCandidatesRequest(MAX_PEERS, should_skip))
            candidates = response.candidates
            missing_forkid = []
            for candidate in candidates:
                try:
                    extract_forkid(candidate.enr)
                except ENRMissingForkID:
                    missing_forkid.append(candidate.id)
            logger.info(
                "Got %d connection candidates, %d of those with a matching ForkID",
                len(candidates),
                len(candidates) - len(missing_forkid),
            )

            # Add candidates with no forkid to the skip list, just so that we keep triggering
            # random discovery lookups and hopefully come across more candidates with
            # compatible forkids
            logger.info("Adding %d candidates with no ForkID to skip list",
                        len(missing_forkid))
            skip_list.update(missing_forkid)
            await trio.sleep(10)
Example #10
def _test_make_forkid(vm_config, genesis_hash, head, expected_forkid):
    fork_blocks = extract_fork_blocks(vm_config)
    forkid = make_forkid(genesis_hash, head, fork_blocks)
    assert forkid.hash == expected_forkid.hash
    assert forkid.next == expected_forkid.next
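Both make_forkid and validate_forkid follow EIP-2124: the 4-byte fork hash is a rolling CRC32 over the genesis hash and every fork block the head has already passed, and next points at the first fork still ahead. Below is a minimal sketch of that computation, independent of Trinity's internals (sketch_make_forkid is a hypothetical name, and the real ForkID is an rlp-serializable class rather than a plain tuple).

import zlib
from typing import Tuple

def sketch_make_forkid(genesis_hash: bytes,
                       head: int,
                       fork_blocks: Tuple[int, ...]) -> Tuple[bytes, int]:
    # Fold each already-passed fork block (8-byte big-endian) into a CRC32
    # seeded with the genesis hash, per EIP-2124.
    checksum = zlib.crc32(genesis_hash)
    next_fork = 0
    for block in sorted(fork_blocks):
        if block <= head:
            checksum = zlib.crc32(block.to_bytes(8, 'big'), checksum)
        else:
            next_fork = block
            break
    return checksum.to_bytes(4, 'big'), next_fork

For mainnet at head 0 this yields the EIP-2124 test vector: fork hash fc64ec04 with next equal to the Homestead block, 1150000.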