Example no. 1
async def test_peer_subscriber_filters_messages(request, event_loop):
    peer, remote = await get_directly_linked_peers(
        request,
        event_loop,
        peer1_class=ETHPeer,
        peer2_class=ETHPeer,
    )

    header_subscriber = HeadersSubscriber()
    all_subscriber = AllSubscriber()

    peer.add_subscriber(header_subscriber)
    peer.add_subscriber(all_subscriber)

    remote.sub_proto.send_get_node_data([b'\x00' * 32])
    remote.sub_proto.send_get_block_headers(HeaderRequest(0, 1, 0, False))
    remote.sub_proto.send_get_node_data([b'\x00' * 32])
    remote.sub_proto.send_get_block_headers(HeaderRequest(1, 1, 0, False))
    remote.sub_proto.send_get_node_data([b'\x00' * 32])

    # yield to let remote and peer transmit.
    await asyncio.sleep(0.01)

    assert header_subscriber.queue_size == 2
    assert all_subscriber.queue_size == 5
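Example no. 1 relies on two subscriber helpers defined elsewhere in the test module. A minimal sketch of what they might look like, assuming trinity's PeerSubscriber interface (a subscription_msg_types frozenset plus msg_queue_maxsize); the actual definitions may differ:

class HeadersSubscriber(PeerSubscriber):
    # Only GetBlockHeaders messages are routed into this subscriber's queue,
    # so queue_size ends up at 2 after the five sends above.
    subscription_msg_types = frozenset({GetBlockHeaders})
    msg_queue_maxsize = 10


class AllSubscriber(PeerSubscriber):
    # Subscribing to the base Command type matches every sub-protocol message,
    # so queue_size ends up at 5.
    subscription_msg_types = frozenset({Command})
    msg_queue_maxsize = 10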
Example no. 2
    async def _handle_msg(self, peer: ETHPeer, cmd: Command,
                          msg: _DecodedMsgType) -> None:
        # TODO: stop ignoring these once we have proper handling for these messages.
        ignored_commands = (commands.Transactions, commands.NewBlock,
                            commands.NewBlockHashes)

        if isinstance(cmd, ignored_commands):
            pass
        elif isinstance(cmd, commands.GetBlockHeaders):
            query = cast(Dict[Any, Union[bool, int]], msg)
            request = HeaderRequest(
                query['block_number_or_hash'],
                query['max_headers'],
                query['skip'],
                cast(bool, query['reverse']),
            )
            await self._handle_get_block_headers(peer, request)
        elif isinstance(cmd, commands.GetBlockBodies):
            # Only serve up to MAX_BODIES_FETCH items in every request.
            block_hashes = cast(List[Hash32],
                                msg)[:eth_constants.MAX_BODIES_FETCH]
            await self._handler.handle_get_block_bodies(peer, block_hashes)
        elif isinstance(cmd, commands.GetReceipts):
            # Only serve up to MAX_RECEIPTS_FETCH items in every request.
            block_hashes = cast(List[Hash32],
                                msg)[:eth_constants.MAX_RECEIPTS_FETCH]
            await self._handler.handle_get_receipts(peer, block_hashes)
        elif isinstance(cmd, commands.GetNodeData):
            # Only serve up to MAX_STATE_FETCH items in every request.
            node_hashes = cast(List[Hash32],
                               msg)[:eth_constants.MAX_STATE_FETCH]
            await self._handler.handle_get_node_data(peer, node_hashes)
        else:
            self.logger.warning(
                "%s not handled during StateSync, must be implemented", cmd)
Example no. 3
    async def _handle_msg(
            self, peer: ETHPeer, cmd: Command, msg: _DecodedMsgType) -> None:
        # TODO: stop ignoring these once we have proper handling for these messages.
        ignored_commands = (commands.Transactions, commands.NewBlock, commands.NewBlockHashes)

        if isinstance(cmd, ignored_commands):
            pass
        elif isinstance(cmd, commands.NodeData):
            msg = cast(List[bytes], msg)
            if peer not in self.request_tracker.active_requests:
                # This is probably a batch that we retried after a timeout and ended up receiving
                # more than once, so ignore but log as an INFO just in case.
                self.logger.info(
                    "Got %d NodeData entries from %s that were not expected, ignoring them",
                    len(msg), peer)
                return

            self.logger.debug("Got %d NodeData entries from %s", len(msg), peer)
            _, requested_node_keys = self.request_tracker.active_requests.pop(peer)

            loop = asyncio.get_event_loop()
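            # Hash each returned node blob in the executor; a trie node's key
            # is the keccak of its raw bytes.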
            node_keys = await loop.run_in_executor(self._executor, list, map(keccak, msg))

            missing = set(requested_node_keys).difference(node_keys)
            self._peer_missing_nodes[peer].update(missing)
            if missing:
                await self.request_nodes(missing)

            await self._process_nodes(zip(node_keys, msg))
        elif isinstance(cmd, commands.GetBlockHeaders):
            query = cast(Dict[Any, Union[bool, int]], msg)
            request = HeaderRequest(
                query['block_number_or_hash'],
                query['max_headers'],
                query['skip'],
                cast(bool, query['reverse']),
            )
            await self._handle_get_block_headers(peer, request)
        elif isinstance(cmd, commands.GetBlockBodies):
            # Only serve up to MAX_BODIES_FETCH items in every request.
            block_hashes = cast(List[Hash32], msg)[:eth_constants.MAX_BODIES_FETCH]
            await self._handler.handle_get_block_bodies(peer, block_hashes)
        elif isinstance(cmd, commands.GetReceipts):
            # Only serve up to MAX_RECEIPTS_FETCH items in every request.
            block_hashes = cast(List[Hash32], msg)[:eth_constants.MAX_RECEIPTS_FETCH]
            await self._handler.handle_get_receipts(peer, block_hashes)
        elif isinstance(cmd, commands.GetNodeData):
            # Only serve up to MAX_STATE_FETCH items in every request.
            node_hashes = cast(List[Hash32], msg)[:eth_constants.MAX_STATE_FETCH]
            await self._handler.handle_get_node_data(peer, node_hashes)
        else:
            self.logger.warning("%s not handled during StateSync, must be implemented", cmd)
Example no. 4
    async def _handle_get_block_headers(self, peer: ETHPeer,
                                        query: Dict[str, Any]) -> None:
        self.logger.debug("Peer %s made header request: %s", peer, query)
        request = HeaderRequest(
            query['block_number_or_hash'],
            query['max_headers'],
            query['skip'],
            query['reverse'],
        )

        headers = await self._handler.lookup_headers(request)
        self.logger.trace("Replying to %s with %d headers", peer, len(headers))
        peer.sub_proto.send_block_headers(headers)
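Both the handlers and the tests build HeaderRequest positionally from the same four fields a GetBlockHeaders query carries (e.g. HeaderRequest(0, 1, 0, False) in Examples no. 1 and 5). A minimal sketch of such a request object, assuming a plain NamedTuple; the real class likely adds validation and lookup helpers:

from typing import NamedTuple, Union

from eth_typing import BlockNumber, Hash32


class HeaderRequest(NamedTuple):
    # Starting point of the request: either a block number or a block hash.
    block_number_or_hash: Union[BlockNumber, Hash32]
    max_headers: int
    skip: int
    reverse: bool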
Example no. 5
async def test_peer_subscriber_filters_messages(request, event_loop):
    peer, remote = await get_directly_linked_peers(
        request,
        event_loop,
        peer1_class=ETHPeer,
        peer2_class=ETHPeer,
    )
    await peer.events.started.wait()

    with peer.collect_sub_proto_messages() as collector:
        assert collector in peer._subscribers
        remote.sub_proto.send_get_node_data(NodeDataRequest([b'\x00' * 32]))
        remote.sub_proto.send_get_block_headers(HeaderRequest(0, 1, 0, False))
        remote.sub_proto.send_get_node_data(NodeDataRequest([b'\x00' * 32]))
        remote.sub_proto.send_get_block_headers(HeaderRequest(1, 1, 0, False))
        remote.sub_proto.send_get_node_data(NodeDataRequest([b'\x00' * 32]))
        # yield to let remote and peer transmit.
        await asyncio.sleep(0.01)

    assert collector not in peer._subscribers

    all_messages = collector.get_messages()
    assert len(all_messages) == 5

    assert isinstance(all_messages[0][1], GetNodeData)
    assert isinstance(all_messages[1][1], GetBlockHeaders)
    assert isinstance(all_messages[2][1], GetNodeData)
    assert isinstance(all_messages[3][1], GetBlockHeaders)
    assert isinstance(all_messages[4][1], GetNodeData)

    # make sure it isn't still collecting
    remote.sub_proto.send_get_block_headers(HeaderRequest(1, 1, 0, False))

    await asyncio.sleep(0.01)

    assert len(collector.get_messages()) == 0
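The assertions above (the collector is registered while the with block is active, unregistered afterwards, and stops receiving once the block exits) point at a context manager that temporarily adds a buffering subscriber to the peer. A sketch of that pattern only, assuming a MsgBuffer subscriber that records every message it is handed; the actual trinity implementation may differ:

import contextlib
from typing import Iterator

    @contextlib.contextmanager
    def collect_sub_proto_messages(self) -> Iterator['MsgBuffer']:
        # Buffer every sub-protocol message received while the with block runs.
        collector = MsgBuffer()  # assumed: a subscriber that stores (peer, cmd, msg) tuples
        self.add_subscriber(collector)
        try:
            yield collector
        finally:
            # Once removed, later messages (like the final GetBlockHeaders sent
            # after the block) never reach the collector.
            self.remove_subscriber(collector)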