Example #1
def test_chaindb_add_block_number_to_hash_lookup(chaindb, block):
    block_number_to_hash_key = SchemaV1.make_block_number_to_hash_lookup_key(
        block.number)
    assert not chaindb.exists(block_number_to_hash_key)
    assert chaindb.get_chain_gaps() == GENESIS_CHAIN_GAPS
    chaindb.persist_block(block)
    assert chaindb.exists(block_number_to_hash_key)
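The assertions above hinge on the key bytes that SchemaV1 derives for each block number. A minimal sketch of that derivation, modeled on py-evm's SchemaV1.make_block_number_to_hash_lookup_key (eth/db/schema.py); the exact prefix is an assumption here and may differ across versions:

from eth_typing import BlockNumber

def make_block_number_to_hash_lookup_key(block_number: BlockNumber) -> bytes:
    # One flat, human-readable key per canonical block number; the
    # b'block-number-to-hash:' prefix is assumed, not pinned to a release.
    return b'block-number-to-hash:%d' % block_number

assert make_block_number_to_hash_lookup_key(BlockNumber(100)) == b'block-number-to-hash:100'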
Example #2
def _add_block_number_to_hash_lookup(self, header: BlockHeader) -> None:
    """
    Sets a record in the database to allow looking up this header by its
    block number.
    """
    block_number_to_hash_key = SchemaV1.make_block_number_to_hash_lookup_key(
        header.block_number)
    self.db.set(
        block_number_to_hash_key,
        rlp.encode(header.hash, sedes=rlp.sedes.binary),
    )
Example #3
    def _get_canonical_block_hash(db: BaseDB, block_number: BlockNumber) -> Hash32:
        validate_block_number(block_number)
        number_to_hash_key = SchemaV1.make_block_number_to_hash_lookup_key(block_number)

        try:
            encoded_key = db[number_to_hash_key]
        except KeyError:
            raise HeaderNotFound(
                "No canonical header for block number #{0}".format(block_number)
            )
        else:
            return rlp.decode(encoded_key, sedes=rlp.sedes.binary)
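Examples #2 and #3 are the two halves of one round trip: the write side stores the hash RLP-encoded with the binary sedes, and the read side decodes it the same way. A self-contained sketch of that round trip, with a plain dict standing in for the real key-value store and a placeholder key and hash:

import rlp

db = {}                          # stand-in for the real database
key = b'block-number-to-hash:7'  # hypothetical key, per the sketch above
block_hash = b'\x11' * 32        # placeholder 32-byte hash

db[key] = rlp.encode(block_hash, sedes=rlp.sedes.binary)
assert rlp.decode(db[key], sedes=rlp.sedes.binary) == block_hash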
Example #4
    def _decanonicalize_descendant_orphans(
            cls,
            db: DatabaseAPI,
            header: BlockHeaderAPI,
            checkpoints: Tuple[Hash32, ...]) -> None:

        # Determine if any children need to be de-canonicalized because they are not children of
        #   the new chain head
        new_gaps = starting_gaps = cls._get_header_chain_gaps(db)

        child_number = BlockNumber(header.block_number + 1)
        try:
            child = cls._get_canonical_block_header_by_number(db, child_number)
        except HeaderNotFound:
            # There is no canonical block here
            next_invalid_child = None
        else:
            if child.parent_hash != header.hash:
                if child.hash in checkpoints:
                    raise CheckpointsMustBeCanonical(
                        f"Trying to decanonicalize {child} while making {header} the chain tip"
                    )
                else:
                    next_invalid_child = child
            else:
                next_invalid_child = None

        while next_invalid_child:
            # decanonicalize, and add gap for tracking
            db.delete(SchemaV1.make_block_number_to_hash_lookup_key(child_number))
            new_gaps = reopen_gap(child_number, new_gaps)

            # find next child
            child_number = BlockNumber(child_number + 1)
            try:
                # All contiguous children must now be made invalid
                next_invalid_child = cls._get_canonical_block_header_by_number(db, child_number)
            except HeaderNotFound:
                # Found the end of this streak of canonical blocks
                break
            else:
                if next_invalid_child.hash in checkpoints:
                    raise CheckpointsMustBeCanonical(
                        f"Trying to decanonicalize {next_invalid_child} while making {header} the"
                        " chain tip"
                    )

        if new_gaps != starting_gaps:
            db.set(
                SchemaV1.make_header_chain_gaps_lookup_key(),
                rlp.encode(new_gaps, sedes=chain_gaps)
            )
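The reopen_gap bookkeeping above is easier to follow with concrete values. Assuming py-evm's representation of ChainGaps as a pair of (inclusive gap ranges, tip_child), consistent with the GENESIS_CHAIN_GAPS assertion in Example #1, de-canonicalizing consecutive numbers should widen one gap rather than open many. The values below are hand-worked for illustration, not captured from the real API:

# Assumed (gap_ranges, tip_child) shape from eth.db.chain_gaps.
genesis_gaps = ((), 1)          # only the genesis header; first missing is #1
contiguous_to_100 = ((), 101)   # headers 0..100 persisted with no holes
# The while-loop above reopens one block number at a time:
after_reopen_50 = (((50, 50),), 101)   # a one-block hole appears
after_reopen_51 = (((50, 51),), 101)   # the adjacent hole widens the same gap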
Example #5
    def _decanonicalize_single(cls, db: DatabaseAPI, block_num: BlockNumber,
                               base_gaps: ChainGaps) -> ChainGaps:
        """
        A single block number was found to no longer be canonical. As of this
        writing, this only happens because it does not link up with a
        checkpoint header.
        So de-canonicalize this block number and insert a gap in the tracked
        chain gaps.
        """

        db.delete(SchemaV1.make_block_number_to_hash_lookup_key(block_num))

        new_gaps = reopen_gap(block_num, base_gaps)
        if new_gaps != base_gaps:
            db.set(SchemaV1.make_header_chain_gaps_lookup_key(),
                   rlp.encode(new_gaps, sedes=chain_gaps))
        return new_gaps
Example #6
    def get_canonical_block_hash(self, block_number: BlockNumber) -> Hash32:
        """
        Returns the block hash for the canonical block at the given number.

        Raises HeaderNotFound if there's no block header with the given number in the
        canonical chain.
        """
        validate_block_number(block_number, title="Block Number")
        number_to_hash_key = SchemaV1.make_block_number_to_hash_lookup_key(
            block_number)

        try:
            encoded_key = self.db[number_to_hash_key]
        except KeyError:
            raise HeaderNotFound(
                "No canonical header for block number #{0}".format(
                    block_number))
        else:
            return rlp.decode(encoded_key, sedes=rlp.sedes.binary)
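A hypothetical caller, showing the error contract from the docstring: the lookup raises HeaderNotFound instead of leaking the underlying KeyError. Here headerdb is an assumed, already-constructed instance:

try:
    # 'headerdb' is an assumed instance exposing the method above
    block_hash = headerdb.get_canonical_block_hash(BlockNumber(10_000_000))
except HeaderNotFound:
    block_hash = None  # no canonical header at that height yet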
Example #7
async def test_header_syncer(request, event_loop, event_bus, chaindb_fresh,
                             chaindb_1000):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_to_server, server_to_client):

        client = HeaderChainSyncer(
            LatestTestChain(chaindb_fresh.db), chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_to_server],
                                           event_bus=event_bus))
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_to_client],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    AsyncChainDB(chaindb_1000.db),
                )):

            server_to_client.logger.info("%s is serving 1000 blocks",
                                         server_to_client)
            client_to_server.logger.info("%s is syncing up 1000",
                                         client_to_server)

            # Artificially split header sync into two parts, to verify that
            #   cycling to the next sync works properly. Split by erasing the canonical
            #   lookups in a middle chunk. We have to erase a range of them because of
            #   how the skeleton syncer asks for every ~192 headers. The skeleton request
            #   would skip right over a single missing header.
            erase_block_numbers = range(500, 700)
            erased_canonicals = []
            for blocknum in erase_block_numbers:
                dbkey = SchemaV1.make_block_number_to_hash_lookup_key(blocknum)
                canonical_hash = chaindb_1000.db[dbkey]
                erased_canonicals.append((dbkey, canonical_hash))
                del chaindb_1000.db[dbkey]

            async with background_asyncio_service(client):
                target_head = chaindb_1000.get_canonical_block_header_by_number(
                    erase_block_numbers[0] - 1)
                await wait_for_head(chaindb_fresh, target_head)

                # gut check that we didn't skip past the erased range of blocks
                head = chaindb_fresh.get_canonical_head()
                assert head.block_number < erase_block_numbers[0]

                # TODO validate that the skeleton syncer has cycled??

                # Replace the missing headers so that syncing can resume
                for dbkey, canonical_hash in erased_canonicals:
                    chaindb_1000.db[dbkey] = canonical_hash

                complete_chain_tip = chaindb_1000.get_canonical_head()

                # Not entirely certain that sending new block hashes is necessary, but...
                #   it shouldn't hurt anything. Trying to fix this flaky test:
                # https://app.circleci.com/pipelines/github/ethereum/trinity/6855/workflows/131f9b03-8c99-4419-8e88-d2ef216e3dbb/jobs/259263/steps  # noqa: E501

                server_to_client.eth_api.send_new_block_hashes(
                    NewBlockHash(complete_chain_tip.hash,
                                 complete_chain_tip.block_number), )

                await wait_for_head(chaindb_fresh, complete_chain_tip)
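The comment about erasing a range rather than a single header comes down to arithmetic: if the skeleton syncer samples roughly every 192 headers, only a hole wider than that stride is guaranteed to catch a skeleton request. A quick check under that assumption:

stride = 192                 # the ~192-header sampling taken from the comment
erased = range(500, 700)     # the erased canonical lookups above
# A hole wider than the stride must contain at least one skeleton sample,
# forcing the first sync to stop at the front edge of the erased range.
assert len(erased) > stride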
Example #8
def test_chaindb_add_block_number_to_hash_lookup(chaindb, block):
    block_number_to_hash_key = SchemaV1.make_block_number_to_hash_lookup_key(
        block.number)
    assert not chaindb.exists(block_number_to_hash_key)
    chaindb.persist_block(block)
    assert chaindb.exists(block_number_to_hash_key)
Example #9
async def test_header_syncer(request,
                             event_loop,
                             event_bus,
                             chaindb_fresh,
                             chaindb_1000):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        client = HeaderChainSyncer(
            LatestTestChain(chaindb_fresh.db),
            chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus)
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(ETHRequestServer(
            event_bus, TO_NETWORKING_BROADCAST_CONFIG, AsyncChainDB(chaindb_1000.db),
        )):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            # Artificially split header sync into two parts, to verify that
            #   cycling to the next sync works properly. Split by erasing the canonical
            #   lookups in a middle chunk. We have to erase a range of them because of
            #   how the skeleton syncer asks for every ~192 headers. The skeleton request
            #   would skip right over a single missing header.
            erase_block_numbers = range(500, 700)
            erased_canonicals = []
            for blocknum in erase_block_numbers:
                dbkey = SchemaV1.make_block_number_to_hash_lookup_key(blocknum)
                canonical_hash = chaindb_1000.db[dbkey]
                erased_canonicals.append((dbkey, canonical_hash))
                del chaindb_1000.db[dbkey]

            async with background_asyncio_service(client):
                target_head = chaindb_1000.get_canonical_block_header_by_number(
                    erase_block_numbers[0] - 1
                )
                await wait_for_head(chaindb_fresh, target_head)

                # gut check that we didn't skip past the erased range of blocks
                head = chaindb_fresh.get_canonical_head()
                assert head.block_number < erase_block_numbers[0]

                # TODO validate that the skeleton syncer has cycled??

                # Replace the missing headers so that syncing can resume
                for dbkey, canonical_hash in erased_canonicals:
                    chaindb_1000.db[dbkey] = canonical_hash

                # Do we have to do anything here to have the server notify the client
                #   that it's capable of serving more headers now? ... Apparently not.

                await wait_for_head(chaindb_fresh, chaindb_1000.get_canonical_head())