Example no. 1
0
    async def _process_receipts(
            self, peer: ETHPeer,
            all_headers: Tuple[BlockHeader, ...]) -> Tuple[BlockHeader, ...]:
        """
        Downloads and persists the receipts for the given set of block headers.
        Some receipts may be trivial, having a blank root hash, and will not be requested.

        :param peer: to issue the receipt request to
        :param all_headers: attempt to get receipts for as many of these headers as possible
        :return: the headers for receipts that were successfully downloaded (or were trivial)
        """
        # Post-Byzantium blocks may have identical receipt roots (e.g. when they have the same
        # number of transactions and all succeed/failed: ropsten blocks 2503212 and 2503284),
        # so we do this to avoid requesting the same receipts multiple times.

        # combine headers with the same receipt root, so we can mark them as completed, later
        receipt_root_to_headers = groupby(attrgetter('receipt_root'), all_headers)

        # Headers with an empty receipt root need no download; they count as done.
        trivial_headers = tuple(
            receipt_root_to_headers.pop(BLANK_ROOT_HASH, ()))

        # pick one representative header for each distinct receipt root
        unique_headers_needed = tuple(
            first(headers)
            for headers in receipt_root_to_headers.values())

        if not unique_headers_needed:
            return trivial_headers

        receipt_bundles = await self._request_receipts(peer,
                                                       unique_headers_needed)

        if not receipt_bundles:
            # peer returned nothing usable; only the trivial headers are complete
            return trivial_headers

        try:
            await self._validate_receipts(unique_headers_needed,
                                          receipt_bundles)
        except ValidationError as err:
            self.logger.info(
                "Disconnecting from %s: sent invalid receipt: %s",
                peer,
                err,
            )
            await peer.disconnect(DisconnectReason.bad_protocol)
            return trivial_headers

        # process all of the returned receipts, storing their trie data
        # dicts in the database (the receipts themselves are not needed here,
        # hence the throwaway `_receipts`)
        _receipts, trie_roots_and_data_dicts = zip(*receipt_bundles)
        receipt_roots, trie_data_dicts = zip(*trie_roots_and_data_dicts)
        for trie_data in trie_data_dicts:
            await self.wait(self.db.coro_persist_trie_data_dict(trie_data))

        # Identify which headers have the receipt roots that are now complete.
        # Use a set for O(1) membership instead of scanning the tuple per group.
        completed_roots = set(receipt_roots)
        completed_header_groups = tuple(
            headers for root, headers in receipt_root_to_headers.items()
            if root in completed_roots)
        newly_completed_headers = tuple(concat(completed_header_groups))

        self.logger.debug(
            "Got receipts for %d/%d headers from %s, %d trivial, from request for %r..%r",
            len(newly_completed_headers),
            len(all_headers) - len(trivial_headers),
            peer,
            len(trivial_headers),
            all_headers[0],
            all_headers[-1],
        )
        return newly_completed_headers + trivial_headers
Example no. 2
0
def test_slash_validator(genesis_state, config):
    """
    Slash a random selection of validators across one full slashable epoch
    range, then verify the recorded slashed balances, the per-validator
    penalties, and the proposers' whistleblower rewards.

    NOTE(review): the test depends on the module-level ``random`` state and on
    the exact order of the ``random.*`` calls below — do not reorder them.
    """
    # Pick an epoch far enough past genesis that a full EPOCHS_PER_SLASHINGS_VECTOR
    # window fits strictly between genesis and it.
    some_epoch = (
        config.GENESIS_EPOCH
        + random.randrange(1, 2 ** 32)
        + config.EPOCHS_PER_SLASHINGS_VECTOR
    )
    earliest_slashable_epoch = some_epoch - config.EPOCHS_PER_SLASHINGS_VECTOR
    slashable_range = range(earliest_slashable_epoch, some_epoch)
    sampling_quotient = 4

    state = genesis_state.set(
        "slot",
        compute_start_slot_at_epoch(earliest_slashable_epoch, config.SLOTS_PER_EPOCH),
    )
    # Slash 1/sampling_quotient of the validator set; need at least 2 victims
    # so one can be split off as a solo slashing below.
    validator_count_to_slash = len(state.validators) // sampling_quotient
    assert validator_count_to_slash > 1
    validator_indices_to_slash = random.sample(
        range(len(state.validators)), validator_count_to_slash
    )
    # ensure case w/ one slashing in an epoch
    # by ignoring the first
    set_of_colluding_validators = validator_indices_to_slash[1:]
    # simulate multiple slashings in an epoch
    validators_grouped_by_coalition = groupby(
        lambda index: index % sampling_quotient, set_of_colluding_validators
    )
    coalition_count = len(validators_grouped_by_coalition)
    # Assign each coalition its own distinct random epoch in the slashable range.
    slashings = {
        epoch: coalition
        for coalition, epoch in zip(
            validators_grouped_by_coalition.values(),
            random.sample(slashable_range, coalition_count),
        )
    }
    # Give the held-out validator an epoch of its own, bumping past any epoch
    # already claimed by a coalition.
    another_slashing_epoch = first(random.sample(slashable_range, 1))
    while another_slashing_epoch in slashings:
        another_slashing_epoch += 1
    slashings[another_slashing_epoch] = (validator_indices_to_slash[0],)

    # Precompute expected per-epoch slashed totals and per-validator penalties
    # before mutating the state, also sanity-checking each victim is slashable.
    expected_slashings = {}
    expected_individual_penalties = {}
    for epoch, coalition in slashings.items():
        for index in coalition:
            validator = state.validators[index]
            assert validator.is_active(earliest_slashable_epoch)
            assert validator.exit_epoch == FAR_FUTURE_EPOCH
            assert validator.withdrawable_epoch == FAR_FUTURE_EPOCH

            # `update_in` calls the lambda immediately, so binding `index` from
            # the loop here is safe (no late-binding issue).
            expected_slashings = update_in(
                expected_slashings,
                [epoch],
                lambda balance: balance + state.validators[index].effective_balance,
                default=0,
            )
            expected_individual_penalties = update_in(
                expected_individual_penalties,
                [index],
                lambda penalty: (
                    penalty
                    + (
                        state.validators[index].effective_balance
                        // config.MIN_SLASHING_PENALTY_QUOTIENT
                    )
                ),
                default=0,
            )

    # emulate slashings across the current slashable range
    expected_proposer_rewards = {}
    for epoch, coalition in slashings.items():
        state = state.set(
            "slot", compute_start_slot_at_epoch(epoch, config.SLOTS_PER_EPOCH)
        )

        expected_total_slashed_balance = expected_slashings[epoch]
        # The proposer at this slot collects the whistleblower reward for
        # every slashing applied in this epoch.
        proposer_index = get_beacon_proposer_index(state, CommitteeConfig(config))

        expected_proposer_rewards = update_in(
            expected_proposer_rewards,
            [proposer_index],
            lambda reward: reward
            + (expected_total_slashed_balance // config.WHISTLEBLOWER_REWARD_QUOTIENT),
            default=0,
        )
        for index in coalition:
            state = slash_validator(state, index, config)

    # Advance the state past the slashable range before verifying.
    state = state.set(
        "slot", compute_start_slot_at_epoch(some_epoch, config.SLOTS_PER_EPOCH)
    )
    # verify result
    for epoch, coalition in slashings.items():
        for index in coalition:
            validator = state.validators[index]
            assert validator.exit_epoch != FAR_FUTURE_EPOCH
            assert validator.slashed
            assert validator.withdrawable_epoch == max(
                validator.exit_epoch + config.MIN_VALIDATOR_WITHDRAWABILITY_DELAY,
                epoch + config.EPOCHS_PER_SLASHINGS_VECTOR,
            )

            # slashings vector is indexed by epoch modulo its length
            slashed_epoch_index = epoch % config.EPOCHS_PER_SLASHINGS_VECTOR
            slashed_balance = state.slashings[slashed_epoch_index]
            assert slashed_balance == expected_slashings[epoch]
            assert state.balances[index] == (
                config.MAX_EFFECTIVE_BALANCE
                - expected_individual_penalties[index]
                + expected_proposer_rewards.get(index, 0)
            )

    # A proposer may itself have been slashed, hence the penalty subtraction.
    for proposer_index, total_reward in expected_proposer_rewards.items():
        assert state.balances[proposer_index] == (
            total_reward
            + config.MAX_EFFECTIVE_BALANCE
            - expected_individual_penalties.get(proposer_index, 0)
        )
Example no. 3
0
File: base.py  Project: nipz/py-evm
    def validate_uncles(self, block: BaseBlock) -> None:
        """
        Validate the uncles for the given block.

        :param block: the block whose uncles should be validated
        :raises ValidationError: if the uncle list disagrees with the header,
            contains duplicates, or any uncle fails ancestry or VM validation
        """
        has_uncles = len(block.uncles) > 0
        should_have_uncles = block.header.uncles_hash != EMPTY_UNCLE_HASH

        if not has_uncles and not should_have_uncles:
            # optimization to avoid loading ancestors from DB, since the block has no uncles
            return
        elif has_uncles and not should_have_uncles:
            raise ValidationError(
                "Block has uncles but header suggests uncles should be empty")
        elif should_have_uncles and not has_uncles:
            raise ValidationError(
                "Header suggests block should have uncles but block has none")

        # Check for duplicates (avoid shadowing the builtin ``hash``)
        uncle_groups = groupby(operator.attrgetter('hash'), block.uncles)
        duplicate_uncles = tuple(
            sorted(uncle_hash for uncle_hash, twins in uncle_groups.items()
                   if len(twins) > 1))
        if duplicate_uncles:
            # encode the raw hashes: joining bytes with a str separator would
            # raise TypeError instead of the intended ValidationError
            raise ValidationError("Block contains duplicate uncles:\n"
                                  " - {0}".format(
                                      ' - '.join(encode_hex(duplicate)
                                                 for duplicate in duplicate_uncles)))

        # Uncles may only reach back MAX_UNCLE_DEPTH ancestors.
        recent_ancestors = tuple(ancestor for ancestor in self.get_ancestors(
            MAX_UNCLE_DEPTH + 1, header=block.header))
        recent_ancestor_hashes = {
            ancestor.hash
            for ancestor in recent_ancestors
        }
        recent_uncle_hashes = _extract_uncle_hashes(recent_ancestors)

        for uncle in block.uncles:
            if uncle.hash == block.hash:
                raise ValidationError("Uncle has same hash as block")

            # ensure the uncle has not already been included.
            if uncle.hash in recent_uncle_hashes:
                raise ValidationError("Duplicate uncle: {0}".format(
                    encode_hex(uncle.hash)))

            # ensure that the uncle is not one of the canonical chain blocks.
            if uncle.hash in recent_ancestor_hashes:
                raise ValidationError(
                    "Uncle {0} cannot be an ancestor of {1}".format(
                        encode_hex(uncle.hash), encode_hex(block.hash)))

            # ensure that the uncle was built off of one of the canonical chain
            # blocks (but not off the block's own parent, which would make the
            # uncle the same height as the block).
            if uncle.parent_hash not in recent_ancestor_hashes or (
                    uncle.parent_hash == block.header.parent_hash):
                raise ValidationError(
                    "Uncle's parent {0} is not an ancestor of {1}".format(
                        encode_hex(uncle.parent_hash), encode_hex(block.hash)))

            # Now perform VM level validation of the uncle
            self.validate_seal(uncle)

            try:
                uncle_parent = self.get_block_header_by_hash(uncle.parent_hash)
            except HeaderNotFound:
                raise ValidationError("Uncle ancestor not found: {0}".format(
                    uncle.parent_hash))

            # uncle headers are validated by the VM rules of their own height
            uncle_vm_class = self.get_vm_class_for_block_number(
                uncle.block_number)
            uncle_vm_class.validate_uncle(block, uncle, uncle_parent)