Example #1
    def initialize_block(self, block_header):
        """Do initialization necessary for the consensus to claim a block,
        this may include initiating voting activates, starting proof of work
        hash generation, or create a PoET wait timer.

        Args:
            block_header (BlockHeader): the BlockHeader to initialize.
        Returns:
            True
        """
        # Using the current chain head, we need to create a state view so we
        # can get our config values.
        state_view = \
            BlockWrapper.state_view_for_block(
                self._block_cache.block_store.chain_head,
                self._state_view_factory)

        settings_view = SettingsView(state_view)
        self._min_wait_time = settings_view.get_setting(
            "suomi.consensus.min_wait_time", self._min_wait_time, int)
        self._max_wait_time = settings_view.get_setting(
            "suomi.consensus.max_wait_time", self._max_wait_time, int)
        self._valid_block_publishers = settings_view.get_setting(
            "suomi.consensus.valid_block_publishers",
            self._valid_block_publishers, list)

        block_header.consensus = b"Devmode"
        self._start_time = time.time()
        self._wait_time = random.uniform(self._min_wait_time,
                                         self._max_wait_time)
        return True
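The Devmode publisher above only records a randomly chosen wait time; the companion publish check (not shown here) presumably compares the elapsed time against that window. Below is a minimal, self-contained sketch of the same random-wait-window pattern, using hypothetical names rather than the actual Devmode publisher API.

import random
import time

class RandomWaitWindow:
    """Pick a random wait time in [min_wait, max_wait] seconds and report expiry."""

    def __init__(self, min_wait=0, max_wait=0):
        self._start_time = time.time()
        # random.uniform(a, a) simply returns a, so min == max degenerates
        # into a fixed wait time.
        self._wait_time = random.uniform(min_wait, max_wait)

    def expired(self):
        return (time.time() - self._start_time) >= self._wait_time

window = RandomWaitWindow(min_wait=1, max_wait=5)
print(window.expired())  # False right after creation; True once the window elapses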
Example #2
def _transaction_index_keys(block):
    blkw = BlockWrapper.wrap(block)
    keys = []
    for batch in blkw.batches:
        for txn in batch.transactions:
            keys.append(txn.header_signature.encode())
    return keys
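_transaction_index_keys returns the secondary-index keys (encoded transaction header signatures) under which a block is filed. A minimal sketch of how such a key function can drive an index build, assuming a plain dict stands in for the real store (hypothetical helper, not Sawtooth API):

def build_index(blocks, index_keys_fn):
    """Map each key produced by index_keys_fn(block) to its block."""
    index = {}
    for block in blocks:
        for key in index_keys_fn(block):
            index[key] = block
    return index

# Usage sketch:
#   txn_index = build_index(all_blocks, _transaction_index_keys)
#   containing_block = txn_index[txn_header_signature.encode()]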
Example #3
def _load_consensus(self, block):
    """Load the consensus module using the state as of the given block."""
    if block is not None:
        return ConsensusFactory.get_configured_consensus_module(
            block.header_signature,
            BlockWrapper.state_view_for_block(block,
                                              self._state_view_factory))
    return ConsensusFactory.get_consensus_module('genesis')
Example #4
def add_block(self, block):
    with self.lock:
        blkw = BlockWrapper(block)
        block = self._complete_block(blkw)
        if block is not None:
            self.block_cache[block.header_signature] = blkw
            self._on_block_received(blkw)
            self._process_incomplete_blocks(block.header_signature)
        self._incomplete_blocks_length.set_value(
            len(self._incomplete_blocks))
Example #5
    def on_check_publish_block(self, force=False):
        """Ask the consensus module if it is time to claim the candidate block
        if it is then, claim it and tell the world about it.
        :return:
            None
        """
        try:
            with self._lock:
                if (self._chain_head is not None
                        and self._candidate_block is None
                        and self._pending_batches):
                    self._build_candidate_block(self._chain_head)

                if self._candidate_block and (
                    force or
                    self._candidate_block.has_pending_batches()) and \
                        self._candidate_block.check_publish_block():

                    pending_batches = []  # will receive the list of batches
                    # that were not added to the block
                    injected_batch_ids = \
                        self._candidate_block.injected_batch_ids
                    last_batch = self._candidate_block.last_batch
                    block = self._candidate_block.finalize_block(
                        self._identity_signer, pending_batches)
                    self._candidate_block = None
                    # Update the _pending_batches to reflect what we learned.

                    last_batch_index = self._pending_batches.index(last_batch)
                    unsent_batches = \
                        self._pending_batches[last_batch_index + 1:]
                    self._pending_batches = pending_batches + unsent_batches

                    self._pending_batch_gauge.set_value(
                        len(self._pending_batches))

                    if block:
                        blkw = BlockWrapper(block)
                        LOGGER.info("Claimed Block: %s", blkw)
                        self._block_sender.send(
                            blkw.block, keep_batches=injected_batch_ids)
                        self._blocks_published_count.inc()

                        # We built our candidate; disable processing until
                        # the chain head is updated. Only set this if
                        # we succeeded; otherwise, try again. This
                        # can happen in cases where txn dependencies
                        # did not validate when building the block.
                        self.on_chain_updated(None)

        # pylint: disable=broad-except
        except Exception as exc:
            LOGGER.critical("on_check_publish_block exception.")
            LOGGER.exception(exc)
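The pending-batch bookkeeping after finalize_block is the subtle part: batches the candidate rejected come back through the pending_batches out-parameter, and anything submitted after the candidate's last_batch is re-queued behind them. A self-contained sketch of that list surgery, with plain strings standing in for Batch objects (hypothetical data):

pending = ['b1', 'b2', 'b3', 'b4', 'b5']   # batches known to the publisher
rejected_by_candidate = ['b2']             # returned through finalize_block()
last_batch = 'b3'                          # last batch the candidate consumed

last_index = pending.index(last_batch)
unsent = pending[last_index + 1:]          # arrived after the candidate's cut-off
pending = rejected_by_candidate + unsent

assert pending == ['b2', 'b4', 'b5']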
Example #6
    def finalize_block(self, block_header):
        """Finalize a block to be claimed. Provide any signatures and
        data updates that need to be applied to the block before it is
        signed and broadcast to the network.

        Args:
            block_header (BlockHeader): The block header for the candidate
                block that needs to be finalized.
        Returns:
            Boolean: True if the candidate block is good and should be
            generated. False if the block should be abandoned.
        """
        # To compute the block hash, we are going to perform a hash using the
        # previous block ID and the batch IDs contained in the block
        hasher = hashlib.sha256(block_header.previous_block_id.encode())
        for batch_id in block_header.batch_ids:
            hasher.update(batch_id.encode())
        block_hash = hasher.hexdigest()

        # Using the current chain head, we need to create a state view so we
        # can create a PoET enclave.
        state_view = \
            BlockWrapper.state_view_for_block(
                block_wrapper=self._block_cache.block_store.chain_head,
                state_view_factory=self._state_view_factory)

        poet_enclave_module = \
            factory.PoetEnclaveFactory.get_poet_enclave_module(
                state_view=state_view,
                config_dir=self._config_dir,
                data_dir=self._data_dir)

        # We need to create a wait certificate for the block and then serialize
        # that into the block header consensus field.
        active_key = self._poet_key_state_store.active_key
        poet_key_state = self._poet_key_state_store[active_key]
        sealed_signup_data = poet_key_state.sealed_signup_data
        try:
            wait_certificate = \
                WaitCertificate.create_wait_certificate(
                    poet_enclave_module=poet_enclave_module,
                    sealed_signup_data=sealed_signup_data,
                    wait_timer=self._wait_timer,
                    block_hash=block_hash)
            block_header.consensus = \
                json.dumps(wait_certificate.dump()).encode()
        except ValueError as ve:
            LOGGER.error('Failed to create wait certificate: %s', ve)
            return False

        LOGGER.debug('Created wait certificate: %s', wait_certificate)

        return True
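The block hash that the wait certificate commits to is simply SHA-256 over the previous block ID followed by the batch IDs, in order. A self-contained sketch with hypothetical IDs (the real values are hex-encoded signatures):

import hashlib

previous_block_id = 'aabbcc'
batch_ids = ['0011', '2233']

hasher = hashlib.sha256(previous_block_id.encode())
for batch_id in batch_ids:
    hasher.update(batch_id.encode())
block_hash = hasher.hexdigest()
print(block_hash)  # 64-character hex digest bound to this block's contents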
Example #7
    def deserialize_block(value):
        """
        Deserialize a byte string into a BlockWrapper

        Args:
            value (bytes): the byte string to deserialize

        Returns:
            BlockWrapper: a block wrapper instance
        """
        # Block id strings are stored under batch/txn ids for reference.
        # Only Blocks, not ids or Nones, should be returned by _get_block.
        block = Block()
        block.ParseFromString(value)
        return BlockWrapper(status=BlockStatus.Valid, block=block)
Example #8
    def start(self, on_done):
        """
        Starts the genesis block creation process.  Will call the given
        `on_done` callback on successful completion.

        Args:
            on_done (function): a function called on completion

        Raises:
            InvalidGenesisStateError: raised if a genesis block cannot be
                produced or the resulting block-chain-id cannot be saved.
        """
        genesis_file = os.path.join(self._data_dir, 'genesis.batch')
        try:
            with open(genesis_file, 'rb') as batch_file:
                genesis_data = genesis_pb2.GenesisData()
                genesis_data.ParseFromString(batch_file.read())
            LOGGER.info('Producing genesis block from %s', genesis_file)
        except IOError:
            raise InvalidGenesisStateError(
                "Genesis File {} specified, but unreadable".format(
                    genesis_file))

        initial_state_root = self._context_manager.get_first_root()

        genesis_batches = [batch for batch in genesis_data.batches]
        if genesis_batches:
            scheduler = SerialScheduler(
                self._context_manager.get_squash_handler(),
                initial_state_root,
                always_persist=True)

            LOGGER.debug('Adding %s batches', len(genesis_data.batches))
            for batch in genesis_data.batches:
                scheduler.add_batch(batch)

            self._transaction_executor.execute(scheduler)

            scheduler.finalize()
            scheduler.complete(block=True)

        state_hash = initial_state_root
        for batch in genesis_batches:
            result = scheduler.get_batch_execution_result(
                batch.header_signature)
            if result is None or not result.is_valid:
                raise InvalidGenesisStateError(
                    'Unable to create genesis block, due to batch {}'.format(
                        batch.header_signature))
            if result.state_hash is not None:
                state_hash = result.state_hash
        LOGGER.debug('Produced state hash %s for genesis block.', state_hash)

        block_builder = self._generate_genesis_block()
        block_builder.add_batches(genesis_batches)
        block_builder.set_state_hash(state_hash)

        block_publisher = self._get_block_publisher(initial_state_root)
        if not block_publisher.initialize_block(block_builder.block_header):
            LOGGER.error('Consensus refused to initialize genesis block.')
            raise InvalidGenesisConsensusError(
                'Consensus refused to initialize genesis block.')

        if not block_publisher.finalize_block(block_builder.block_header):
            LOGGER.error('Consensus refused to finalize genesis block.')
            raise InvalidGenesisConsensusError(
                'Consensus refused to finalize genesis block.')

        self._sign_block(block_builder)

        block = block_builder.build_block()

        blkw = BlockWrapper(block=block, status=BlockStatus.Valid)

        LOGGER.info('Genesis block created: %s', blkw)

        self._completer.add_block(block)
        self._block_store.update_chain([blkw])

        self._chain_id_manager.save_block_chain_id(block.header_signature)

        LOGGER.debug('Deleting genesis data.')
        os.remove(genesis_file)

        if on_done is not None:
            on_done()
Example #9
    def _build_candidate_block(self, chain_head):
        """ Build a candidate block and construct the consensus object to
        validate it.
        :param chain_head: The block to build on top of.
        :return: (BlockBuilder) - The candidate block in a BlockBuilder
        wrapper.
        """
        state_view = BlockWrapper.state_view_for_block(
            chain_head, self._state_view_factory)
        consensus_module = ConsensusFactory.get_configured_consensus_module(
            chain_head.header_signature, state_view)

        # using chain_head so we can use the settings cache
        max_batches = int(
            self._settings_cache.get_setting(
                'suomi.publisher.max_batches_per_block',
                chain_head.state_root_hash,
                default_value=0))

        public_key = self._identity_signer.get_public_key().as_hex()
        consensus = consensus_module.\
            BlockPublisher(block_cache=self._block_cache,
                           state_view_factory=self._state_view_factory,
                           batch_publisher=self._batch_publisher,
                           data_dir=self._data_dir,
                           config_dir=self._config_dir,
                           validator_id=public_key)

        batch_injectors = []
        if self._batch_injector_factory is not None:
            batch_injectors = self._batch_injector_factory.create_injectors(
                chain_head.identifier)
            if batch_injectors:
                LOGGER.debug("Loaded batch injectors: %s", batch_injectors)

        block_header = BlockHeader(
            block_num=chain_head.block_num + 1,
            previous_block_id=chain_head.header_signature,
            signer_public_key=public_key)
        block_builder = BlockBuilder(block_header)

        if not consensus.initialize_block(block_builder.block_header):
            if not self._logging_states.consensus_not_ready:
                self._logging_states.consensus_not_ready = True
                LOGGER.debug("Consensus not ready to build candidate block.")
            return None

        if self._logging_states.consensus_not_ready:
            self._logging_states.consensus_not_ready = False
            LOGGER.debug("Consensus is ready to build candidate block.")

        # create a new scheduler
        scheduler = self._transaction_executor.create_scheduler(
            self._squash_handler, chain_head.state_root_hash)

        # build the TransactionCommitCache
        committed_txn_cache = TransactionCommitCache(
            self._block_cache.block_store)

        self._transaction_executor.execute(scheduler)
        self._candidate_block = _CandidateBlock(
            self._block_cache.block_store, consensus, scheduler,
            committed_txn_cache, block_builder, max_batches, batch_injectors,
            SettingsView(state_view), public_key)

        for batch in self._pending_batches:
            if self._candidate_block.can_add_batch:
                self._candidate_block.add_batch(batch)
            else:
                break
Example #10
    def _get_block(self, key):
        value = self._block_store.get(key)
        if value is None:
            raise KeyError('Block "{}" not found in store'.format(key))

        return BlockWrapper.wrap(value)
Example #11
def _block_num_index_keys(block):
    blkw = BlockWrapper.wrap(block)
    # Format the number as a 64-bit hex value for natural ordering
    return [BlockStore.block_num_to_hex(blkw.block_num).encode()]
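Fixed-width hex keys make lexicographic ordering in the underlying key-value store match numeric block order. A self-contained sketch, assuming block_num_to_hex zero-pads to 16 hex digits (which its use for natural ordering implies):

def block_num_to_hex(block_num):
    # 64-bit value rendered as 16 zero-padded hex digits
    return '{:016x}'.format(block_num)

keys = [block_num_to_hex(n) for n in (2, 10, 255)]
assert keys == sorted(keys)  # lexicographic order == numeric order

# Plain decimal strings do not sort numerically:
assert sorted(['2', '10', '255']) == ['10', '2', '255']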
Example #12
def _batch_index_keys(block):
    blkw = BlockWrapper.wrap(block)
    return [batch.header_signature.encode() for batch in blkw.batches]
Example #13
    def initialize_block(self, block_header):
        """Do initialization necessary for the consensus to claim a block,
        this may include initiating voting activities, starting proof of work
        hash generation, or create a PoET wait timer.

        Args:
            block_header (BlockHeader): The BlockHeader to initialize.
        Returns:
            Boolean: True if the candidate block should be built. False if
            no candidate should be built.
        """
        # If the previous block ID matches our cached one, that means that we
        # have already determined that even if we initialize the requested
        # block we would not be able to claim it.  So, instead of wasting time
        # doing all of the checking again, simply short-circuit the failure so
        # that the validator can go do something more useful.
        if block_header.previous_block_id == \
                PoetBlockPublisher._previous_block_id:
            return False
        PoetBlockPublisher._previous_block_id = block_header.previous_block_id

        # Using the current chain head, we need to create a state view so we
        # can create a PoET enclave.
        state_view = \
            BlockWrapper.state_view_for_block(
                block_wrapper=self._block_cache.block_store.chain_head,
                state_view_factory=self._state_view_factory)

        poet_enclave_module = \
            factory.PoetEnclaveFactory.get_poet_enclave_module(
                state_view=state_view,
                config_dir=self._config_dir,
                data_dir=self._data_dir)

        # Get our validator registry entry to see what PoET public key
        # other validators think we are using.
        validator_registry_view = ValidatorRegistryView(state_view)
        validator_info = None

        try:
            validator_id = block_header.signer_public_key
            validator_info = \
                validator_registry_view.get_validator_info(
                    validator_id=validator_id)
        except KeyError:
            pass

        # If we don't have a validator registry entry, then check the active
        # key.  If we don't have one, then we need to sign up.  If we do have
        # one, then our validator registry entry has not percolated through the
        # system, so nothing to do but wait.
        active_poet_public_key = self._poet_key_state_store.active_key
        if validator_info is None:
            if active_poet_public_key is None:
                LOGGER.debug(
                    'No public key found, so going to register new signup '
                    'information')
                self._register_signup_information(
                    block_header=block_header,
                    poet_enclave_module=poet_enclave_module)
            else:  # Check if we need to give up on this registration attempt
                try:
                    nonce = self._poet_key_state_store[
                        active_poet_public_key].signup_nonce
                except (ValueError, AttributeError):
                    self._poet_key_state_store.active_key = None
                    LOGGER.warning(
                        'Poet Key State Store had an inaccessible or '
                        'corrupt active key [%s]; clearing the '
                        'key.', active_poet_public_key)
                    return False

                self._handle_registration_timeout(
                    block_header=block_header,
                    poet_enclave_module=poet_enclave_module,
                    state_view=state_view,
                    signup_nonce=nonce,
                    poet_public_key=active_poet_public_key)
            return False

        # Retrieve the key state corresponding to the PoET public key in our
        # validator registry entry.
        poet_key_state = None
        try:
            poet_key_state = \
                self._poet_key_state_store[
                    validator_info.signup_info.poet_public_key]
        except (ValueError, KeyError):
            pass

        # If there is no key state associated with the PoET public key that
        # other validators think we should be using, then we need to create
        # new signup information as we have no way whatsoever to publish
        # blocks that other validators will accept.
        if poet_key_state is None:
            LOGGER.debug(
                'PoET public key %s...%s in validator registry not found in '
                'key state store.  Signing up again.',
                validator_info.signup_info.poet_public_key[:8],
                validator_info.signup_info.poet_public_key[-8:])
            self._register_signup_information(
                block_header=block_header,
                poet_enclave_module=poet_enclave_module)

            # We need to put fake information in the key state store for the
            # PoET public key the other validators think we are using so that
            # we don't try to keep signing up.  However, we are going to mark
            # that key state store entry as being refreshed so that we will
            # never actually try to use it.
            dummy_data = b64encode(b'No sealed signup data').decode('utf-8')
            self._poet_key_state_store[
                validator_info.signup_info.poet_public_key] = \
                PoetKeyState(
                    sealed_signup_data=dummy_data,
                    has_been_refreshed=True,
                    signup_nonce='unknown')

            return False

        # Check the key state.  If it is marked as being refreshed, then we are
        # waiting until our PoET public key is updated in the validator
        # registry and therefore we cannot publish any blocks.
        if poet_key_state.has_been_refreshed:
            LOGGER.debug(
                'PoET public key %s...%s has been refreshed.  Wait for new '
                'key to show up in validator registry.',
                validator_info.signup_info.poet_public_key[:8],
                validator_info.signup_info.poet_public_key[-8:])

            # Check if we need to give up on this registration attempt
            self._handle_registration_timeout(
                block_header=block_header,
                poet_enclave_module=poet_enclave_module,
                state_view=state_view,
                signup_nonce=poet_key_state.signup_nonce,
                poet_public_key=active_poet_public_key)
            return False

        # If the PoET public key in the validator registry is not the active
        # one, then we need to switch the active key in the key state store.
        if validator_info.signup_info.poet_public_key != \
                active_poet_public_key:
            active_poet_public_key = validator_info.signup_info.poet_public_key
            self._poet_key_state_store.active_key = active_poet_public_key

        # Ensure that the enclave is using the appropriate keys
        try:
            unsealed_poet_public_key = \
                SignupInfo.unseal_signup_data(
                    poet_enclave_module=poet_enclave_module,
                    sealed_signup_data=poet_key_state.sealed_signup_data)
        except SystemError:
            # Signup data is unusable
            LOGGER.error(
                'Could not unseal signup data associated with PPK: %s..%s',
                active_poet_public_key[:8], active_poet_public_key[-8:])
            self._poet_key_state_store.active_key = None
            return False

        assert active_poet_public_key == unsealed_poet_public_key

        LOGGER.debug('Using PoET public key: %s...%s',
                     active_poet_public_key[:8], active_poet_public_key[-8:])
        LOGGER.debug('Unseal signup data: %s...%s',
                     poet_key_state.sealed_signup_data[:8],
                     poet_key_state.sealed_signup_data[-8:])

        consensus_state = \
            ConsensusState.consensus_state_for_block_id(
                block_id=block_header.previous_block_id,
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                consensus_state_store=self._consensus_state_store,
                poet_enclave_module=poet_enclave_module)
        poet_settings_view = PoetSettingsView(state_view)

        # If our signup information does not pass the freshness test, then we
        # know that other validators will reject any blocks we try to claim so
        # we need to try to sign up again.
        if consensus_state.validator_signup_was_committed_too_late(
                validator_info=validator_info,
                poet_settings_view=poet_settings_view,
                block_cache=self._block_cache):
            LOGGER.info(
                'Reject building on block %s: Validator signup information '
                'not committed in a timely manner.',
                block_header.previous_block_id[:8])
            self._register_signup_information(
                block_header=block_header,
                poet_enclave_module=poet_enclave_module)
            return False

        # Using the consensus state for the block upon which we want to
        # build, check to see how many blocks we have claimed on this chain
        # with this PoET key.  If we have hit the key block claim limit, then
        # we need to check if the key has been refreshed.
        if consensus_state.validator_has_claimed_block_limit(
                validator_info=validator_info,
                poet_settings_view=poet_settings_view):
            # Because we have hit the limit, check to see if we have already
            # submitted a validator registry transaction with new signup
            # information, and therefore a new PoET public key.  If not, then
            # mark this PoET public key in the store as having been refreshed
            # and register new signup information.  Regardless, since we have
            # hit the key block claim limit, we won't even bother initializing
            # a block on this chain as it will be rejected by other
            # validators.
            poet_key_state = self._poet_key_state_store[active_poet_public_key]
            if not poet_key_state.has_been_refreshed:
                LOGGER.info('Reached block claim limit for key: %s...%s',
                            active_poet_public_key[:8],
                            active_poet_public_key[-8:])

                sealed_signup_data = poet_key_state.sealed_signup_data
                signup_nonce = poet_key_state.signup_nonce
                self._poet_key_state_store[active_poet_public_key] = \
                    PoetKeyState(
                        sealed_signup_data=sealed_signup_data,
                        has_been_refreshed=True,
                        signup_nonce=signup_nonce)

                # Release enclave resources for this identity
                # This signup will be invalid on all forks that use it,
                # even if there is a rollback to a point it should be valid.
                # A more sophisticated policy would be to release signups
                # only at a block depth where finality probability
                # is high.
                SignupInfo.release_signup_data(
                    poet_enclave_module=poet_enclave_module,
                    sealed_signup_data=sealed_signup_data)

                self._register_signup_information(
                    block_header=block_header,
                    poet_enclave_module=poet_enclave_module)

            LOGGER.info(
                'Reject building on block %s: Validator has reached maximum '
                'number of blocks with key pair.',
                block_header.previous_block_id[:8])
            return False

        # Verify that we are abiding by the block claim delay (i.e., waiting a
        # certain number of blocks since our validator registry was added/
        # updated).
        if consensus_state.validator_is_claiming_too_early(
                validator_info=validator_info,
                block_number=block_header.block_num,
                validator_registry_view=validator_registry_view,
                poet_settings_view=poet_settings_view,
                block_store=self._block_cache.block_store):
            LOGGER.info(
                'Reject building on block %s: Validator has not waited long '
                'enough since registering validator information.',
                block_header.previous_block_id[:8])
            return False

        # We need to create a wait timer for the block...this is what we
        # will check when we are asked if it is time to publish the block
        poet_key_state = self._poet_key_state_store[active_poet_public_key]
        sealed_signup_data = poet_key_state.sealed_signup_data
        previous_certificate_id = \
            utils.get_previous_certificate_id(
                block_header=block_header,
                block_cache=self._block_cache,
                poet_enclave_module=poet_enclave_module)
        wait_timer = \
            WaitTimer.create_wait_timer(
                poet_enclave_module=poet_enclave_module,
                sealed_signup_data=sealed_signup_data,
                validator_address=block_header.signer_public_key,
                previous_certificate_id=previous_certificate_id,
                consensus_state=consensus_state,
                poet_settings_view=poet_settings_view)

        # NOTE - we do the zTest after we create the wait timer because we
        # need its population estimate to see if this block would be accepted
        # by other validators based upon the zTest.

        # Check to see whether, if by chance we were able to claim this block,
        # it would result in us winning more frequently than statistically
        # expected.  If so, then refuse to initialize the block because other
        # validators will not accept it anyway.
        if consensus_state.validator_is_claiming_too_frequently(
                validator_info=validator_info,
                previous_block_id=block_header.previous_block_id,
                poet_settings_view=poet_settings_view,
                population_estimate=wait_timer.population_estimate(
                    poet_settings_view=poet_settings_view),
                block_cache=self._block_cache,
                poet_enclave_module=poet_enclave_module):
            LOGGER.info(
                'Reject building on block %s: Validator is claiming blocks '
                'too frequently.', block_header.previous_block_id[:8])
            return False

        # At this point, we know that if we are able to claim the block we are
        # initializing, we will not be prevented from doing so because of PoET
        # policies.

        self._wait_timer = wait_timer
        PoetBlockPublisher._previous_block_id = None

        LOGGER.debug('Created wait timer: %s', self._wait_timer)

        return True
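When the block claim limit is reached, the publisher keeps the old key's sealed data but flags it so it is never used again, then registers fresh signup information. A self-contained sketch of that "mark as refreshed" bookkeeping, using a plain dict and namedtuple in place of the PoET key state store (the field names match those used above; the data is hypothetical):

from collections import namedtuple

PoetKeyState = namedtuple(
    'PoetKeyState',
    ['sealed_signup_data', 'has_been_refreshed', 'signup_nonce'])

store = {'poet_pub_key_1': PoetKeyState('sealed-data', False, 'nonce-1')}

state = store['poet_pub_key_1']
if not state.has_been_refreshed:
    # Keep the sealed data and nonce, but flag the key so this validator
    # never tries to claim another block with it on this chain.
    store['poet_pub_key_1'] = state._replace(has_been_refreshed=True)

assert store['poet_pub_key_1'].has_been_refreshed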
Example #14
    def verify_block(self, block_wrapper):
        """Check that the block received conforms to the consensus rules.

        Args:
            block_wrapper (BlockWrapper): The block to validate.
        Returns:
            Boolean: True if the Block is valid, False if the block is invalid.
        """
        # Get the state view for the previous block in the chain so we can
        # create a PoET enclave and validator registry view
        previous_block = None
        try:
            previous_block = \
                self._block_cache[block_wrapper.previous_block_id]
        except KeyError:
            pass

        state_view = \
            BlockWrapper.state_view_for_block(
                block_wrapper=previous_block,
                state_view_factory=self._state_view_factory)

        poet_enclave_module = \
            factory.PoetEnclaveFactory.get_poet_enclave_module(
                state_view=state_view,
                config_dir=self._config_dir,
                data_dir=self._data_dir)

        validator_registry_view = ValidatorRegistryView(state_view)
        # Grab the validator info based upon the block signer's public
        # key
        try:
            validator_info = \
                validator_registry_view.get_validator_info(
                    block_wrapper.header.signer_public_key)
        except KeyError:
            LOGGER.error(
                'Block %s rejected: Received block from an unregistered '
                'validator %s...%s',
                block_wrapper.identifier[:8],
                block_wrapper.header.signer_public_key[:8],
                block_wrapper.header.signer_public_key[-8:])
            return False

        LOGGER.debug(
            'Block Signer Name=%s, ID=%s...%s, PoET public key='
            '%s...%s',
            validator_info.name,
            validator_info.id[:8],
            validator_info.id[-8:],
            validator_info.signup_info.poet_public_key[:8],
            validator_info.signup_info.poet_public_key[-8:])

        # For the candidate block, reconstitute the wait certificate
        # and verify that it is valid
        wait_certificate = \
            utils.deserialize_wait_certificate(
                block=block_wrapper,
                poet_enclave_module=poet_enclave_module)
        if wait_certificate is None:
            LOGGER.error(
                'Block %s rejected: Block from validator %s (ID=%s...%s) was '
                'not created by PoET consensus module',
                block_wrapper.identifier[:8],
                validator_info.name,
                validator_info.id[:8],
                validator_info.id[-8:])
            return False

        # Get the consensus state and PoET configuration view for the block
        # that is being built upon
        consensus_state = \
            ConsensusState.consensus_state_for_block_id(
                block_id=block_wrapper.previous_block_id,
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                consensus_state_store=self._consensus_state_store,
                poet_enclave_module=poet_enclave_module)
        poet_settings_view = PoetSettingsView(state_view=state_view)

        previous_certificate_id = \
            utils.get_previous_certificate_id(
                block_header=block_wrapper.header,
                block_cache=self._block_cache,
                poet_enclave_module=poet_enclave_module)
        try:
            wait_certificate.check_valid(
                poet_enclave_module=poet_enclave_module,
                previous_certificate_id=previous_certificate_id,
                poet_public_key=validator_info.signup_info.poet_public_key,
                consensus_state=consensus_state,
                poet_settings_view=poet_settings_view)
        except ValueError as error:
            LOGGER.error(
                'Block %s rejected: Wait certificate check failed - %s',
                block_wrapper.identifier[:8],
                error)
            return False

        # Reject the block if the validator signup information fails the
        # freshness check.
        if consensus_state.validator_signup_was_committed_too_late(
                validator_info=validator_info,
                poet_settings_view=poet_settings_view,
                block_cache=self._block_cache):
            LOGGER.error(
                'Block %s rejected: Validator signup information not '
                'committed in a timely manner.',
                block_wrapper.identifier[:8])
            return False

        # Reject the block if the validator has already claimed the key block
        # limit for its current PoET key pair.
        if consensus_state.validator_has_claimed_block_limit(
                validator_info=validator_info,
                poet_settings_view=poet_settings_view):
            LOGGER.error(
                'Block %s rejected: Validator has reached maximum number of '
                'blocks with key pair.',
                block_wrapper.identifier[:8])
            return False

        # Reject the block if the validator has not waited the required number
        # of blocks between when the block containing its validator registry
        # transaction was committed to the chain and trying to claim this
        # block
        if consensus_state.validator_is_claiming_too_early(
                validator_info=validator_info,
                block_number=block_wrapper.block_num,
                validator_registry_view=validator_registry_view,
                poet_settings_view=poet_settings_view,
                block_store=self._block_cache.block_store):
            LOGGER.error(
                'Block %s rejected: Validator has not waited long enough '
                'since registering validator information.',
                block_wrapper.identifier[:8])
            return False

        # Reject the block if the validator is claiming blocks at a rate that
        # is more frequent than is statistically allowed (i.e., zTest)
        if consensus_state.validator_is_claiming_too_frequently(
                validator_info=validator_info,
                previous_block_id=block_wrapper.previous_block_id,
                poet_settings_view=poet_settings_view,
                population_estimate=wait_certificate.population_estimate(
                    poet_settings_view=poet_settings_view),
                block_cache=self._block_cache,
                poet_enclave_module=poet_enclave_module):
            LOGGER.error(
                'Block %s rejected: Validator is claiming blocks too '
                'frequently.',
                block_wrapper.identifier[:8])
            return False

        return True