Example #1
    def __init__(self, consensus_module, block_cache, new_block,
                 state_view_factory, done_cb, executor, squash_handler,
                 identity_signing_key, data_dir, config_dir,
                 permission_verifier):
        """Initialize the BlockValidator
        Args:
            consensus_module: The consensus module that contains the
                implementation of the consensus algorithm to use for
                block validation.
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            new_block: The block to validate.
            state_view_factory: The factory used to create read-only views
                of state for a particular merkle root.
            done_cb: The method to call when block validation is complete.
            executor: The thread pool to process block validations.
            squash_handler: A parameter passed when creating transaction
                schedulers.
            identity_signing_key: Private key for signing blocks.
            data_dir: Path to location where persistent data for the
                consensus module can be stored.
            config_dir: Path to location where config data for the
                consensus module can be found.
            permission_verifier: The delegate for handling permission
                validation on blocks.
        Returns:
            None
        """
        self._consensus_module = consensus_module
        self._block_cache = block_cache
        self._chain_commit_state = ChainCommitState(
            self._block_cache.block_store, [])
        self._new_block = new_block

        # Set during execution of the BlockValidation to the current
        # chain_head at that time.
        self._chain_head = None

        self._state_view_factory = state_view_factory
        self._done_cb = done_cb
        self._executor = executor
        self._squash_handler = squash_handler
        self._identity_signing_key = identity_signing_key
        self._identity_public_key = \
            signing.generate_public_key(self._identity_signing_key)
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._result = {
            'new_block': new_block,
            'chain_head': None,
            'new_chain': [],
            'cur_chain': [],
            'committed_batches': [],
            'uncommitted_batches': [],
            'execution_results': [],
            'num_transactions': 0
        }
        self._permission_verifier = permission_verifier

        self._validation_rule_enforcer = \
            ValidationRuleEnforcer(SettingsViewFactory(state_view_factory))
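
done_cb is just a callable taking (commit_new_block, result), where result is the _result dict built in this constructor. A minimal sketch of such a callback, using only the result keys visible above (the function name is illustrative, not from sawtooth-core):

import logging

LOGGER = logging.getLogger(__name__)

def on_block_validated(commit_new_block, result):
    # Sketch of a done_cb; 'new_block', 'num_transactions' and
    # 'committed_batches' are keys populated by the validator above.
    block = result['new_block']
    if commit_new_block:
        LOGGER.info(
            "Block %s valid; switching chain head (%s txns, %s batches)",
            block, result['num_transactions'],
            len(result['committed_batches']))
    else:
        LOGGER.info("Block %s rejected or chain head unchanged", block)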
Example #2
    def __init__(self,
                 block_cache,
                 state_view_factory,
                 transaction_executor,
                 squash_handler,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 metrics_registry=None,
                 thread_pool=None):
        """Initialize the BlockValidator
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            state_view_factory: A factory that can be used to create read-
                only views of state for a particular merkle root, in
                particular the state as it existed when a particular block
                was the chain head.
            transaction_executor: The transaction executor used to
                process transactions.
            squash_handler: A parameter passed when creating transaction
                schedulers.
            identity_signer: A cryptographic signer for signing blocks.
            data_dir: Path to location where persistent data for the
                consensus module can be stored.
            config_dir: Path to location where config data for the
                consensus module can be found.
            permission_verifier: The delegate for handling permission
                validation on blocks.
            metrics_registry: (Optional) Pyformance metrics registry handle for
                creating new metrics.
            thread_pool: (Optional) Executor pool used to submit block
                validation jobs. If not specified, a default will be created.
        Returns:
            None
        """
        self._block_cache = block_cache
        self._state_view_factory = state_view_factory
        self._transaction_executor = transaction_executor
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._permission_verifier = permission_verifier

        self._validation_rule_enforcer = ValidationRuleEnforcer(
            SettingsViewFactory(state_view_factory))

        self._thread_pool = InstrumentedThreadPoolExecutor(1) \
            if thread_pool is None else thread_pool

        if metrics_registry:
            self._moved_to_fork_count = CounterWrapper(
                metrics_registry.counter('chain_head_moved_to_fork_count'))
        else:
            self._moved_to_fork_count = CounterWrapper()
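
metrics_registry is a Pyformance registry; a standalone sketch of the counter behavior that CounterWrapper presumably delegates to (illustrative, outside the validator):

from pyformance import MetricsRegistry

registry = MetricsRegistry()
counter = registry.counter('chain_head_moved_to_fork_count')
counter.inc()  # what _moved_to_fork_count.inc() ultimately increments
assert counter.get_count() == 1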
Example #3
class BlockValidator(object):
    """
    Responsible for validating a block. Handles both chain extensions and
    forks; determines whether the new block should become the head of the
    chain and returns the information necessary to make the switch if
    necessary.
    """
    def __init__(self,
                 block_cache,
                 state_view_factory,
                 transaction_executor,
                 squash_handler,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 metrics_registry=None,
                 thread_pool=None):
        """Initialize the BlockValidator
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            state_view_factory: A factory that can be used to create read-
                only views of state for a particular merkle root, in
                particular the state as it existed when a particular block
                was the chain head.
            transaction_executor: The transaction executor used to
                process transactions.
            squash_handler: A parameter passed when creating transaction
                schedulers.
            identity_signer: A cryptographic signer for signing blocks.
            data_dir: Path to location where persistent data for the
                consensus module can be stored.
            config_dir: Path to location where config data for the
                consensus module can be found.
            permission_verifier: The delegate for handling permission
                validation on blocks.
            metrics_registry: (Optional) Pyformance metrics registry handle for
                creating new metrics.
            thread_pool: (Optional) Executor pool used to submit block
                validation jobs. If not specified, a default will be created.
        Returns:
            None
        """
        self._block_cache = block_cache
        self._state_view_factory = state_view_factory
        self._transaction_executor = transaction_executor
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._permission_verifier = permission_verifier

        self._validation_rule_enforcer = ValidationRuleEnforcer(
            SettingsViewFactory(state_view_factory))

        self._thread_pool = InstrumentedThreadPoolExecutor(1) \
            if thread_pool is None else thread_pool

        if metrics_registry:
            self._moved_to_fork_count = CounterWrapper(
                metrics_registry.counter('chain_head_moved_to_fork_count'))
        else:
            self._moved_to_fork_count = CounterWrapper()

        # Blocks that are currently being processed
        self._blocks_processing = ConcurrentSet()

        # Descendant blocks that are waiting for an in process block
        # to complete
        self._blocks_pending = ConcurrentMultiMap()

    def stop(self):
        self._thread_pool.shutdown(wait=True)

    def _get_previous_block_state_root(self, blkw):
        if blkw.previous_block_id == NULL_BLOCK_IDENTIFIER:
            return INIT_ROOT_KEY

        return self._block_cache[blkw.previous_block_id].state_root_hash

    def _validate_batches_in_block(self, blkw, prev_state_root):
        """
        Validate all batches in the block. This includes:
            - Validating all transaction dependencies are met
            - Validating there are no duplicate batches or transactions
            - Validating execution of all batches in the block produces the
              correct state root hash

        Args:
            blkw: the block of batches to validate
            prev_state_root: the state root to execute transactions on top of

        Raises:
            BlockValidationError:
                If validation fails, raises this error with the reason.
            MissingDependency:
                Validation failed because of a missing dependency.
            DuplicateTransaction:
                Validation failed because of a duplicate transaction.
            DuplicateBatch:
                Validation failed because of a duplicate batch.
        """
        if not blkw.block.batches:
            return

        try:
            chain_commit_state = ChainCommitState(
                blkw.previous_block_id, self._block_cache,
                self._block_cache.block_store)

            scheduler = self._transaction_executor.create_scheduler(
                self._squash_handler, prev_state_root)
            self._transaction_executor.execute(scheduler)

            chain_commit_state.check_for_duplicate_batches(blkw.block.batches)

            transactions = []
            for batch in blkw.block.batches:
                transactions.extend(batch.transactions)

            chain_commit_state.check_for_duplicate_transactions(transactions)

            chain_commit_state.check_for_transaction_dependencies(transactions)

            for batch, has_more in look_ahead(blkw.block.batches):
                if has_more:
                    scheduler.add_batch(batch)
                else:
                    scheduler.add_batch(batch, blkw.state_root_hash)

        except (DuplicateBatch, DuplicateTransaction,
                MissingDependency) as err:
            scheduler.cancel()
            raise BlockValidationError("Block {} failed validation: {}".format(
                blkw, err))

        except Exception:
            scheduler.cancel()
            raise

        scheduler.finalize()
        scheduler.complete(block=True)
        state_hash = None

        for batch in blkw.batches:
            batch_result = scheduler.get_batch_execution_result(
                batch.header_signature)
            if batch_result is not None and batch_result.is_valid:
                txn_results = \
                    scheduler.get_transaction_execution_results(
                        batch.header_signature)
                blkw.execution_results.extend(txn_results)
                state_hash = batch_result.state_hash
                blkw.num_transactions += len(batch.transactions)
            else:
                raise BlockValidationError(
                    "Block {} failed validation: Invalid batch "
                    "{}".format(blkw, batch))

        if blkw.state_root_hash != state_hash:
            raise BlockValidationError(
                "Block {} failed state root hash validation. Expected {}"
                " but got {}".format(blkw, blkw.state_root_hash, state_hash))

    def _validate_permissions(self, blkw, prev_state_root):
        """
        Validate that all of the batch signers and transaction signers for the
        batches in the block are permitted by the transactor permissioning
        roles stored in state as of the previous block. If a transactor is
        found to not be permitted, the block is invalid.
        """
        if blkw.block_num != 0:
            for batch in blkw.batches:
                if not self._permission_verifier.is_batch_signer_authorized(
                        batch, prev_state_root, from_state=True):
                    return False
        return True

    def _validate_on_chain_rules(self, blkw, prev_state_root):
        """
        Validate that the block conforms to all validation rules stored in
        state. If the block breaks any of the stored rules, the block is
        invalid.
        """
        if blkw.block_num != 0:
            return self._validation_rule_enforcer.validate(
                blkw, prev_state_root)
        return True

    def validate_block(self, blkw, chain_head=None):
        if blkw.status == BlockStatus.Valid:
            return
        elif blkw.status == BlockStatus.Invalid:
            raise BlockValidationError(
                'Block {} is already invalid'.format(blkw))

        # pylint: disable=broad-except
        try:
            if chain_head is None:
                # Try to get the chain head from the block store; note that the
                # block store may also return None for the chain head if a
                # genesis block hasn't been committed yet.
                chain_head = self._block_cache.block_store.chain_head

            try:
                prev_state_root = self._get_previous_block_state_root(blkw)
            except KeyError:
                raise BlockValidationError(
                    'Block {} rejected due to missing predecessor'.format(
                        blkw))

            if not self._validate_permissions(blkw, prev_state_root):
                raise BlockValidationError(
                    'Block {} failed permission validation'.format(blkw))

            try:
                prev_block = self._block_cache[blkw.previous_block_id]
            except KeyError:
                prev_block = None

            consensus = self._load_consensus(prev_block)
            public_key = \
                self._identity_signer.get_public_key().as_hex()
            consensus_block_verifier = consensus.BlockVerifier(
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                data_dir=self._data_dir,
                config_dir=self._config_dir,
                validator_id=public_key)

            if not consensus_block_verifier.verify_block(blkw):
                raise BlockValidationError(
                    'Block {} failed {} consensus validation'.format(
                        blkw, consensus))

            if not self._validate_on_chain_rules(blkw, prev_state_root):
                raise BlockValidationError(
                    'Block {} failed on-chain validation rules'.format(blkw))

            self._validate_batches_in_block(blkw, prev_state_root)

            # since changes to the chain-head can change the state of the
            # blocks in BlockStore we have to revalidate this block.
            block_store = self._block_cache.block_store

            # The chain_head is None when this is the genesis block or if the
            # block store has no chain_head.
            if chain_head is not None:
                if chain_head.identifier != block_store.chain_head.identifier:
                    raise ChainHeadUpdated()

            blkw.status = BlockStatus.Valid

        except BlockValidationError:
            blkw.status = BlockStatus.Invalid
            raise

        except ChainHeadUpdated:
            raise

        except Exception:
            LOGGER.exception(
                "Unhandled exception BlockValidator.validate_block()")
            raise

    @staticmethod
    def _compare_chain_height(head_a, head_b):
        """Returns True if head_a is taller, False if head_b is taller, and
        True if the heights are the same."""
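        # For example: heights 10 vs 8 -> True; 8 vs 10 -> False; 8 vs 8 ->
        # True, so a tie is resolved in favor of head_a (the current chain
        # head, as called from process_block_verification below).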
        return head_a.block_num - head_b.block_num >= 0

    def _build_fork_diff_to_common_height(self, head_long, head_short):
        """Returns a list of blocks on the longer chain since the greatest
        common height between the two chains. Note that the chains may not
        have the same block id at the greatest common height.

        Args:
            head_long (BlockWrapper)
            head_short (BlockWrapper)

        Returns:
            (BlockWrapper, list of BlockWrapper) The block on the longer
            chain at the common height, and all blocks in the longer chain
            since the last block in the shorter chain, ordered newest to
            oldest.

        Raises:
            BlockValidationError
                The block is missing a predecessor. Note that normally this
                shouldn't happen because of the completer."""
        fork_diff = []

        last = head_short.block_num
        blk = head_long

        while blk.block_num > last:
            if blk.previous_block_id == NULL_BLOCK_IDENTIFIER:
                break

            fork_diff.append(blk)
            try:
                blk = self._block_cache[blk.previous_block_id]
            except KeyError:
                LOGGER.debug(
                    "Failed to build fork diff due to missing predecessor: %s",
                    blk)

                # Mark all blocks in the longer chain since the invalid block
                # as invalid.
                for fork_blk in fork_diff:
                    fork_blk.status = BlockStatus.Invalid
                raise BlockValidationError(
                    'Failed to build fork diff: block {} missing predecessor'.
                    format(blk))

        return blk, fork_diff

    def _extend_fork_diff_to_common_ancestor(self, new_blkw, cur_blkw,
                                             new_chain, cur_chain):
        """ Finds a common ancestor of the two chains. new_blkw and cur_blkw
        must be at the same height, or this will always fail.
        """
        while cur_blkw.identifier != new_blkw.identifier:
            if (cur_blkw.previous_block_id == NULL_BLOCK_IDENTIFIER
                    or new_blkw.previous_block_id == NULL_BLOCK_IDENTIFIER):
                # We are at a genesis block and the blocks are not the same
                for b in new_chain:
                    b.status = BlockStatus.Invalid
                raise BlockValidationError(
                    'Block {} rejected due to wrong genesis {}'.format(
                        cur_blkw, new_blkw))

            new_chain.append(new_blkw)
            try:
                new_blkw = self._block_cache[new_blkw.previous_block_id]
            except KeyError:
                for b in new_chain:
                    b.status = BlockStatus.Invalid
                raise BlockValidationError(
                    'Block {} rejected due to missing predecessor {}'.format(
                        new_blkw, new_blkw.previous_block_id))

            cur_chain.append(cur_blkw)
            cur_blkw = self._block_cache[cur_blkw.previous_block_id]

    def _compare_forks_consensus(self, chain_head, new_block):
        """Ask the consensus module which fork to choose.
        """
        public_key = self._identity_signer.get_public_key().as_hex()
        consensus = self._load_consensus(chain_head)
        fork_resolver = consensus.ForkResolver(
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            data_dir=self._data_dir,
            config_dir=self._config_dir,
            validator_id=public_key)

        return fork_resolver.compare_forks(chain_head, new_block)

    def _load_consensus(self, block):
        """Load the consensus module using the state as of the given block."""
        if block is not None:
            return ConsensusFactory.get_configured_consensus_module(
                block.header_signature,
                BlockWrapper.state_view_for_block(block,
                                                  self._state_view_factory))
        return ConsensusFactory.get_consensus_module('genesis')

    @staticmethod
    def _get_batch_commit_changes(new_chain, cur_chain):
        """
        Get all the batches that should be committed from the new chain and
        all the batches that should be uncommitted from the current chain.
        """
        committed_batches = []
        for blkw in new_chain:
            for batch in blkw.batches:
                committed_batches.append(batch)

        uncommitted_batches = []
        for blkw in cur_chain:
            for batch in blkw.batches:
                uncommitted_batches.append(batch)

        return (committed_batches, uncommitted_batches)

    def submit_blocks_for_verification(self, blocks, callback):
        for block in blocks:
            if self.in_process(block.header_signature):
                LOGGER.debug("Block already in process: %s", block)
                continue

            if self.in_process(block.previous_block_id):
                LOGGER.debug(
                    "Previous block '%s' in process,"
                    " adding '%s' to pending", block.previous_block_id, block)
                self._add_block_to_pending(block)
                continue

            if self.in_pending(block.previous_block_id):
                LOGGER.debug(
                    "Previous block '%s' is pending,"
                    " adding '%s' to pending", block.previous_block_id, block)
                self._add_block_to_pending(block)
                continue

            LOGGER.debug("Adding block %s for processing", block.identifier)

            # Add the block to the set of blocks being processed
            self._blocks_processing.add(block.identifier)

            # Schedule the block for processing
            self._thread_pool.submit(self.process_block_verification, block,
                                     self._wrap_callback(block, callback))

    def _wrap_callback(self, block, callback):
        # Internal cleanup after verification
        def wrapper(commit_new_block, result):
            LOGGER.debug("Removing block from processing %s",
                         block.identifier[:6])
            try:
                self._blocks_processing.remove(block.identifier)
            except KeyError:
                LOGGER.warning(
                    "Tried to remove block from in process but it"
                    " wasn't in process: %s", block.identifier)

            # If the block is valid, submit any pending descendants for
            # verification; if it is invalid, mark all descendants invalid
            # and remove them from pending.
            if block.status == BlockStatus.Valid:
                blocks_now_ready = self._blocks_pending.pop(
                    block.identifier, [])
                self.submit_blocks_for_verification(blocks_now_ready, callback)

            else:
                # Gather the pending descendants so they can be marked invalid
                blocks_now_invalid = self._blocks_pending.pop(
                    block.identifier, [])

                while blocks_now_invalid:
                    invalid_block = blocks_now_invalid.pop()
                    invalid_block.status = BlockStatus.Invalid

                    LOGGER.debug('Marking descendant block invalid: %s',
                                 invalid_block)

                    # Get descendants of the descendant
                    blocks_now_invalid.extend(
                        self._blocks_pending.pop(invalid_block.identifier, []))

            callback(commit_new_block, result)

        return wrapper

    def in_process(self, block_id):
        return block_id in self._blocks_processing

    def in_pending(self, block_id):
        return block_id in self._blocks_pending

    def _add_block_to_pending(self, block):
        previous = block.previous_block_id
        self._blocks_pending.append(previous, block)

    def process_block_verification(self, block, callback):
        """
        Main entry for block validation. Takes a candidate block, decides
        whether it is valid, and if so determines whether it should become
        the new chain head. Returns the results to the ChainController so
        that the changeover can be made if necessary.
        """
        try:
            result = BlockValidationResult(block)
            LOGGER.info("Starting block validation of : %s", block)

            # Get the current chain_head and store it in the result
            chain_head = self._block_cache.block_store.chain_head
            result.chain_head = chain_head

            # Create new local variables for current and new block, since
            # these variables get modified later
            current_block = chain_head
            new_block = block

            try:
                # Get all the blocks since the greatest common height from the
                # longer chain.
                if self._compare_chain_height(current_block, new_block):
                    current_block, result.current_chain =\
                        self._build_fork_diff_to_common_height(
                            current_block, new_block)
                else:
                    new_block, result.new_chain =\
                        self._build_fork_diff_to_common_height(
                            new_block, current_block)

                # Add blocks to the two chains until a common ancestor is found
                # or raise an exception if no common ancestor is found
                self._extend_fork_diff_to_common_ancestor(
                    new_block, current_block, result.new_chain,
                    result.current_chain)
            except BlockValidationError as err:
                LOGGER.warning('%s', err)
                callback(False, result)
                return

            valid = True
            for blk in reversed(result.new_chain):
                if valid:
                    try:
                        self.validate_block(blk, chain_head)
                    except BlockValidationError as err:
                        LOGGER.warning('Block %s failed validation: %s', blk,
                                       err)
                        valid = False
                    result.transaction_count += blk.num_transactions
                else:
                    LOGGER.info(
                        "Block marked invalid (invalid predecessor): %s", blk)
                    blk.status = BlockStatus.Invalid

            if not valid:
                callback(False, result)
                return

            # Ask consensus if the new chain should be committed
            LOGGER.info(
                "Comparing current chain head '%s' against new block '%s'",
                chain_head, new_block)
            for i in range(
                    max(len(result.new_chain), len(result.current_chain))):
                cur = new = num = "-"
                if i < len(result.current_chain):
                    cur = result.current_chain[i].header_signature[:8]
                    num = result.current_chain[i].block_num
                if i < len(result.new_chain):
                    new = result.new_chain[i].header_signature[:8]
                    num = result.new_chain[i].block_num
                LOGGER.info(
                    "Fork comparison at height %s is between %s and %s", num,
                    cur, new)

            commit_new_chain = self._compare_forks_consensus(chain_head, block)

            # If committing the new chain, get the list of committed batches
            # from the current chain that need to be uncommitted and the list
            # of uncommitted batches from the new chain that need to be
            # committed.
            if commit_new_chain:
                commit, uncommit =\
                    self._get_batch_commit_changes(
                        result.new_chain, result.current_chain)
                result.committed_batches = commit
                result.uncommitted_batches = uncommit

                if result.new_chain[0].previous_block_id \
                        != chain_head.identifier:
                    self._moved_to_fork_count.inc()

            # Pass the results to the callback function
            callback(commit_new_chain, result)
            LOGGER.info("Finished block validation of: %s", block)

        except ChainHeadUpdated:
            callback(False, result)
            return
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception(
                "Block validation failed with unexpected error: %s", block)
            # callback to clean up the block out of the processing list.
            callback(False, result)
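
This example tracks in-flight and pending blocks with ConcurrentSet and ConcurrentMultiMap from Sawtooth's concurrent utilities. A minimal, thread-safe stand-in for the multimap operations used above (append, pop with a default, and membership); this is an illustrative reimplementation, not the sawtooth-core class:

import threading
from collections import defaultdict

class ConcurrentMultiMap:
    """Illustrative stand-in: maps a key to a list of values, thread-safely."""

    def __init__(self):
        self._lock = threading.Lock()
        self._map = defaultdict(list)

    def append(self, key, value):
        # _add_block_to_pending(): queue a block behind its predecessor
        with self._lock:
            self._map[key].append(value)

    def pop(self, key, default=None):
        # _wrap_callback(): drain all blocks waiting on a finished predecessor
        with self._lock:
            return self._map.pop(key, default)

    def __contains__(self, key):
        # in_pending()
        with self._lock:
            return key in self._map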
Example #4
class BlockValidator(object):
    """
    Responsible for validating a block. Handles both chain extensions and
    forks; determines whether the new block should become the head of the
    chain and returns the information necessary to make the switch if
    necessary.
    """
    def __init__(self, consensus_module, block_cache, new_block,
                 state_view_factory, done_cb, executor, squash_handler,
                 identity_signer, data_dir, config_dir, permission_verifier):
        """Initialize the BlockValidator
        Args:
            consensus_module: The consensus module that contains the
                implementation of the consensus algorithm to use for
                block validation.
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            new_block: The block to validate.
            state_view_factory: The factory used to create read-only views
                of state for a particular merkle root.
            done_cb: The method to call when block validation is complete.
            executor: The thread pool to process block validations.
            squash_handler: A parameter passed when creating transaction
                schedulers.
            identity_signer: A cryptographic signer for signing blocks.
            data_dir: Path to location where persistent data for the
                consensus module can be stored.
            config_dir: Path to location where config data for the
                consensus module can be found.
            permission_verifier: The delegate for handling permission
                validation on blocks.
        Returns:
            None
        """
        self._consensus_module = consensus_module
        self._block_cache = block_cache
        self._chain_commit_state = ChainCommitState(
            self._block_cache.block_store, [])
        self._new_block = new_block

        # Set during execution of the BlockValidation to the current
        # chain_head at that time.
        self._chain_head = None

        self._state_view_factory = state_view_factory
        self._done_cb = done_cb
        self._executor = executor
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._result = {
            'new_block': new_block,
            'chain_head': None,
            'new_chain': [],
            'cur_chain': [],
            'committed_batches': [],
            'uncommitted_batches': [],
            'execution_results': [],
            'num_transactions': 0
        }
        self._permission_verifier = permission_verifier

        self._validation_rule_enforcer = \
            ValidationRuleEnforcer(SettingsViewFactory(state_view_factory))

    def _get_previous_block_root_state_hash(self, blkw):
        if blkw.previous_block_id == NULL_BLOCK_IDENTIFIER:
            return INIT_ROOT_KEY

        return self._block_cache[blkw.previous_block_id].state_root_hash

    def _txn_header(self, txn):
        txn_hdr = TransactionHeader()
        txn_hdr.ParseFromString(txn.header)
        return txn_hdr

    def _verify_batch_transactions(self, batch):
        """Verify that all transactions in are unique and that all
        transactions dependencies in this batch have been satisfied, ie
        already committed by this block or prior block in the chain.

        :param batch: the batch to verify
        :return:
        Boolean: True if all dependencies are present and all transactions
        are unique.
        """
        for txn in batch.transactions:
            txn_hdr = self._txn_header(txn)
            if self._chain_commit_state. \
                    has_transaction(txn.header_signature):
                LOGGER.debug(
                    "Block rejected due to duplicate transaction: %s",
                    txn.header_signature[:8])
                raise InvalidBatch()
            for dep in txn_hdr.dependencies:
                if not self._chain_commit_state.has_transaction(dep):
                    LOGGER.debug(
                        "Block rejected due to missing "
                        "transaction dependency, transaction %s "
                        "depends on %s", txn.header_signature[:8], dep[:8])
                    raise InvalidBatch()
            self._chain_commit_state.add_txn(txn.header_signature)

    def _verify_block_batches(self, blkw):
        if blkw.block.batches:
            prev_state = self._get_previous_block_root_state_hash(blkw)
            scheduler = self._executor.create_scheduler(
                self._squash_handler, prev_state)
            self._executor.execute(scheduler)
            try:
                for batch, has_more in look_ahead(blkw.block.batches):
                    if self._chain_commit_state.has_batch(
                            batch.header_signature):
                        LOGGER.debug(
                            "Block(%s) rejected due to duplicate "
                            "batch, batch: %s", blkw,
                            batch.header_signature[:8])
                        raise InvalidBatch()

                    self._verify_batch_transactions(batch)
                    self._chain_commit_state.add_batch(batch,
                                                       add_transactions=False)
                    if has_more:
                        scheduler.add_batch(batch)
                    else:
                        scheduler.add_batch(batch, blkw.state_root_hash)
            except InvalidBatch:
                LOGGER.debug(
                    "Invalid batch %s encountered during "
                    "verification of block %s", batch.header_signature[:8],
                    blkw)
                scheduler.cancel()
                return False
            except Exception:
                scheduler.cancel()
                raise

            scheduler.finalize()
            scheduler.complete(block=True)
            state_hash = None

            for batch in blkw.batches:
                batch_result = scheduler.get_batch_execution_result(
                    batch.header_signature)
                if batch_result is not None and batch_result.is_valid:
                    txn_results = \
                        scheduler.get_transaction_execution_results(
                            batch.header_signature)
                    self._result["execution_results"].extend(txn_results)
                    state_hash = batch_result.state_hash
                    self._result["num_transactions"] = \
                        self._result["num_transactions"] \
                        + len(batch.transactions)
                else:
                    return False
            if blkw.state_root_hash != state_hash:
                LOGGER.debug(
                    "Block(%s) rejected due to state root hash "
                    "mismatch: %s != %s", blkw, blkw.state_root_hash,
                    state_hash)
                return False
        return True

    def _validate_permissions(self, blkw):
        """
        Validate that all of the batch signers and transaction signers for the
        batches in the block are permitted by the transactor permissioning
        roles stored in state as of the previous block. If a transactor is
        found to not be permitted, the block is invalid.
        """
        if blkw.block_num != 0:
            try:
                state_root = self._get_previous_block_root_state_hash(blkw)
            except KeyError:
                LOGGER.debug(
                    "Block rejected due to missing predecessor: %s", blkw)
                return False
            for batch in blkw.batches:
                if not self._permission_verifier.is_batch_signer_authorized(
                        batch, state_root):
                    return False
        return True

    def _validate_on_chain_rules(self, blkw):
        """
        Validate that the block conforms to all validation rules stored in
        state. If the block breaks any of the stored rules, the block is
        invalid.
        """
        if blkw.block_num != 0:
            try:
                state_root = self._get_previous_block_root_state_hash(blkw)
            except KeyError:
                LOGGER.debug(
                    "Block rejected due to missing predecessor: %s", blkw)
                return False
            return self._validation_rule_enforcer.validate(blkw, state_root)
        return True

    def validate_block(self, blkw):
        # pylint: disable=broad-except
        try:
            if blkw.status == BlockStatus.Valid:
                return True
            elif blkw.status == BlockStatus.Invalid:
                return False
            else:
                valid = self._validate_permissions(blkw)

                if valid:
                    public_key = \
                        self._identity_signer.get_public_key().as_hex()
                    consensus = self._consensus_module.BlockVerifier(
                        block_cache=self._block_cache,
                        state_view_factory=self._state_view_factory,
                        data_dir=self._data_dir,
                        config_dir=self._config_dir,
                        validator_id=public_key)
                    valid = consensus.verify_block(blkw)

                if valid:
                    valid = self._validate_on_chain_rules(blkw)

                if valid:
                    valid = self._verify_block_batches(blkw)

                # since changes to the chain-head can change the state of the
                # blocks in BlockStore we have to revalidate this block.
                block_store = self._block_cache.block_store
                if (self._chain_head is not None
                        and self._chain_head.identifier !=
                        block_store.chain_head.identifier):
                    raise ChainHeadUpdated()

                blkw.status = (BlockStatus.Valid
                               if valid else BlockStatus.Invalid)
                return valid
        except ChainHeadUpdated:
            raise
        except Exception:
            LOGGER.exception(
                "Unhandled exception BlockValidator.validate_block()")
            return False

    def _find_common_height(self, new_chain, cur_chain):
        """
        Walk back on the longer chain until we find a predecessor that is the
        same height as the other chain.
        The blocks are recorded in the corresponding lists
        and the blocks at the same height are returned
        """
        new_blkw = self._new_block
        cur_blkw = self._chain_head
        # 1) find the common ancestor of this block in the current chain
        # Walk back until we have both chains at the same length

        # Walk back the new chain to find the block that is the
        # same height as the current head.
        if new_blkw.block_num > cur_blkw.block_num:
            # new chain is longer
            # walk the new chain back until we find the block that is the
            # same height as the current head.
            while new_blkw.block_num > cur_blkw.block_num and \
                    new_blkw.previous_block_id != NULL_BLOCK_IDENTIFIER:
                new_chain.append(new_blkw)
                try:
                    new_blkw = self._block_cache[new_blkw.previous_block_id]
                except KeyError:
                    LOGGER.debug(
                        "Block rejected due to missing predecessor: %s",
                        new_blkw)
                    for b in new_chain:
                        b.status = BlockStatus.Invalid
                    raise BlockValidationAborted()
        elif new_blkw.block_num < cur_blkw.block_num:
            # current chain is longer
            # walk the current chain back until we find the block that is the
            # same height as the new chain.
            while cur_blkw.block_num > new_blkw.block_num \
                    and new_blkw.previous_block_id != NULL_BLOCK_IDENTIFIER:
                cur_chain.append(cur_blkw)
                cur_blkw = self._block_cache[cur_blkw.previous_block_id]
        return (new_blkw, cur_blkw)

    def _find_common_ancestor(self, new_blkw, cur_blkw, new_chain, cur_chain):
        """ Finds a common ancestor of the two chains.
        """
        while cur_blkw.identifier != new_blkw.identifier:
            if (cur_blkw.previous_block_id == NULL_BLOCK_IDENTIFIER
                    or new_blkw.previous_block_id == NULL_BLOCK_IDENTIFIER):
                # We are at a genesis block and the blocks are not the same
                LOGGER.info("Block rejected due to wrong genesis: %s %s",
                            cur_blkw, new_blkw)
                for b in new_chain:
                    b.status = BlockStatus.Invalid
                raise BlockValidationAborted()
            new_chain.append(new_blkw)
            try:
                new_blkw = self._block_cache[new_blkw.previous_block_id]
            except KeyError:
                LOGGER.debug(
                    "Block rejected due to missing predecessor: %s",
                    new_blkw)
                for b in new_chain:
                    b.status = BlockStatus.Invalid
                raise BlockValidationAborted()

            cur_chain.append(cur_blkw)
            cur_blkw = self._block_cache[cur_blkw.previous_block_id]

    def _test_commit_new_chain(self):
        """ Compare the two chains and determine which should be the head.
        """
        public_key = self._identity_signer.get_public_key().as_hex()
        fork_resolver = self._consensus_module.ForkResolver(
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            data_dir=self._data_dir,
            config_dir=self._config_dir,
            validator_id=public_key)

        return fork_resolver.compare_forks(self._chain_head, self._new_block)

    def _compute_batch_change(self, new_chain, cur_chain):
        """
        Compute the batch change sets.
        """
        committed_batches = []
        for blkw in new_chain:
            for batch in blkw.batches:
                committed_batches.append(batch)

        uncommitted_batches = []
        for blkw in cur_chain:
            for batch in blkw.batches:
                uncommitted_batches.append(batch)

        return (committed_batches, uncommitted_batches)

    def run(self):
        """
        Main entry for block validation. Takes a candidate block, decides
        whether it is valid, and if so determines whether it should become
        the new chain head. Returns the results to the ChainController so
        that the changeover can be made if necessary.
        """
        try:
            LOGGER.info("Starting block validation of : %s", self._new_block)
            cur_chain = self._result["cur_chain"]  # ordered list of the
            # current chain blocks
            new_chain = self._result["new_chain"]  # ordered list of the new
            # chain blocks

            # get the current chain_head.
            self._chain_head = self._block_cache.block_store.chain_head
            self._result['chain_head'] = self._chain_head

            # 1) Find the common ancestor block, the root of the fork.
            # walk back till both chains are the same height
            (new_blkw,
             cur_blkw) = self._find_common_height(new_chain, cur_chain)

            # 2) Walk back until we find the common ancestor
            self._find_common_ancestor(new_blkw, cur_blkw, new_chain,
                                       cur_chain)

            # 3) Determine the validity of the new fork
            # build the transaction cache to simulate the state of the
            # chain at the common root.
            self._chain_commit_state = ChainCommitState(
                self._block_cache.block_store, cur_chain)

            valid = True
            for block in reversed(new_chain):
                if valid:
                    if not self.validate_block(block):
                        LOGGER.info("Block validation failed: %s", block)
                        valid = False
                else:
                    LOGGER.info(
                        "Block marked invalid (invalid predecessor): %s",
                        block)
                    block.status = BlockStatus.Invalid

            if not valid:
                self._done_cb(False, self._result)
                return

            # 4) Evaluate the 2 chains to see if the new chain should be
            # committed
            commit_new_chain = self._test_commit_new_chain()

            # 5) Consensus to compute batch sets (only if we are switching).
            if commit_new_chain:
                (self._result["committed_batches"],
                 self._result["uncommitted_batches"]) =\
                    self._compute_batch_change(new_chain, cur_chain)

            # 6) Tell the journal we are done.
            self._done_cb(commit_new_chain, self._result)
            LOGGER.info("Finished block validation of: %s", self._new_block)
        except (BlockValidationAborted, ChainHeadUpdated):
            self._done_cb(False, self._result)
            return
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception(
                "Block validation failed with unexpected error: %s",
                self._new_block)
            # callback to clean up the block out of the processing list.
            self._done_cb(False, self._result)
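
_verify_block_batches iterates batches with look_ahead so that the expected state root is passed only with the final batch. A sketch of a helper consistent with that usage (illustrative; not necessarily the sawtooth-core implementation):

def look_ahead(iterable):
    """Yield (item, has_more); has_more is False only for the last item."""
    iterator = iter(iterable)
    try:
        current = next(iterator)
    except StopIteration:
        return
    for upcoming in iterator:
        yield current, True
        current = upcoming
    yield current, False

# list(look_ahead([1, 2, 3])) == [(1, True), (2, True), (3, False)]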
Example #5
    def setUp(self):
        self._settings_view_factory = MockSettingsViewFactory()
        self._validation_rule_enforcer = ValidationRuleEnforcer(
            self._settings_view_factory)
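
MockSettingsViewFactory is a test double defined elsewhere in the test module. One plausible shape for it, assuming ValidationRuleEnforcer reads rules via create_settings_view(state_root).get_setting(key), mirroring the real SettingsViewFactory API; both are assumptions here:

class MockSettingsView:
    """Illustrative mock settings view backed by a plain dict."""

    def __init__(self, settings):
        self._settings = settings

    def get_setting(self, key, default_value=None):
        return self._settings.get(key, default_value)


class MockSettingsViewFactory:
    """Illustrative stand-in: settings are shared across all state roots."""

    def __init__(self):
        self._settings = {}

    def add_setting(self, key, value):
        self._settings[key] = value

    def create_settings_view(self, state_root_hash):
        return MockSettingsView(self._settings)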
Example #6
class ValidationRuleEnforcerTest(unittest.TestCase):
    def setUp(self):
        self._settings_view_factory = MockSettingsViewFactory()
        self._validation_rule_enforcer = ValidationRuleEnforcer(
            self._settings_view_factory)

    def _make_block(self,
                    txns_family,
                    signer_public_key,
                    same_public_key=True):
        transactions = []
        for family in txns_family:
            txn_header = TransactionHeader(family_name=family,
                                           signer_public_key=signer_public_key)
            txn = Transaction(header=txn_header.SerializeToString())
            transactions.append(txn)

        batch = Batch(transactions=transactions)
        if same_public_key:
            block_header = BlockHeader(signer_public_key=signer_public_key)
        else:
            block_header = BlockHeader(signer_public_key="other")
        block = Block(header=block_header.SerializeToString(), batches=[batch])
        return BlockWrapper(block)

    def test_no_setting(self):
        """
        Test that if no validation rules are set, the block is valid.
        """
        blkw = self._make_block(["intkey"], "pub_key")
        self.assertTrue(
            self._validation_rule_enforcer.validate(blkw, "state_root"))

    def test_n_of_x(self):
        """
        Test that if NofX Rule is set, the validation rule is checked
        correctly. Test:
            1. Valid Block, has one or fewer intkey transactions.
            2. Invalid Block, too many intkey transactions.
            3. Valid Block, ignore rule because it is formatted incorrectly.
        """
        blkw = self._make_block(["intkey"], "pub_key")
        self._settings_view_factory.add_setting(
            "sawtooth.validator.block_validation_rules", "NofX:1,intkey")

        self.assertTrue(
            self._validation_rule_enforcer.validate(blkw, "state_root"))

        self._settings_view_factory.add_setting(
            "sawtooth.validator.block_validation_rules", "NofX:0,intkey")

        self.assertFalse(
            self._validation_rule_enforcer.validate(blkw, "state_root"))

        self._settings_view_factory.add_setting(
            "sawtooth.validator.block_validation_rules", "NofX:0")

        self.assertTrue(
            self._validation_rule_enforcer.validate(blkw, "state_root"))

    def test_x_at_y(self):
        """
        Test that if XatY Rule is set, the validation rule is checked
        correctly. Test:
            1. Valid Block, has intkey at the 0th position.
            2. Invalid Block, does not have a blockinfo txn at the 0th
               position.
            3. Valid Block, ignore rule because it is formatted incorrectly.
        """
        blkw = self._make_block(["intkey"], "pub_key")
        self._settings_view_factory.add_setting(
            "sawtooth.validator.block_validation_rules", "XatY:intkey,0")

        self.assertTrue(
            self._validation_rule_enforcer.validate(blkw, "state_root"))

        self._settings_view_factory.add_setting(
            "sawtooth.validator.block_validation_rules", "XatY:blockinfo,0")

        self.assertFalse(
            self._validation_rule_enforcer.validate(blkw, "state_root"))

        self._settings_view_factory.add_setting(
            "sawtooth.validator.block_validation_rules", "XatY:0")

        self.assertTrue(
            self._validation_rule_enforcer.validate(blkw, "state_root"))

    def test_local(self):
        """
        Test that if local Rule is set, the validation rule is checked
        correctly. Test:
            1. Valid Block, first transaction is signed by the same signer as
               the block.
            2. Invalid Block, first transaction is not signed by the same
               signer as the block.
            3. Valid Block, ignore rule because it is formatted incorrectly.
        """
        blkw = self._make_block(["intkey"], "pub_key")
        self._settings_view_factory.add_setting(
            "sawtooth.validator.block_validation_rules", "local:0")

        self.assertTrue(
            self._validation_rule_enforcer.validate(blkw, "state_root"))

        blkw = self._make_block(["intkey"], "pub_key", False)
        self._settings_view_factory.add_setting(
            "sawtooth.validator.block_validation_rules", "local:0")

        self.assertFalse(
            self._validation_rule_enforcer.validate(blkw, "state_root"))

        self._settings_view_factory.add_setting(
            "sawtooth.validator.block_validation_rules", "local:test")

        self.assertTrue(
            self._validation_rule_enforcer.validate(blkw, "state_root"))

    def test_all_at_once(self):
        """
        Test that if multiple rules are set, they are all checked correctly.
        Block should be valid.
        """
        blkw = self._make_block(["intkey"], "pub_key")
        self._settings_view_factory.add_setting(
            "sawtooth.validator.block_validation_rules",
            "XatY:intkey,0;XatY:intkey,0;local:0")

        self.assertTrue(
            self._validation_rule_enforcer.validate(blkw, "state_root"))

    def test_all_at_once_bad_number_of_intkey(self):
        """
        Test that if multiple rules are set, they are all checked correctly.
        Block is invalid, because there are too many intkey transactions.
        """
        blkw = self._make_block(["intkey"], "pub_key")
        self._settings_view_factory.add_setting(
            "sawtooth.validator.block_validation_rules",
            "NofX:0,intkey;XatY:intkey,0;local:0")

        self.assertFalse(
            self._validation_rule_enforcer.validate(blkw, "state_root"))

    def test_all_at_once_bad_family_at_index(self):
        """
        Test that if multiple rules are set, they are all checked correctly.
        Block is invalid, there is not a blockinfo transaction at the 0th
        position.
        """
        blkw = self._make_block(["intkey"], "pub_key")
        self._settings_view_factory.add_setting(
            "sawtooth.validator.block_validation_rules",
            "XatY:intkey,0;XatY:blockinfo,0;local:0")

        self.assertFalse(
            self._validation_rule_enforcer.validate(blkw, "state_root"))

    def test_all_at_once_signer_key(self):
        """
        Test that if multiple rules are set, they are all checked correctly.
        Block is invalid, transaction at the 0th position is not signed by the
        same signer as the block.
        """
        blkw = self._make_block(["intkey"], "pub_key", False)
        self._settings_view_factory.add_setting(
            "sawtooth.validator.block_validation_rules",
            "XatY:intkey,0;XatY:intkey,0;local:0")

        self.assertFalse(
            self._validation_rule_enforcer.validate(blkw, "state_root"))
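
These tests exercise the rule-string format stored under sawtooth.validator.block_validation_rules: rules separated by semicolons, rule name and arguments separated by a colon, arguments separated by commas. An illustrative parser for that format (not the enforcer's actual code):

def parse_validation_rules(rules_str):
    """Parse e.g. "NofX:0,intkey;XatY:intkey,0;local:0" into
    [('NofX', ['0', 'intkey']), ('XatY', ['intkey', '0']), ('local', ['0'])].
    """
    rules = []
    for rule in rules_str.split(";"):
        name, _, args = rule.partition(":")
        rules.append((name.strip(), args.split(",")))
    return rules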
Example #7
class BlockValidator(object):
    """
    Responsible for validating a block. Handles both chain extensions and
    forks; determines whether the new block should become the head of the
    chain and returns the information necessary to make the switch if
    necessary.
    """
    def __init__(self,
                 block_cache,
                 state_view_factory,
                 transaction_executor,
                 squash_handler,
                 identity_signer,
                 data_dir,
                 config_dir,
                 permission_verifier,
                 metrics_registry=None,
                 thread_pool=None):
        """Initialize the BlockValidator
        Args:
            block_cache: The cache of all recent blocks and the processing
                state associated with them.
            state_view_factory: A factory that can be used to create read-
                only views of state for a particular merkle root, in
                particular the state as it existed when a particular block
                was the chain head.
            transaction_executor: The transaction executor used to
                process transactions.
            squash_handler: A parameter passed when creating transaction
                schedulers.
            identity_signer: A cryptographic signer for signing blocks.
            data_dir: Path to location where persistent data for the
                consensus module can be stored.
            config_dir: Path to location where config data for the
                consensus module can be found.
            permission_verifier: The delegate for handling permission
                validation on blocks.
            metrics_registry: (Optional) Pyformance metrics registry handle for
                creating new metrics.
            thread_pool: (Optional) Executor pool used to submit block
                validation jobs. If not specified, a default will be created.
        Returns:
            None
        """
        self._block_cache = block_cache
        self._state_view_factory = state_view_factory
        self._transaction_executor = transaction_executor
        self._squash_handler = squash_handler
        self._identity_signer = identity_signer
        self._data_dir = data_dir
        self._config_dir = config_dir
        self._permission_verifier = permission_verifier

        self._validation_rule_enforcer = ValidationRuleEnforcer(
            SettingsViewFactory(state_view_factory))

        self._thread_pool = InstrumentedThreadPoolExecutor(1) \
            if thread_pool is None else thread_pool

        if metrics_registry:
            self._moved_to_fork_count = CounterWrapper(
                metrics_registry.counter('chain_head_moved_to_fork_count'))
        else:
            self._moved_to_fork_count = CounterWrapper()

    def stop(self):
        self._thread_pool.shutdown(wait=True)

    def _get_previous_block_state_root(self, blkw):
        if blkw.previous_block_id == NULL_BLOCK_IDENTIFIER:
            return INIT_ROOT_KEY

        return self._block_cache[blkw.previous_block_id].state_root_hash

    @staticmethod
    def _validate_transactions_in_batch(batch, chain_commit_state):
        """Verify that all transactions in this batch are unique and that all
        transaction dependencies in this batch have been satisfied.

        :param batch: the batch to verify
        :param chain_commit_state: the current chain commit state to verify the
            batch against
        :return: True if all dependencies are present and all transactions
            are unique, False otherwise.
        """
        for txn in batch.transactions:
            txn_hdr = TransactionHeader()
            txn_hdr.ParseFromString(txn.header)
            if chain_commit_state.has_transaction(txn.header_signature):
                LOGGER.debug("Batch invalid due to duplicate transaction: %s",
                             txn.header_signature[:8])
                return False
            for dep in txn_hdr.dependencies:
                if not chain_commit_state.has_transaction(dep):
                    LOGGER.debug(
                        "Batch invalid due to missing transaction dependency;"
                        " transaction %s depends on %s",
                        txn.header_signature[:8], dep[:8])
                    return False
        return True
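
    # Worked example: if transaction T2 declares a dependency on T1, the
    # batch containing T2 passes this check only once T1 is present in the
    # chain commit state (already committed, or added earlier while
    # validating this block).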

    def _validate_batches_in_block(self, blkw, prev_state_root,
                                   chain_commit_state):
        if blkw.block.batches:
            scheduler = self._transaction_executor.create_scheduler(
                self._squash_handler, prev_state_root)
            self._transaction_executor.execute(scheduler)
            try:
                for batch, has_more in look_ahead(blkw.block.batches):
                    if chain_commit_state.has_batch(batch.header_signature):
                        LOGGER.debug(
                            "Block(%s) rejected due to duplicate "
                            "batch, batch: %s", blkw,
                            batch.header_signature[:8])
                        raise InvalidBatch()

                    # Verify dependencies and uniqueness
                    if self._validate_transactions_in_batch(
                            batch, chain_commit_state):
                        # Only add transactions to commit state if all
                        # transactions in the batch are good.
                        chain_commit_state.add_batch(batch,
                                                     add_transactions=True)
                    else:
                        raise InvalidBatch()

                    if has_more:
                        scheduler.add_batch(batch)
                    else:
                        scheduler.add_batch(batch, blkw.state_root_hash)
            except InvalidBatch:
                LOGGER.debug(
                    "Invalid batch %s encountered during "
                    "verification of block %s", batch.header_signature[:8],
                    blkw)
                scheduler.cancel()
                return False
            except Exception:
                scheduler.cancel()
                raise

            scheduler.finalize()
            scheduler.complete(block=True)
            state_hash = None

            for batch in blkw.batches:
                batch_result = scheduler.get_batch_execution_result(
                    batch.header_signature)
                if batch_result is not None and batch_result.is_valid:
                    txn_results = \
                        scheduler.get_transaction_execution_results(
                            batch.header_signature)
                    blkw.execution_results.extend(txn_results)
                    state_hash = batch_result.state_hash
                    blkw.num_transactions += len(batch.transactions)
                else:
                    return False
            if blkw.state_root_hash != state_hash:
                LOGGER.debug(
                    "Block(%s) rejected due to state root hash "
                    "mismatch: %s != %s", blkw, blkw.state_root_hash,
                    state_hash)
                return False
        return True
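
    # The module-level look_ahead() helper used above is imported elsewhere
    # and not shown in this example. A hedged sketch of its presumed
    # behavior (yield each item paired with a flag saying whether more items
    # follow) is given below; the name _look_ahead_sketch is hypothetical.
    @staticmethod
    def _look_ahead_sketch(iterable):
        iterator = iter(iterable)
        try:
            current = next(iterator)
        except StopIteration:
            return
        for upcoming in iterator:
            yield current, True
            current = upcoming
        yield current, False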

    def _validate_permissions(self, blkw, prev_state_root):
        """
        Validate that all of the batch signers and transaction signers for
        the batches in the block are permitted by the transactor
        permissioning roles stored in state as of the previous block. If a
        transactor is found not to be permitted, the block is invalid.
        """
        if blkw.block_num != 0:
            for batch in blkw.batches:
                if not self._permission_verifier.is_batch_signer_authorized(
                        batch, prev_state_root, from_state=True):
                    return False
        return True

    def _validate_on_chain_rules(self, blkw, prev_state_root):
        """
        Validate that the block conforms to all validation rules stored in
        state. If the block breaks any of the stored rules, the block is
        invalid.
        """
        if blkw.block_num != 0:
            return self._validation_rule_enforcer.validate(
                blkw, prev_state_root)
        return True

    def validate_block(self, blkw, consensus, chain_head=None, chain=None):
        if blkw.status == BlockStatus.Valid:
            return True
        elif blkw.status == BlockStatus.Invalid:
            return False

        # pylint: disable=broad-except
        try:
            if chain_head is None:
                # Try to get the chain head from the block store; note that the
                # block store may also return None for the chain head if a
                # genesis block hasn't been committed yet.
                chain_head = self._block_cache.block_store.chain_head

            if chain is None:
                chain = []
            chain_commit_state = ChainCommitState(
                self._block_cache.block_store, chain)

            try:
                prev_state_root = self._get_previous_block_state_root(blkw)
            except KeyError:
                LOGGER.debug("Block rejected due to missing predecessor: %s",
                             blkw)
                return False

            if not self._validate_permissions(blkw, prev_state_root):
                blkw.status = BlockStatus.Invalid
                return False

            public_key = \
                self._identity_signer.get_public_key().as_hex()
            consensus_block_verifier = consensus.BlockVerifier(
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                data_dir=self._data_dir,
                config_dir=self._config_dir,
                validator_id=public_key)

            if not consensus_block_verifier.verify_block(blkw):
                blkw.status = BlockStatus.Invalid
                return False

            if not self._validate_on_chain_rules(blkw, prev_state_root):
                blkw.status = BlockStatus.Invalid
                return False

            if not self._validate_batches_in_block(blkw, prev_state_root,
                                                   chain_commit_state):
                blkw.status = BlockStatus.Invalid
                return False

            # Since a change to the chain head can change the state of the
            # blocks in the BlockStore, check that the chain head has not
            # moved before accepting this validation result.
            block_store = self._block_cache.block_store

            # The chain_head is None when this is the genesis block or if the
            # block store has no chain_head.
            if chain_head is not None:
                if chain_head.identifier != block_store.chain_head.identifier:
                    raise ChainHeadUpdated()

            blkw.status = BlockStatus.Valid
            return True

        except ChainHeadUpdated:
            raise

        except Exception:
            LOGGER.exception(
                "Unhandled exception in BlockValidator.validate_block()")
            return False

    @staticmethod
    def _compare_chain_height(head_a, head_b):
        """Returns True if head_a is taller, False if head_b is taller, and
        True if the heights are the same."""
        return head_a.block_num - head_b.block_num >= 0

    def _build_fork_diff_to_common_height(self, head_long, head_short):
        """Returns a list of blocks on the longer chain since the greatest
        common height between the two chains. Note that the chains may not
        have the same block id at the greatest common height.

        Args:
            head_long (BlockWrapper)
            head_short (BlockWrapper)

        Returns:
            A tuple of the block on the longer chain at the greatest common
            height and a list of all blocks in the longer chain above that
            height, ordered newest to oldest.

        Raises:
            BlockValidationAborted
                The block is missing a predecessor. Note that normally this
                shouldn't happen because of the completer."""
        fork_diff = []

        last = head_short.block_num
        blk = head_long

        while blk.block_num > last:
            if blk.previous_block_id == NULL_BLOCK_IDENTIFIER:
                break

            fork_diff.append(blk)
            try:
                blk = self._block_cache[blk.previous_block_id]
            except KeyError:
                LOGGER.debug(
                    "Failed to build fork diff due to missing predecessor: %s",
                    blk)

                # Mark all blocks in the longer chain since the invalid block
                # as invalid.
                for fork_blk in fork_diff:
                    fork_blk.status = BlockStatus.Invalid
                raise BlockValidationAborted()

        return blk, fork_diff
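
    # Worked example: with a committed chain A<-B<-C (head C, height 2) and
    # a competing chain A<-B'<-C'<-D' (head D', height 3), calling
    # _build_fork_diff_to_common_height(D', C) walks the longer chain down
    # to height 2 and returns (C', [D']). _extend_fork_diff_to_common_ancestor
    # below then walks both chains back to A, leaving new_chain=[D', C', B']
    # and cur_chain=[C, B].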

    def _extend_fork_diff_to_common_ancestor(self, new_blkw, cur_blkw,
                                             new_chain, cur_chain):
        """ Finds a common ancestor of the two chains. new_blkw and cur_blkw
        must be at the same height, or this will always fail.
        """
        while cur_blkw.identifier != new_blkw.identifier:
            if (cur_blkw.previous_block_id == NULL_BLOCK_IDENTIFIER
                    or new_blkw.previous_block_id == NULL_BLOCK_IDENTIFIER):
                # We are at a genesis block and the blocks are not the same
                LOGGER.info("Block rejected due to wrong genesis: %s %s",
                            cur_blkw, new_blkw)
                for b in new_chain:
                    b.status = BlockStatus.Invalid
                raise BlockValidationAborted()

            new_chain.append(new_blkw)
            try:
                new_blkw = self._block_cache[new_blkw.previous_block_id]
            except KeyError:
                LOGGER.info("Block %s rejected due to missing predecessor %s",
                            new_blkw, new_blkw.previous_block_id)
                for b in new_chain:
                    b.status = BlockStatus.Invalid
                raise BlockValidationAborted()

            cur_chain.append(cur_blkw)
            cur_blkw = self._block_cache[cur_blkw.previous_block_id]

    def _compare_forks_consensus(self, consensus, chain_head, new_block):
        """Ask the consensus module which fork to choose.
        """
        public_key = self._identity_signer.get_public_key().as_hex()
        fork_resolver = consensus.ForkResolver(
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            data_dir=self._data_dir,
            config_dir=self._config_dir,
            validator_id=public_key)

        return fork_resolver.compare_forks(chain_head, new_block)

    @staticmethod
    def _get_batch_commit_changes(new_chain, cur_chain):
        """
        Get all the batches that should be committed from the new chain and
        all the batches that should be uncommitted from the current chain.
        """
        committed_batches = []
        for blkw in new_chain:
            for batch in blkw.batches:
                committed_batches.append(batch)

        uncommitted_batches = []
        for blkw in cur_chain:
            for batch in blkw.batches:
                uncommitted_batches.append(batch)

        return (committed_batches, uncommitted_batches)

    def submit_blocks_for_verification(self, blocks, consensus, callback):
        for block in blocks:
            self._thread_pool.submit(self.process_block_verification, block,
                                     consensus, callback)

    def process_block_verification(self, block, consensus, callback):
        """
        Main entry for Block Validation, Take a given candidate block
        and decide if it is valid then if it is valid determine if it should
        be the new head block. Returns the results to the ChainController
        so that the change over can be made if necessary.
        """
        try:
            result = BlockValidationResult(block)
            LOGGER.info("Starting block validation of : %s", block)

            # Get the current chain_head and store it in the result
            chain_head = self._block_cache.block_store.chain_head
            result.chain_head = chain_head

            # Create new local variables for current and new block, since
            # these variables get modified later
            current_block = chain_head
            new_block = block

            # Get all the blocks since the greatest common height from the
            # longer chain.
            if self._compare_chain_height(current_block, new_block):
                current_block, result.current_chain =\
                    self._build_fork_diff_to_common_height(
                        current_block, new_block)
            else:
                new_block, result.new_chain =\
                    self._build_fork_diff_to_common_height(
                        new_block, current_block)

            # Add blocks to the two chains until a common ancestor is found
            # or raise an exception if no common ancestor is found
            self._extend_fork_diff_to_common_ancestor(new_block, current_block,
                                                      result.new_chain,
                                                      result.current_chain)

            valid = True
            for blk in reversed(result.new_chain):
                if valid:
                    if not self.validate_block(blk, consensus, chain_head,
                                               result.current_chain):
                        LOGGER.info("Block validation failed: %s", blk)
                        valid = False
                    result.transaction_count += blk.num_transactions
                else:
                    LOGGER.info(
                        "Block marked invalid(invalid predecessor): %s", blk)
                    blk.status = BlockStatus.Invalid

            if not valid:
                callback(False, result)
                return

            # Ask consensus if the new chain should be committed
            LOGGER.info(
                "Comparing current chain head '%s' against new block '%s'",
                chain_head, new_block)
            for i in range(
                    max(len(result.new_chain), len(result.current_chain))):
                cur = new = num = "-"
                if i < len(result.current_chain):
                    cur = result.current_chain[i].header_signature[:8]
                    num = result.current_chain[i].block_num
                if i < len(result.new_chain):
                    new = result.new_chain[i].header_signature[:8]
                    num = result.new_chain[i].block_num
                LOGGER.info(
                    "Fork comparison at height %s is between %s and %s", num,
                    cur, new)

            commit_new_chain = self._compare_forks_consensus(
                consensus, chain_head, block)

            # If committing the new chain, get the list of committed batches
            # from the current chain that need to be uncommitted and the list
            # of uncommitted batches from the new chain that need to be
            # committed.
            if commit_new_chain:
                commit, uncommit =\
                    self._get_batch_commit_changes(
                        result.new_chain, result.current_chain)
                result.committed_batches = commit
                result.uncommitted_batches = uncommit

                if result.new_chain[0].previous_block_id \
                        != chain_head.identifier:
                    self._moved_to_fork_count.inc()

            # Pass the results to the callback function
            callback(commit_new_chain, result)
            LOGGER.info("Finished block validation of: %s", block)

        except BlockValidationAborted:
            callback(False, result)
            return
        except ChainHeadUpdated:
            callback(False, result)
            return
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception(
                "Block validation failed with unexpected error: %s", block)
            # Call back so the block is cleaned out of the processing list.
            callback(False, result)
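
# A minimal usage sketch, assuming hypothetical stand-in collaborators built
# with unittest.mock; a real validator would receive the journal's actual
# block cache, state view factory, executor, signer, and permission
# verifier. It shows the intended call flow: construct the validator,
# submit blocks, and receive (commit_new_chain, result) via the callback.
from unittest import mock


def _on_block_validated(commit_new_chain, result):
    # The ChainController would switch the chain head here when
    # commit_new_chain is True.
    print("commit new chain:", commit_new_chain,
          "transactions:", result.transaction_count)


validator = BlockValidator(
    block_cache=mock.Mock(),
    state_view_factory=mock.Mock(),
    transaction_executor=mock.Mock(),
    squash_handler=mock.Mock(),
    identity_signer=mock.Mock(),
    data_dir="/tmp/data",
    config_dir="/tmp/config",
    permission_verifier=mock.Mock())
# candidate_blocks and consensus_module are assumed to exist in the caller:
# validator.submit_blocks_for_verification(
#     candidate_blocks, consensus_module, _on_block_validated)
validator.stop()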