Example #1
class BlockPublisher(object):
    """
    Responsible for generating new blocks and publishing them when the
    Consensus deems it appropriate.
    """
    def __init__(self, transaction_executor, block_cache, state_view_factory,
                 block_sender, batch_sender, squash_handler, chain_head,
                 identity_signing_key, data_dir):
        """
        Initialize the BlockPublisher object

        Args:
            transaction_executor (:obj:`TransactionExecutor`): A
                TransactionExecutor instance.
            block_cache (:obj:`BlockCache`): A BlockCache instance.
            state_view_factory (:obj:`StateViewFactory`): StateViewFactory for
                read-only state views.
            block_sender (:obj:`BlockSender`): The BlockSender instance.
            batch_sender (:obj:`BatchSender`): The BatchSender instance.
            squash_handler (function): Squash handler function for merging
                contexts.
            chain_head (:obj:`BlockWrapper`): The initial chain head.
            identity_signing_key (str): Private key for signing blocks.
            data_dir (str): Path to the location where persistent data for
                the consensus module can be stored.
        """
        self._lock = RLock()
        self._candidate_block = None  # the next block in potential chain
        self._consensus = None
        self._block_cache = block_cache
        self._state_view_factory = state_view_factory
        self._transaction_executor = transaction_executor
        self._block_sender = block_sender
        self._batch_publisher = BatchPublisher(identity_signing_key,
                                               batch_sender)
        self._pending_batches = []  # batches awaiting validation,
        # in the order they were received.
        self._committed_txn_cache = TransactionCache(
            self._block_cache.block_store)
        # Look-up cache for transactions committed in the current chain.
        # The cache lets us opportunistically build on top of a block we
        # just published and also tracks the transactions already added
        # to the candidate block.

        self._scheduler = None
        self._chain_head = chain_head  # block (BlockWrapper)
        self._squash_handler = squash_handler
        self._identity_signing_key = identity_signing_key
        self._identity_public_key = \
            signing.generate_pubkey(self._identity_signing_key)
        self._data_dir = data_dir

    def _build_block(self, chain_head):
        """ Build a candidate block and construct the consensus object to
        validate it.
        :param chain_head: The block to build on top of.
        :return: (BlockBuilder) - The candidate block in a BlockBuilder
        wrapper.
        """
        state_view = BlockWrapper.state_view_for_block(
            chain_head, self._state_view_factory)
        consensus_module = ConsensusFactory.get_configured_consensus_module(
            chain_head.header_signature, state_view)

        self._consensus = consensus_module.\
            BlockPublisher(block_cache=self._block_cache,
                           state_view_factory=self._state_view_factory,
                           batch_publisher=self._batch_publisher,
                           data_dir=self._data_dir,
                           validator_id=self._identity_public_key)

        block_header = BlockHeader(
            block_num=chain_head.block_num + 1,
            previous_block_id=chain_head.header_signature,
            signer_pubkey=self._identity_public_key)
        block_builder = BlockBuilder(block_header)
        if not self._consensus.initialize_block(block_builder.block_header):
            LOGGER.debug("Consensus not ready to build candidate block.")
            return None

        # Cancel the previous scheduler if it did not complete.
        if self._scheduler is not None \
                and not self._scheduler.complete(block=False):
            self._scheduler.cancel()

        # create a new scheduler
        self._scheduler = self._transaction_executor.create_scheduler(
            self._squash_handler, chain_head.state_root_hash)

        # build the TransactionCache
        self._committed_txn_cache = TransactionCache(
            self._block_cache.block_store)
        if chain_head.header_signature not in self._block_cache.block_store:
            # If we are opportunistically building on a block that is not
            # yet in the block store, make sure its transactions are
            # tracked as committed.
            for batch in chain_head.block.batches:
                for txn in batch.transactions:
                    self._committed_txn_cache.add_txn(txn.header_signature)

        self._transaction_executor.execute(self._scheduler)
        for batch in self._pending_batches:
            self._validate_batch(batch)

        return block_builder

    def _sign_block(self, block):
        """ The block should be complete and the final
        signature from the publishing validator(this validator) needs to
        be added.
        """
        block_header = block.block_header
        header_bytes = block_header.SerializeToString()
        signature = signing.sign(header_bytes, self._identity_signing_key)
        block.set_signature(signature)
        return block

    def _check_batch_dependencies(self, batch, committed_txn_cache):
        """Check the dependencies for all transactions in this are present.
        If all are present the committed_txn is updated with all txn in this
        batch and True is returned. If they are not return failure and the
        committed_txn is not updated.
        :param batch: the batch to validate
        :param committed_txn(TransactionCache): Current set of committed
        transaction, updated during processing.
        :return: Boolean, True if dependencies checkout, False otherwise.
        """
        for txn in batch.transactions:
            if not self._check_transaction_dependencies(
                    txn, committed_txn_cache):
                # if any transaction in this batch fails the whole batch
                # fails.
                committed_txn_cache.remove_batch(batch)
                return False
            # update so any subsequent txn in the same batch can be dependent
            # on this transaction.
            committed_txn_cache.add_txn(txn.header_signature)
        return True

    def _check_transaction_dependencies(self, txn, committed_txn):
        """Check that all this transactions dependencies are present.
        :param tx: the transaction to check
        :param committed_txn(TransactionCache): Current set of committed
        :return: Boolean, True if dependencies checkout, False otherwise.
        """
        txn_hdr = TransactionHeader()
        txn_hdr.ParseFromString(txn.header)
        for dep in txn_hdr.dependencies:
            if dep not in committed_txn:
                LOGGER.debug(
                    "Transaction rejected due to missing dependency; "
                    "transaction %s depends on %s",
                    txn.header_signature, dep)
                return False
        return True

    def _validate_batch(self, batch):
        """Schedule validation of a batch for inclusion in the new block
        :param batch: the batch to validate
        :return: None
        """
        if self._scheduler:
            try:
                self._scheduler.add_batch(batch)
            except SchedulerError as err:
                LOGGER.debug("Scheduler error processing batch: %s", err)

    def is_batch_already_committed(self, batch):
        """ Test if a batch is already committed to the chain or
        is already in the pending queue.
        """
        if self._block_cache.block_store.has_batch(batch.header_signature):
            return True
        else:
            for pending in self._pending_batches:
                if batch.header_signature == pending.header_signature:
                    return True
        return False

    def on_batch_received(self, batch):
        """
        A new batch is received, send it for validation
        :param batch: the new pending batch
        :return: None
        """
        with self._lock:
            # First check whether the batch has already been committed or is
            # already pending, then check that its transaction dependencies
            # are satisfied. The completer should have ensured that all
            # Batches containing dependent transactions were sent to the
            # BlockPublisher before this Batch, so a missing dependency is an
            # error condition and the batch will be dropped.
            if self.is_batch_already_committed(batch):
                # batch is already committed.
                LOGGER.debug("Dropping previously committed batch: %s",
                             batch.header_signature)
                return
            elif self._check_batch_dependencies(batch,
                                                self._committed_txn_cache):
                self._pending_batches.append(batch)
                # if we are building a block, schedule the batch for
                # execution.
                if self._chain_head is not None:
                    self._validate_batch(batch)
            else:
                LOGGER.debug("Dropping batch due to missing dependencies: %s",
                             batch.header_signature)

    def _rebuild_pending_batches(self, committed_batches, uncommitted_batches):
        """When the chain head is changed. This recomputes the list of pending
        transactions
        :param committed_batches: Batches committed in the current chain
        since the root of the fork switching from.
        :param uncommitted_batches: Batches that were committed in the old
        fork since the common root.
        """
        if committed_batches is None:
            committed_batches = []
        if uncommitted_batches is None:
            uncommitted_batches = []

        committed_set = set([x.header_signature for x in committed_batches])

        pending_batches = self._pending_batches
        self._pending_batches = []

        # Uncommitted and pending are disjoint sets,
        # since a batch can only be committed to a chain once.
        for batch in uncommitted_batches:
            if batch.header_signature not in committed_set:
                if self._check_batch_dependencies(batch,
                                                  self._committed_txn_cache):
                    self._pending_batches.append(batch)

        for batch in pending_batches:
            if batch.header_signature not in committed_set:
                if self._check_batch_dependencies(batch,
                                                  self._committed_txn_cache):
                    self._pending_batches.append(batch)

    def on_chain_updated(self,
                         chain_head,
                         committed_batches=None,
                         uncommitted_batches=None):
        """
        The existing chain has been updated; the current head block has
        changed.

        :param chain_head: the new head of the block chain
        :param committed_batches: the set of batches that were committed
        as part of the new chain.
        :param uncommitted_batches: the list of batches, if any, that are
        now uncommitted because the new chain was selected.
        :return: None
        """
        try:
            with self._lock:
                LOGGER.info('Now building on top of block: %s', chain_head)

                self._chain_head = chain_head

                if self._candidate_block is not None and \
                        chain_head is not None and \
                        chain_head.identifier == \
                        self._candidate_block.previous_block_id:
                    # Nothing to do; we are already building on the current
                    # head. This can happen after we publish a block and
                    # opportunistically create a new candidate block.
                    return
                elif chain_head is None:
                    # we don't have a chain head, we cannot build blocks
                    self._candidate_block = None
                    self._consensus = None

                    for batch in self._pending_batches:
                        self._committed_txn_cache.add_batch(batch)
                else:
                    self._rebuild_pending_batches(committed_batches,
                                                  uncommitted_batches)
                    self._candidate_block = self._build_block(chain_head)
        # pylint: disable=broad-except
        except Exception as exc:
            LOGGER.critical("on_chain_updated exception.")
            LOGGER.exception(exc)

    def _finalize_block(self, block):
        if self._scheduler:
            self._scheduler.finalize()
            self._scheduler.complete(block=True)

        # Read valid batches from self._scheduler
        pending_batches = self._pending_batches
        # this is a transaction cache to track the transactions committed
        # up to this batch.
        committed_txn_cache = TransactionCache(self._block_cache.block_store)
        self._pending_batches = []
        self._committed_txn_cache = TransactionCache(
            self._block_cache.block_store)

        state_hash = None
        for batch in pending_batches:
            result = self._scheduler.get_batch_execution_result(
                batch.header_signature)
            # if a result is None, this means that the executor never
            # received the batch and it should be added to
            # the pending_batches
            if result is None:
                self._pending_batches.append(batch)
                self._committed_txn_cache.add_batch(batch)
            elif result.is_valid:
                # Check whether a dependent batch failed. This may be a belt
                # and suspenders check, but it is logically possible for a
                # transaction whose dependency failed to still pass
                # validation. In that case we do not want to add it to the
                # block.
                if not self._check_batch_dependencies(batch,
                                                      committed_txn_cache):
                    LOGGER.debug(
                        "Batch %s invalid due to missing txn "
                        "dependency.", batch.header_signature)
                    LOGGER.debug(
                        "Abandoning block %s: "
                        "root state hash has invalid txn applied", block)
                    pending_batches.remove(batch)
                    self._pending_batches = pending_batches
                    self._committed_txn_cache = \
                        TransactionCache(self._block_cache.block_store)
                    return False
                else:
                    block.add_batch(batch)
                    self._committed_txn_cache.add_batch(batch)
                state_hash = result.state_hash
            else:
                committed_txn_cache.uncommit_batch(batch)
                LOGGER.debug("Batch %s invalid, not added to block.",
                             batch.header_signature)

        if state_hash is None:
            LOGGER.debug("Abandoning block %s no batches added", block)
            return False

        if not self._consensus.finalize_block(block.block_header):
            LOGGER.debug(
                "Abandoning block %s, consensus failed to finalize "
                "it", block)
            return False

        self._consensus = None

        block.set_state_hash(state_hash)
        self._sign_block(block)

        return True

    def on_check_publish_block(self, force=False):
        """Ask the consensus module if it is time to claim the candidate block
        if it is then, claim it and tell the world about it.
        :return:
            None
        """
        try:
            with self._lock:
                if self._chain_head is not None and\
                        self._candidate_block is None and\
                        len(self._pending_batches) != 0:
                    self._candidate_block = self._build_block(self._chain_head)
                if self._candidate_block and \
                        (force or len(self._pending_batches) != 0) and \
                        self._consensus.check_publish_block(
                            self._candidate_block.block_header):
                    candidate = self._candidate_block
                    self._candidate_block = None

                    if not self._finalize_block(candidate):
                        return

                    block = BlockWrapper(candidate.build_block())
                    self._block_cache[block.identifier] = block  # add the
                    # block to the cache, so we can build on top of it.
                    self._block_sender.send(block.block)

                    LOGGER.info("Claimed Block: %s", block)

                    # We built our candidate, disable processing until
                    # the chain head is updated.
                    self.on_chain_updated(None)
        # pylint: disable=broad-except
        except Exception as exc:
            LOGGER.critical("on_check_publish_block exception.")
            LOGGER.exception(exc)
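
The BlockPublisher above is driven from outside: batches arrive through on_batch_received, on_check_publish_block is invoked periodically, and on_chain_updated is invoked whenever the chain head changes. Below is a minimal driver sketch; the publisher instance, the incoming_batches queue, and the polling loop are illustrative assumptions, not part of the code above.

import queue
import time

def run_publisher_loop(publisher, incoming_batches, poll_interval=0.1):
    """Hypothetical driver loop for the BlockPublisher in Example #1.

    `publisher` is assumed to be a fully constructed BlockPublisher and
    `incoming_batches` a queue.Queue of Batch messages filled elsewhere;
    both names are illustrative assumptions.
    """
    while True:
        # Hand any newly arrived batches to the publisher for validation.
        try:
            while True:
                batch = incoming_batches.get_nowait()
                publisher.on_batch_received(batch)
        except queue.Empty:
            pass

        # Periodically ask consensus whether the candidate block can be
        # claimed and published.
        publisher.on_check_publish_block()
        time.sleep(poll_interval)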
Example #2
    def finalize_block(self, identity_signing_key, pending_batches):
        """Compose the final Block to publish. This involves flushing
        the scheduler, having consensus bless the block, and signing
        the block.
        :param identity_signing_key: the key to sign the block with.
        :param pending_batches: list to receive any batches that were
        submitted to add to the block but were not validated before this
        call.
        """
        self._scheduler.finalize()
        self._scheduler.complete(block=True)

        # this is a transaction cache to track the transactions committed
        # up to this batch. Only valid transactions that were processed
        # by the scheduler are added.
        committed_txn_cache = TransactionCache(self._block_store)

        builder = self._block_builder
        state_hash = None
        for batch in self._pending_batches:
            result = self._scheduler.get_batch_execution_result(
                batch.header_signature)
            # if a result is None, this means that the executor never
            # received the batch and it should be added to
            # the pending_batches, to be added to the next
            # block
            if result is None:
                pending_batches.append(batch)
            elif result.is_valid:
                # Check whether a dependent batch failed. This may be a belt
                # and suspenders check, but it is logically possible for a
                # transaction whose dependency failed to still pass
                # validation. In that case we do not want to add it to the
                # block.
                if not self._check_batch_dependencies(batch,
                                                      committed_txn_cache):
                    LOGGER.debug("Batch %s invalid, due to missing txn "
                                 "dependency.", batch.header_signature)
                    LOGGER.debug("Abandoning block %s:" +
                                 "root state hash has invalid txn applied",
                                 builder)
                    return None
                else:
                    builder.add_batch(batch)
                    committed_txn_cache.add_batch(batch)
                if result.state_hash is not None:
                    state_hash = result.state_hash
            else:
                LOGGER.debug("Batch %s invalid, not added to block.",
                             batch.header_signature)

        if state_hash is None:
            LOGGER.debug("Abandoning block %s no batches added", builder)
            return None

        if not self._consensus.finalize_block(builder.block_header):
            LOGGER.debug("Abandoning block %s, consensus failed to finalize "
                         "it", builder)
            return None

        builder.set_state_hash(state_hash)
        self._sign_block(builder, identity_signing_key)
        return builder.build_block()
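
This refactored finalize_block hands leftover work back through the pending_batches argument instead of mutating publisher-wide state. A minimal sketch of how a caller might use it; the `candidate` object and `block_sender` are assumptions drawn from the surrounding examples, and the helper itself is hypothetical.

def try_publish(candidate, identity_signing_key, block_sender):
    """Hypothetical caller of the finalize_block method above.

    `candidate` is assumed to expose finalize_block as in Example #2;
    `block_sender` is assumed to have a send(block) method as in Example #1.
    """
    leftover = []  # receives batches the scheduler never executed
    block = candidate.finalize_block(identity_signing_key, leftover)
    if block is None:
        # The block was abandoned; the leftover batches should seed the
        # next candidate block.
        return None, leftover
    block_sender.send(block)
    return block, leftover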
Example #3
    def finalize_block(self, identity_signing_key, pending_batches):
        """Compose the final Block to publish. This involves flushing
        the scheduler, having consensus bless the block, and signing
        the block.
        :param identity_signing_key: the key to sign the block with.
        :param pending_batches: list to receive any batches that were
        submitted to add to the block but were not validated before this
        call.
        :return: The generated Block, or None if Block failed to finalize.
        In both cases the pending_batches will contain the list of batches
        that need to be added to the next Block that is built.
        """
        self._scheduler.finalize()
        self._scheduler.complete(block=True)

        # this is a transaction cache to track the transactions committed
        # up to this batch. Only valid transactions that were processed
        # by the scheduler are added.
        committed_txn_cache = TransactionCache(self._block_store)

        builder = self._block_builder
        bad_batches = []  # the list of batches that failed processing
        state_hash = None

        # Walk the pending batch list:
        # - find the state hash for the block. The block state_hash is
        #   arbitrarily placed on one of the batch results, so every batch
        #   must be interrogated to find it. If it is on a batch that failed
        #   processing then this block will be abandoned.
        # - build three lists of batches:
        # 1) all valid batches that will be included in the block; these are
        #    added to the BlockBuilder to include in the Block
        # 2) all batches that were not executed; these are to be returned
        #    in the pending_batches list
        # 3) all batches that failed processing. These will be discarded.
        #    This list is needed in the case where the block is abandoned, to
        #    make sure they do not remain in the pending_batches list.
        for batch in self._pending_batches:
            result = self._scheduler.get_batch_execution_result(
                batch.header_signature)
            # if a result is None, this means that the executor never
            # received the batch and it should be added to
            # the pending_batches, to be added to the next
            # block
            if result is None:
                pending_batches.append(batch)
            elif result.is_valid:
                # Check whether a dependent batch failed. This may be a belt
                # and suspenders check, but it is logically possible for a
                # transaction whose dependency failed to still pass
                # validation. In that case we do not want to add it to the
                # block.
                if not self._check_batch_dependencies(batch,
                                                      committed_txn_cache):
                    LOGGER.debug(
                        "Batch %s invalid due to missing txn "
                        "dependency.", batch.header_signature)
                    LOGGER.debug(
                        "Abandoning block %s: "
                        "root state hash has invalid txn applied", builder)
                    # Update the pending batch list to contain all of the
                    # batches that passed validation up to this point and
                    # none of the ones that failed. It is possible that this
                    # batch caused a later batch to fail, so all of the
                    # batches that failed after this one are left in the
                    # list to be retried.
                    bad_batches.append(batch)
                    pending_batches.clear()
                    pending_batches.extend([
                        x for x in self._pending_batches
                        if x not in bad_batches
                    ])
                    return None
                else:
                    builder.add_batch(batch)
                    committed_txn_cache.add_batch(batch)
                if result.state_hash is not None:
                    state_hash = result.state_hash
            else:
                bad_batches.append(batch)
                LOGGER.debug("Batch %s invalid, not added to block.",
                             batch.header_signature)

        if state_hash is None or not builder.batches:
            LOGGER.debug("Abandoning block %s: no batches added", builder)
            return None

        if not self._consensus.finalize_block(builder.block_header):
            LOGGER.debug(
                "Abandoning block %s, consensus failed to finalize "
                "it", builder)
            # return all valid batches to the pending_batches list
            pending_batches.clear()
            pending_batches.extend(
                [x for x in self._pending_batches if x not in bad_batches])
            return None

        builder.set_state_hash(state_hash)
        self._sign_block(builder, identity_signing_key)
        return builder.build_block()
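
The clear()/extend() pattern above matters because finalize_block mutates the caller's pending_batches list in place rather than rebinding a new list. A standalone sketch of that pattern in plain Python; the helper name and the string stand-ins for Batch objects are illustrative only.

def rebuild_pending(pending, all_batches, bad_batches):
    """Illustrative mirror of the clear()/extend() pattern used above.

    Mutating `pending` in place keeps the caller's reference valid, which
    a simple `pending = [...]` rebinding inside the function would not.
    """
    pending.clear()
    pending.extend(x for x in all_batches if x not in bad_batches)

# Usage sketch with plain strings standing in for Batch objects.
caller_view = []
rebuild_pending(caller_view, ["b1", "b2", "b3"], ["b2"])
assert caller_view == ["b1", "b3"]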
Example #4
    def finalize_block(self, identity_signing_key, pending_batches):
        """Compose the final Block to publish. This involves flushing
        the scheduler, having consensus bless the block, and signing
        the block.
        :param identity_signing_key: the key to sign the block with.
        :param pending_batches: list to receive any batches that were
        submitted to add to the block but were not validated before this
        call.
        :return: The generated Block, or None if Block failed to finalize.
        In both cases the pending_batches will contain the list of batches
        that need to be added to the next Block that is built.
        """
        self._scheduler.finalize()
        self._scheduler.complete(block=True)

        # this is a transaction cache to track the transactions committed
        # up to this batch. Only valid transactions that were processed
        # by the scheduler are added.
        committed_txn_cache = TransactionCache(self._block_store)

        builder = self._block_builder
        bad_batches = []  # the list of batches that failed processing
        state_hash = None

        # Walk the pending batch list:
        # - find the state hash for the block. The block state_hash is
        #   arbitrarily placed on one of the batch results, so every batch
        #   must be interrogated to find it. If it is on a batch that failed
        #   processing then this block will be abandoned.
        # - build three lists of batches:
        # 1) all valid batches that will be included in the block; these are
        #    added to the BlockBuilder to include in the Block
        # 2) all batches that were not executed; these are to be returned
        #    in the pending_batches list
        # 3) all batches that failed processing. These will be discarded.
        #    This list is needed in the case where the block is abandoned, to
        #    make sure they do not remain in the pending_batches list.
        for batch in self._pending_batches:
            result = self._scheduler.get_batch_execution_result(
                batch.header_signature)
            # if a result is None, this means that the executor never
            # received the batch and it should be added to
            # the pending_batches, to be added to the next
            # block
            if result is None:
                pending_batches.append(batch)
            elif result.is_valid:
                # Check whether a dependent batch failed. This may be a belt
                # and suspenders check, but it is logically possible for a
                # transaction whose dependency failed to still pass
                # validation. In that case we do not want to add it to the
                # block.
                if not self._check_batch_dependencies(batch,
                                                      committed_txn_cache):
                    LOGGER.debug("Batch %s invalid, due to missing txn "
                                 "dependency.", batch.header_signature)
                    LOGGER.debug("Abandoning block %s:" +
                                 "root state hash has invalid txn applied",
                                 builder)
                    # Update the pending batch list to contain all of the
                    # batches that passed validation up to this point and
                    # none of the ones that failed. It is possible that this
                    # batch caused a later batch to fail, so all of the
                    # batches that failed after this one are left in the
                    # list to be retried.
                    bad_batches.append(batch)
                    pending_batches.clear()
                    pending_batches.extend([x for x in self._pending_batches
                                            if x not in bad_batches])
                    return None
                else:
                    builder.add_batch(batch)
                    committed_txn_cache.add_batch(batch)
                if result.state_hash is not None:
                    state_hash = result.state_hash
            else:
                bad_batches.append(batch)
                LOGGER.debug("Batch %s invalid, not added to block.",
                             batch.header_signature)

        if state_hash is None or not builder.batches:
            LOGGER.debug("Abandoning block %s: no batches added", builder)
            return None

        if not self._consensus.finalize_block(builder.block_header):
            LOGGER.debug("Abandoning block %s, consensus failed to finalize "
                         "it", builder)
            # return all valid batches to the pending_batches list
            pending_batches.clear()
            pending_batches.extend([x for x in self._pending_batches
                                    if x not in bad_batches])
            return None

        builder.set_state_hash(state_hash)
        self._sign_block(builder, identity_signing_key)
        return builder.build_block()
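
Example #4 differs from Example #3 only in formatting, so a single sketch can illustrate the batch classification both versions perform. The stand-in types and the helper below are hypothetical test doubles that mirror only the three result paths (not executed, valid, invalid); the dependency checks and state hash handling of the real method are deliberately omitted.

from collections import namedtuple

# Hypothetical stand-ins for the scheduler result and batch objects.
BatchResult = namedtuple('BatchResult', ['is_valid', 'state_hash'])
Batch = namedtuple('Batch', ['header_signature'])

def classify_batches(batches, results):
    """Illustrative mirror of the walk over pending batches above.

    `results` maps a batch's header_signature to a BatchResult, or to None
    when the scheduler never executed the batch. Returns (included,
    not_executed, bad) in the spirit of the three lists described in the
    comments of Examples #3 and #4.
    """
    included, not_executed, bad = [], [], []
    for batch in batches:
        result = results.get(batch.header_signature)
        if result is None:
            not_executed.append(batch)
        elif result.is_valid:
            included.append(batch)
        else:
            bad.append(batch)
    return included, not_executed, bad

# Usage sketch.
batches = [Batch('a'), Batch('b'), Batch('c')]
results = {'a': BatchResult(True, 'deadbeef'), 'b': None,
           'c': BatchResult(False, None)}
included, not_executed, bad = classify_batches(batches, results)
assert [b.header_signature for b in included] == ['a']
assert [b.header_signature for b in not_executed] == ['b']
assert [b.header_signature for b in bad] == ['c']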