def initialize_block(self, previous_block): """Begin building a new candidate block. Args: previous_block (BlockWrapper): The block to base the new block on. Raises: ConsensusNotReady Consensus is not ready to build a block """ # using previous_block so so we can use the setting_cache max_batches = int( self._settings_cache.get_setting( 'sawtooth.publisher.max_batches_per_block', previous_block.state_root_hash, default_value=0)) state_view = BlockWrapper.state_view_for_block( previous_block, self._state_view_factory) public_key = self._identity_signer.get_public_key().as_hex() consensus = self._load_consensus(previous_block, state_view, public_key) batch_injectors = self._load_injectors(previous_block) block_header = BlockHeader( block_num=previous_block.block_num + 1, previous_block_id=previous_block.header_signature, signer_public_key=public_key) block_builder = BlockBuilder(block_header) if not consensus.initialize_block(block_builder.block_header): raise ConsensusNotReady() # create a new scheduler scheduler = self._transaction_executor.create_scheduler( previous_block.state_root_hash) # build the TransactionCommitCache committed_txn_cache = TransactionCommitCache( self._block_cache.block_store) self._transaction_executor.execute(scheduler) self._candidate_block = _CandidateBlock( self._block_cache.block_store, consensus, scheduler, committed_txn_cache, block_builder, max_batches, batch_injectors, SettingsView(state_view), self._identity_signer) for batch in self._pending_batches: if self._candidate_block.can_add_batch(): self._candidate_block.add_batch(batch) else: break
def _build_candidate_block(self, chain_head):
    """Construct a new candidate block on top of ``chain_head``.

    Instantiates the configured consensus implementation, initializes
    the new block with it, wires up a scheduler and transaction commit
    cache, and stores the result on ``self._candidate_block`` seeded
    with pending batches.

    :param chain_head: The block to build on top of.
    :return: None; returns early (None) when consensus is not ready.
    """
    head_state_view = BlockWrapper.state_view_for_block(
        chain_head, self._state_view_factory)
    module = ConsensusFactory.get_configured_consensus_module(
        chain_head.header_signature, head_state_view)

    # Per-block batch cap from on-chain settings; 0 means unlimited.
    max_batches = SettingsView(head_state_view).get_setting(
        'sawtooth.publisher.max_batches_per_block',
        default_value=0,
        value_type=int)

    consensus = module.BlockPublisher(
        block_cache=self._block_cache,
        state_view_factory=self._state_view_factory,
        batch_publisher=self._batch_publisher,
        data_dir=self._data_dir,
        config_dir=self._config_dir,
        validator_id=self._identity_public_key)

    builder = BlockBuilder(BlockHeader(
        block_num=chain_head.block_num + 1,
        previous_block_id=chain_head.header_signature,
        signer_pubkey=self._identity_public_key))

    if not consensus.initialize_block(builder.block_header):
        LOGGER.debug("Consensus not ready to build candidate block.")
        return None

    # Fresh scheduler rooted at the chain head's state.
    scheduler = self._transaction_executor.create_scheduler(
        self._squash_handler, chain_head.state_root_hash)

    # Cache tracking transactions committed up to the candidate block.
    txn_commit_cache = TransactionCommitCache(
        self._block_cache.block_store)

    self._transaction_executor.execute(scheduler)
    self._candidate_block = _CandidateBlock(
        self._block_cache.block_store, consensus, scheduler,
        txn_commit_cache, builder, max_batches)

    # Hand pending batches to the candidate until it refuses more.
    for pending in self._pending_batches:
        if not self._candidate_block.can_add_batch:
            break
        self._candidate_block.add_batch(pending)
def finalize_block(self, identity_signer, pending_batches):
    """Compose the final Block to publish.

    Flushes the scheduler, sorts executed batches into valid /
    unexecuted / failed, has consensus bless the block, and signs it.

    :param identity_signer: the cryptographic signer to sign the block
        with.
    :param pending_batches: list to receive any batches that were
        submitted to add to the block but were not validated before this
        call.
    :return: The generated Block, or None if Block failed to finalize.
        In both cases the pending_batches will contain the list of
        batches that need to be added to the next Block that is built.
    """
    self._scheduler.finalize()
    self._scheduler.complete(block=True)

    # Transaction cache tracking the transactions committed up to each
    # batch; only valid transactions processed by the scheduler are
    # added.
    committed_txn_cache = TransactionCommitCache(self._block_store)

    builder = self._block_builder
    bad_batches = []  # the list of batches that failed processing
    state_hash = None

    # Walk the pending batch list:
    # - find the state hash for the block; the state hash is placed on
    #   one of the transactions, so every batch must be interrogated to
    #   find it.  If it is on a batch that failed processing then this
    #   block will be abandoned.
    # - sort the batches three ways:
    #   1) valid batches are added to the BlockBuilder for inclusion in
    #      the Block
    #   2) batches that were never executed are returned via
    #      pending_batches
    #   3) batches that failed processing are discarded; they are
    #      tracked in bad_batches so they can be filtered out of
    #      pending_batches when the block is abandoned.
    for batch in self._pending_batches:
        if batch.trace:
            LOGGER.debug("TRACE %s: %s", batch.header_signature,
                         self.__class__.__name__)

        result = self._scheduler.get_batch_execution_result(
            batch.header_signature)
        # A None result means the executor never received the batch; it
        # should be carried over to the next block.
        if result is None:
            # An injected batch had to be in this block, so don't keep
            # it in pending batches.
            if batch.header_signature not in self._injected_batch_ids:
                pending_batches.append(batch)
            else:
                LOGGER.warning("Failed to inject batch '%s'",
                               batch.header_signature)
        elif result.is_valid:
            # Belt-and-suspenders check: a transaction whose dependency
            # failed can still pass validation, and such a batch must
            # not be added to the block.
            if not self._check_batch_dependencies(batch,
                                                  committed_txn_cache):
                LOGGER.debug(
                    "Batch %s invalid, due to missing txn "
                    "dependency.", batch.header_signature)
                # Fixed message: was built by concatenating
                # "...%s:" + "root state..." which emitted
                # "...:root state" with the space missing.
                LOGGER.debug(
                    "Abandoning block %s: "
                    "root state hash has invalid txn applied",
                    builder)
                # Reset pending_batches to every batch that passed
                # validation to this point and none that failed.  This
                # batch may have caused later batches to fail, so the
                # batches that failed after this one stay in the list.
                bad_batches.append(batch)
                pending_batches.clear()
                pending_batches.extend([
                    x for x in self._pending_batches
                    if x not in bad_batches
                ])
                return None
            else:
                builder.add_batch(batch)
                committed_txn_cache.add_batch(batch)
                if result.state_hash is not None:
                    state_hash = result.state_hash
        else:
            bad_batches.append(batch)
            LOGGER.debug("Batch %s invalid, not added to block.",
                         batch.header_signature)

    if state_hash is None or not builder.batches:
        LOGGER.debug("Abandoning block %s: no batches added", builder)
        return None

    if not self._consensus.finalize_block(builder.block_header):
        LOGGER.debug(
            "Abandoning block %s, consensus failed to finalize "
            "it", builder)
        # Return all valid batches to the pending_batches list.
        pending_batches.clear()
        pending_batches.extend(
            [x for x in self._pending_batches if x not in bad_batches])
        return None

    builder.set_state_hash(state_hash)
    self._sign_block(builder, identity_signer)
    return builder.build_block()
def _build_candidate_block(self, chain_head):
    """Construct a new candidate block on top of ``chain_head``.

    Instantiates the configured consensus implementation, loads any
    batch injectors, initializes the new block with consensus, wires up
    a scheduler and commit cache, and stores the result on
    ``self._candidate_block`` seeded with pending batches.

    :param chain_head: The block to build on top of.
    :return: None; returns early (None) when consensus is not ready.
    """
    head_state_view = BlockWrapper.state_view_for_block(
        chain_head, self._state_view_factory)
    module = ConsensusFactory.get_configured_consensus_module(
        chain_head.header_signature, head_state_view)

    # The settings cache is keyed by chain_head's state root so the
    # batch cap matches the chain head's settings; 0 means unlimited.
    max_batches = int(self._settings_cache.get_setting(
        'sawtooth.publisher.max_batches_per_block',
        chain_head.state_root_hash,
        default_value=0))

    public_key = self._identity_signer.get_public_key().as_hex()
    consensus = module.BlockPublisher(
        block_cache=self._block_cache,
        state_view_factory=self._state_view_factory,
        batch_publisher=self._batch_publisher,
        data_dir=self._data_dir,
        config_dir=self._config_dir,
        validator_id=public_key)

    injectors = []
    if self._batch_injector_factory is not None:
        injectors = self._batch_injector_factory.create_injectors(
            chain_head.identifier)
        if injectors:
            LOGGER.debug("Loaded batch injectors: %s", injectors)

    builder = BlockBuilder(BlockHeader(
        block_num=chain_head.block_num + 1,
        previous_block_id=chain_head.header_signature,
        signer_public_key=public_key))

    if not consensus.initialize_block(builder.block_header):
        # Edge-triggered logging: report "not ready" only once until
        # consensus becomes ready again, to avoid log spam.
        if not self._logging_states.consensus_not_ready:
            self._logging_states.consensus_not_ready = True
            LOGGER.debug("Consensus not ready to build candidate block.")
        return None

    if self._logging_states.consensus_not_ready:
        # Announce recovery exactly once.
        self._logging_states.consensus_not_ready = False
        LOGGER.debug("Consensus is ready to build candidate block.")

    # Fresh scheduler rooted at the chain head's state.
    scheduler = self._transaction_executor.create_scheduler(
        self._squash_handler, chain_head.state_root_hash)

    # Cache tracking transactions committed up to the candidate block.
    txn_commit_cache = TransactionCommitCache(
        self._block_cache.block_store)

    self._transaction_executor.execute(scheduler)
    self._candidate_block = _CandidateBlock(
        self._block_cache.block_store, consensus, scheduler,
        txn_commit_cache, builder, max_batches, injectors,
        SettingsView(head_state_view), public_key)

    # Hand pending batches to the candidate until it refuses more.
    for pending in self._pending_batches:
        if not self._candidate_block.can_add_batch:
            break
        self._candidate_block.add_batch(pending)
def finalize_block(self, identity_signer, pending_batches):
    """Compose the final Block to publish.

    Flushes the scheduler, sorts executed batches into valid /
    unexecuted / failed, has consensus bless the block, and signs it.

    :param identity_signer: the cryptographic signer to sign the block
        with.
    :param pending_batches: list to receive any batches that were
        submitted to add to the block but were not validated before this
        call.
    :return: The generated Block, or None if Block failed to finalize.
        In both cases the pending_batches will contain the list of
        batches that need to be added to the next Block that is built.
    """
    # Pull back any batches the executor has not started on before
    # flushing the scheduler.
    self._scheduler.unschedule_incomplete_batches()
    self._scheduler.finalize()
    self._scheduler.complete(block=True)

    # Transaction cache tracking the transactions committed up to each
    # batch; only valid transactions processed by the scheduler are
    # added.
    committed_txn_cache = TransactionCommitCache(self._block_store)

    builder = self._block_builder
    bad_batches = []  # the list of batches that failed processing
    state_hash = None

    # Walk the pending batch list:
    # - find the state hash for the block; the state hash is placed on
    #   one of the transactions, so every batch must be interrogated to
    #   find it.  If it is on a batch that failed processing then this
    #   block will be abandoned.
    # - sort the batches three ways:
    #   1) valid batches are added to the BlockBuilder for inclusion in
    #      the Block
    #   2) batches that were never executed are returned via
    #      pending_batches
    #   3) batches that failed processing are discarded; they are
    #      tracked in bad_batches so they can be filtered out of
    #      pending_batches when the block is abandoned.
    for batch in self._pending_batches:
        if batch.trace:
            LOGGER.debug("TRACE %s: %s", batch.header_signature,
                         self.__class__.__name__)

        result = self._scheduler.get_batch_execution_result(
            batch.header_signature)
        # A None result means the executor never received the batch; it
        # should be carried over to the next block.
        if result is None:
            # An injected batch had to be in this block, so don't keep
            # it in pending batches.
            if batch.header_signature not in self._injected_batch_ids:
                pending_batches.append(batch)
            else:
                LOGGER.warning(
                    "Failed to inject batch '%s'", batch.header_signature)
        elif result.is_valid:
            # Belt-and-suspenders check: a transaction whose dependency
            # failed can still pass validation, and such a batch must
            # not be added to the block.
            if not self._check_batch_dependencies(batch,
                                                  committed_txn_cache):
                LOGGER.debug("Batch %s invalid, due to missing txn "
                             "dependency.", batch.header_signature)
                # Fixed message: was built by concatenating
                # "...%s:" + "root state..." which emitted
                # "...:root state" with the space missing.
                LOGGER.debug("Abandoning block %s: "
                             "root state hash has invalid txn applied",
                             builder)
                # Reset pending_batches to every batch that passed
                # validation to this point and none that failed.  This
                # batch may have caused later batches to fail, so the
                # batches that failed after this one stay in the list.
                bad_batches.append(batch)
                pending_batches.clear()
                pending_batches.extend([
                    x for x in self._pending_batches
                    if x not in bad_batches
                ])
                return None
            else:
                builder.add_batch(batch)
                committed_txn_cache.add_batch(batch)
                if result.state_hash is not None:
                    state_hash = result.state_hash
        else:
            bad_batches.append(batch)
            LOGGER.debug("Batch %s invalid, not added to block.",
                         batch.header_signature)

    if state_hash is None or not builder.batches:
        LOGGER.debug("Abandoning block %s: no batches added", builder)
        return None

    if not self._consensus.finalize_block(builder.block_header):
        LOGGER.debug("Abandoning block %s, consensus failed to finalize "
                     "it", builder)
        # Return all valid batches to the pending_batches list.
        pending_batches.clear()
        pending_batches.extend([x for x in self._pending_batches
                                if x not in bad_batches])
        return None

    builder.set_state_hash(state_hash)
    self._sign_block(builder, identity_signer)
    return builder.build_block()