def _submit_blocks_for_verification(self, blocks):
    """Schedule asynchronous validation of each block in ``blocks``.

    A ``BlockValidator`` is created per block and submitted to the thread
    pool; each validator is recorded in ``_blocks_processing`` keyed by the
    block's header signature so ``on_block_validated`` can correlate
    results.

    Args:
        blocks (iterable of BlockWrapper): blocks to verify.
    """
    if not blocks:
        return

    # The state view and consensus module are derived from the current
    # chain head, not from the individual blocks, so resolve them once
    # instead of recomputing them on every loop iteration.
    state_view = BlockWrapper.state_view_for_block(
        self.chain_head, self._state_view_factory)
    consensus_module = \
        ConsensusFactory.get_configured_consensus_module(
            self.chain_head.header_signature, state_view)

    for blkw in blocks:
        validator = BlockValidator(
            consensus_module=consensus_module,
            new_block=blkw,
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            done_cb=self.on_block_validated,
            executor=self._transaction_executor,
            squash_handler=self._squash_handler,
            identity_signer=self._identity_signer,
            data_dir=self._data_dir,
            config_dir=self._config_dir,
            permission_verifier=self._permission_verifier,
            metrics_registry=self._metrics_registry)
        self._blocks_processing[blkw.block.header_signature] = validator
        self._thread_pool.submit(validator.run)
def initialize_block(self, block_header):
    """Prepare to claim a candidate block under Devmode consensus.

    Refreshes the wait-time settings from on-chain configuration, stamps
    the header's consensus field, and starts a randomized wait interval.

    Args:
        block_header (BlockHeader): the BlockHeader to initialize.
    Returns:
        True
    """
    # Build a state view at the current chain head so the on-chain
    # configuration values can be read.
    chain_head = self._block_cache.block_store.chain_head
    config_view = ConfigView(
        BlockWrapper.state_view_for_block(
            chain_head, self._state_view_factory))

    self._min_wait_time = config_view.get_setting(
        "sawtooth.consensus.min_wait_time", self._min_wait_time, int)
    self._max_wait_time = config_view.get_setting(
        "sawtooth.consensus.max_wait_time", self._max_wait_time, int)
    self._valid_block_publishers = config_view.get_setting(
        "sawtooth.consensus.valid_block_publishers",
        self._valid_block_publishers, list)

    block_header.consensus = b"Devmode"

    # Record the start of the claim attempt and draw a random wait
    # duration from the configured window.
    self._start_time = time.time()
    self._wait_time = random.uniform(
        self._min_wait_time, self._max_wait_time)
    return True
def initialize_block(self, block_header): """Do initialization necessary for the consensus to claim a block, this may include initiating voting activates, starting proof of work hash generation, or create a PoET wait timer. Args: block_header (BlockHeader): the BlockHeader to initialize. Returns: True """ # Using the current chain head, we need to create a state view so we # can get our config values. state_view = \ BlockWrapper.state_view_for_block( self._block_cache.block_store.chain_head, self._state_view_factory) settings_view = SettingsView(state_view) self._difficulty = settings_view.get_setting( "sawtooth.truss.difficulty", self._difficulty, int) block_header.consensus = b"Truss" self._nonce = random.uniform( self._min_wait_time, self._max_wait_time) return True
def initialize_block(self, block_header):
    """Prepare to claim a candidate block under Devmode consensus.

    Reads the wait-time window and publisher whitelist from on-chain
    settings, marks the header as Devmode, and begins a randomized wait.

    Args:
        block_header (BlockHeader): the BlockHeader to initialize.
    Returns:
        True
    """
    # A state view anchored at the current chain head gives access to the
    # on-chain settings.
    settings_view = SettingsView(
        BlockWrapper.state_view_for_block(
            self._block_cache.block_store.chain_head,
            self._state_view_factory))

    self._min_wait_time = settings_view.get_setting(
        "sawtooth.consensus.min_wait_time", self._min_wait_time, int)
    self._max_wait_time = settings_view.get_setting(
        "sawtooth.consensus.max_wait_time", self._max_wait_time, int)
    self._valid_block_publishers = settings_view.get_setting(
        "sawtooth.consensus.valid_block_publishers",
        self._valid_block_publishers, list)

    block_header.consensus = b"Devmode"

    # Note when the claim attempt began and choose how long to wait
    # before publishing.
    self._start_time = time.time()
    self._wait_time = random.uniform(
        self._min_wait_time, self._max_wait_time)
    return True
def _load_consensus(self, block):
    """Resolve the consensus module for the state as of *block*.

    Falls back to the genesis consensus module when no block is given.
    """
    if block is None:
        # No block yet: this must be the genesis case.
        return ConsensusFactory.get_consensus_module('genesis')

    state_view = BlockWrapper.state_view_for_block(
        block, self._state_view_factory)
    return ConsensusFactory.get_configured_consensus_module(
        block.header_signature, state_view)
def _load_consensus(self, block):
    """Return the consensus module configured as of *block*'s state.

    When *block* is None the genesis consensus module is used instead.
    """
    if block is None:
        return ConsensusFactory.get_consensus_module('genesis')

    # Look up the consensus configured in state at this block.
    return ConsensusFactory.get_configured_consensus_module(
        block.header_signature,
        BlockWrapper.state_view_for_block(
            block, self._state_view_factory))
def initialize_block(self, previous_block): """Begin building a new candidate block. Args: previous_block (BlockWrapper): The block to base the new block on. Raises: ConsensusNotReady Consensus is not ready to build a block """ # using previous_block so so we can use the setting_cache max_batches = int( self._settings_cache.get_setting( 'sawtooth.publisher.max_batches_per_block', previous_block.state_root_hash, default_value=0)) state_view = BlockWrapper.state_view_for_block( previous_block, self._state_view_factory) public_key = self._identity_signer.get_public_key().as_hex() consensus = self._load_consensus(previous_block, state_view, public_key) batch_injectors = self._load_injectors(previous_block) block_header = BlockHeader( block_num=previous_block.block_num + 1, previous_block_id=previous_block.header_signature, signer_public_key=public_key) block_builder = BlockBuilder(block_header) if not consensus.initialize_block(block_builder.block_header): raise ConsensusNotReady() # create a new scheduler scheduler = self._transaction_executor.create_scheduler( previous_block.state_root_hash) # build the TransactionCommitCache committed_txn_cache = TransactionCommitCache( self._block_cache.block_store) self._transaction_executor.execute(scheduler) self._candidate_block = _CandidateBlock( self._block_cache.block_store, consensus, scheduler, committed_txn_cache, block_builder, max_batches, batch_injectors, SettingsView(state_view), self._identity_signer) for batch in self._pending_batches: if self._candidate_block.can_add_batch(): self._candidate_block.add_batch(batch) else: break
def _build_candidate_block(self, chain_head):
    """Build a candidate block and construct the consensus object to
    validate it.

    The candidate is stored on ``self._candidate_block``; note this
    method itself returns ``None`` (both when consensus refuses and on
    the normal path -- there is no return of the builder).

    :param chain_head: The block to build on top of.
    :return: None.  Returns early (None) when consensus is not ready.
    """
    state_view = BlockWrapper.state_view_for_block(
        chain_head, self._state_view_factory)
    consensus_module = ConsensusFactory.get_configured_consensus_module(
        chain_head.header_signature, state_view)

    # Read the batch cap for a block from on-chain configuration.
    config_view = ConfigView(state_view)
    max_batches = config_view.get_setting(
        'sawtooth.publisher.max_batches_per_block',
        default_value=0, value_type=int)

    consensus = consensus_module.\
        BlockPublisher(block_cache=self._block_cache,
                       state_view_factory=self._state_view_factory,
                       batch_publisher=self._batch_publisher,
                       data_dir=self._data_dir,
                       config_dir=self._config_dir,
                       validator_id=self._identity_public_key)

    block_header = BlockHeader(
        block_num=chain_head.block_num + 1,
        previous_block_id=chain_head.header_signature,
        signer_pubkey=self._identity_public_key)
    block_builder = BlockBuilder(block_header)
    if not consensus.initialize_block(block_builder.block_header):
        LOGGER.debug("Consensus not ready to build candidate block.")
        return None

    # create a new scheduler
    scheduler = self._transaction_executor.create_scheduler(
        self._squash_handler, chain_head.state_root_hash)

    # build the TransactionCache
    committed_txn_cache = TransactionCache(self._block_cache.block_store)

    self._transaction_executor.execute(scheduler)
    self._candidate_block = _CandidateBlock(self._block_cache.block_store,
                                            consensus, scheduler,
                                            committed_txn_cache,
                                            block_builder,
                                            max_batches)
    # NOTE(review): `can_add_batch` is read as an attribute here but
    # called as a method in other variants of this code -- if it is a
    # plain method on this _CandidateBlock, the truth test is always
    # True.  Confirm against _CandidateBlock's definition.
    for batch in self._pending_batches:
        if self._candidate_block.can_add_batch:
            self._candidate_block.add_batch(batch)
        else:
            break
def _build_candidate_block(self, chain_head):
    """Build a candidate block and construct the consensus object to
    validate it.

    The candidate is stored on ``self._candidate_block``; this method
    itself returns ``None`` on every path (early return when consensus
    refuses; implicit None otherwise).

    :param chain_head: The block to build on top of.
    :return: None.
    """
    state_view = BlockWrapper.state_view_for_block(
        chain_head, self._state_view_factory)
    consensus_module = ConsensusFactory.get_configured_consensus_module(
        chain_head.header_signature, state_view)

    # Per-block batch cap from on-chain settings.
    settings_view = SettingsView(state_view)
    max_batches = settings_view.get_setting(
        'sawtooth.publisher.max_batches_per_block',
        default_value=0, value_type=int)

    consensus = consensus_module.\
        BlockPublisher(block_cache=self._block_cache,
                       state_view_factory=self._state_view_factory,
                       batch_publisher=self._batch_publisher,
                       data_dir=self._data_dir,
                       config_dir=self._config_dir,
                       validator_id=self._identity_public_key)

    block_header = BlockHeader(
        block_num=chain_head.block_num + 1,
        previous_block_id=chain_head.header_signature,
        signer_pubkey=self._identity_public_key)
    block_builder = BlockBuilder(block_header)
    if not consensus.initialize_block(block_builder.block_header):
        LOGGER.debug("Consensus not ready to build candidate block.")
        return None

    # create a new scheduler
    scheduler = self._transaction_executor.create_scheduler(
        self._squash_handler, chain_head.state_root_hash)

    # build the TransactionCache
    committed_txn_cache = TransactionCache(self._block_cache.block_store)

    self._transaction_executor.execute(scheduler)
    self._candidate_block = _CandidateBlock(self._block_cache.block_store,
                                            consensus, scheduler,
                                            committed_txn_cache,
                                            block_builder,
                                            max_batches)
    # NOTE(review): `can_add_batch` is truth-tested as an attribute here
    # but invoked as a method in sibling versions of this function --
    # verify it is a property on this _CandidateBlock.
    for batch in self._pending_batches:
        if self._candidate_block.can_add_batch:
            self._candidate_block.add_batch(batch)
        else:
            break
def finalize_block(self, block_header): """Finalize a block to be claimed. Provide any signatures and data updates that need to be applied to the block before it is signed and broadcast to the network. Args: block_header (BlockHeader): The block header for the candidate block that needs to be finalized. Returns: Boolean: True if the candidate block good and should be generated. False if the block should be abandoned. """ # To compute the block hash, we are going to perform a hash using the # previous block ID and the batch IDs contained in the block hasher = hashlib.sha256(block_header.previous_block_id.encode()) for batch_id in block_header.batch_ids: hasher.update(batch_id.encode()) block_hash = hasher.hexdigest() # Using the current chain head, we need to create a state view so we # can create a PoET enclave. state_view = \ BlockWrapper.state_view_for_block( block_wrapper=self._block_cache.block_store.chain_head, state_view_factory=self._state_view_factory) poet_enclave_module = \ factory.PoetEnclaveFactory.get_poet_enclave_module( state_view=state_view, config_dir=self._config_dir, data_dir=self._data_dir) # We need to create a wait certificate for the block and then serialize # that into the block header consensus field. active_key = self._poet_key_state_store.active_key poet_key_state = self._poet_key_state_store[active_key] sealed_signup_data = poet_key_state.sealed_signup_data try: wait_certificate = \ WaitCertificate.create_wait_certificate( poet_enclave_module=poet_enclave_module, sealed_signup_data=sealed_signup_data, wait_timer=self._wait_timer, block_hash=block_hash) block_header.consensus = \ json.dumps(wait_certificate.dump()).encode() except ValueError as ve: LOGGER.error('Failed to create wait certificate: %s', ve) return False LOGGER.debug('Created wait certificate: %s', wait_certificate) return True
def _build_block(self, chain_head):
    """ Build a candidate block and construct the consensus object to
    validate it.
    :param chain_head: The block to build on top of.
    :return: (BlockBuilder) - The candidate block in a BlockBuilder
        wrapper, or None if consensus is not ready to build.
    """
    state_view = BlockWrapper.state_view_for_block(
        chain_head, self._state_view_factory)
    consensus_module = ConsensusFactory.get_configured_consensus_module(
        chain_head.header_signature, state_view)

    self._consensus = consensus_module.\
        BlockPublisher(block_cache=self._block_cache,
                       state_view_factory=self._state_view_factory,
                       batch_publisher=self._batch_publisher,
                       data_dir=self._data_dir,
                       validator_id=self._identity_public_key)

    block_header = BlockHeader(
        block_num=chain_head.block_num + 1,
        previous_block_id=chain_head.header_signature,
        signer_pubkey=self._identity_public_key)
    block_builder = BlockBuilder(block_header)
    if not self._consensus.initialize_block(block_builder.block_header):
        LOGGER.debug("Consensus not ready to build candidate block.")
        return None

    # Cancel the previous scheduler if it did not complete.
    if self._scheduler is not None \
            and not self._scheduler.complete(block=False):
        self._scheduler.cancel()

    # create a new scheduler
    self._scheduler = self._transaction_executor.create_scheduler(
        self._squash_handler, chain_head.state_root_hash)

    # build the TransactionCache
    self._committed_txn_cache = TransactionCache(
        self._block_cache.block_store)
    if chain_head.header_signature not in self._block_cache.block_store:
        # If we are opportunistically building on a block that has not
        # been committed to the store yet, record its transactions so
        # they are treated as already committed.
        for batch in chain_head.block.batches:
            for txn in batch.transactions:
                self._committed_txn_cache.add_txn(txn.header_signature)

    self._transaction_executor.execute(self._scheduler)

    # Re-validate the pending batches against the new candidate state.
    for batch in self._pending_batches:
        self._validate_batch(batch)

    return block_builder
def _submit_blocks_for_verification(self, blocks):
    """Hand *blocks* to the block validator for asynchronous verification.

    The consensus module is resolved once from the current chain head,
    every block is recorded in the processing table, and the whole batch
    is submitted to the validator with ``on_block_validated`` as the
    completion callback.
    """
    state_view = BlockWrapper.state_view_for_block(
        self.chain_head, self._state_view_factory)
    consensus_module = \
        ConsensusFactory.get_configured_consensus_module(
            self.chain_head.header_signature, state_view)

    # Track each block as in-flight before submitting the batch.
    for block_wrapper in blocks:
        signature = block_wrapper.block.header_signature
        self._blocks_processing[signature] = self._block_validator

    self._block_validator.submit_blocks_for_verification(
        blocks, consensus_module, self.on_block_validated)
def initialize_block(self, block_header): """Do initialization necessary for the consensus to claim a block, this may include initiating voting activates, starting proof of work hash generation, or create a PoET wait timer. Args: block_header (BlockHeader): the BlockHeader to initialize. Returns: True """ # Using the current chain head, we need to create a state view so we # can get our config values. state_view = BlockWrapper.state_view_for_block( self._block_cache.block_store.chain_head, self._state_view_factory) self._start_time = time.time() settings_view = SettingsView(state_view) self._expected_block_interval = settings_view.get_setting( "sawtooth.consensus.pow.seconds_between_blocks", self._expected_block_interval, int) self._difficulty_adjustment_block_count = settings_view.get_setting( "sawtooth.consensus.pow.difficulty_adjustment_block_count", self._difficulty_adjustment_block_count, int) self._difficulty_tuning_block_count = settings_view.get_setting( "sawtooth.consensus.pow.difficulty_tuning_block_count", self._difficulty_tuning_block_count, int) self._valid_block_publishers = settings_view.get_setting( "sawtooth.consensus.valid_block_publishers", self._valid_block_publishers, list) prev_block = self._block_cache[block_header.previous_block_id] prev_consensus = prev_block.consensus.split(GLUE) if prev_consensus[IDX_POW] != POW: difficulty = INITIAL_DIFFICULTY else: difficulty = self._get_adjusted_difficulty(prev_block, prev_consensus, self._start_time) SOLVER._comm.send([ self._start_time, difficulty, block_header.previous_block_id.encode(), block_header.signer_public_key.encode() ]) LOGGER.info('New block using Gluwa PoW consensus') return True
def __init__(self, block_cache, state_view_factory, data_dir,
             config_dir, validator_id):
    """Initialize the publisher and read the Truss difficulty setting
    from the state at the current chain head.

    Args:
        block_cache: cache/store of blocks, used to reach the chain head.
        state_view_factory: factory for state views at a given root.
        data_dir: validator data directory (passed through to the base).
        config_dir: validator config directory (passed through to the base).
        validator_id: this validator's identifier (passed through).
    """
    super().__init__(
        block_cache,
        state_view_factory,
        data_dir,
        config_dir,
        validator_id)

    # Kept locally as well as in the base class, since they are used
    # directly below.
    self._block_cache = block_cache
    self._state_view_factory = state_view_factory

    state_view = \
        BlockWrapper.state_view_for_block(
            self._block_cache.block_store.chain_head,
            self._state_view_factory)

    settings_view = SettingsView(state_view)
    # NOTE(review): self._difficulty is read here (as the setting's
    # default) before being assigned in this __init__ -- assumes a class
    # attribute or base-class default exists; confirm.
    self._difficulty = settings_view.get_setting(
        "sawtooth.truss.difficulty", self._difficulty, int)
def _submit_blocks_for_verification(self, blocks):
    """Schedule asynchronous validation of each block in ``blocks``.

    A ``BlockValidator`` is created per block and submitted to the
    executor; each validator is tracked in ``_blocks_processing`` keyed
    by the block's header signature so ``on_block_validated`` can find it.

    Args:
        blocks (iterable of BlockWrapper): blocks to verify.
    """
    if not blocks:
        return

    # The state view and consensus module depend only on the current
    # chain head, not on the individual blocks, so compute them once
    # instead of once per block inside the loop.
    state_view = BlockWrapper.state_view_for_block(
        self.chain_head, self._state_view_factory)
    consensus_module = \
        ConsensusFactory.get_configured_consensus_module(
            self.chain_head.header_signature, state_view)

    for blkw in blocks:
        validator = BlockValidator(
            consensus_module=consensus_module,
            new_block=blkw,
            chain_head=self._chain_head,
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            done_cb=self.on_block_validated,
            executor=self._transaction_executor,
            squash_handler=self._squash_handler,
            identity_signing_key=self._identity_signing_key,
            data_dir=self._data_dir,
            config_dir=self._config_dir)
        self._blocks_processing[blkw.block.header_signature] = validator
        self._executor.submit(validator.run)
def initialize_block(self, block_header):
    """Do initialization necessary for the consensus to claim a block;
    this proxy variant defers to an externally registered consensus.

    Reads the wait-time window and publisher whitelist from on-chain
    settings, stamps the header with the registered consensus name, and
    starts a randomized wait interval.

    Args:
        block_header (BlockHeader): the BlockHeader to initialize.
    Returns:
        Boolean: True on success; False when no external consensus has
        been registered yet.
    """
    if not self._consensus:
        # Fixed typo ("regitered") and dropped the embedded newline --
        # the logging framework terminates records itself.
        LOGGER.debug(
            "initialize_block: external consensus not registered")
        return False

    # Using the current chain head, we need to create a state view so we
    # can get our config values.
    state_view = BlockWrapper.state_view_for_block(
        self._block_cache.block_store.chain_head,
        self._state_view_factory)

    settings_view = SettingsView(state_view)
    self._min_wait_time = settings_view.get_setting(
        "bgx.consensus.min_wait_time", self._min_wait_time, float)
    self._max_wait_time = settings_view.get_setting(
        "bgx.consensus.max_wait_time", self._max_wait_time, float)
    self._valid_block_publishers = settings_view.get_setting(
        "bgx.consensus.valid_block_publishers",
        self._valid_block_publishers, list)

    # The header carries the externally registered consensus name
    # (where Devmode would write b"Devmode").
    block_header.consensus = self._consensus

    self._start_time = time.time()
    self._wait_time = random.uniform(
        self._min_wait_time, self._max_wait_time)

    LOGGER.debug(
        "PROXY:initialize_block min_wait_time=%s max_wait_time=%s",
        self._min_wait_time, self._max_wait_time)
    return True
def check_publish_block(self, block_header):
    """Check if a candidate block is ready to be claimed.

    Args:
        block_header (BlockHeader): the block_header to be checked if it
            should be claimed.
    Returns:
        Boolean: True if the candidate block_header should be claimed.
    """
    # Refresh the difficulty from on-chain settings at the current chain
    # head before testing the proof of work.
    state_view = \
        BlockWrapper.state_view_for_block(
            self._block_cache.block_store.chain_head,
            self._state_view_factory)
    settings_view = SettingsView(state_view)
    self._difficulty = settings_view.get_setting(
        "sawtooth.truss.difficulty", self._difficulty, int)

    # If a publisher whitelist is configured, only whitelisted signers may
    # claim.  The previous `any(key != signer ...)` test rejected whenever
    # ANY whitelisted key differed from the signer, so with two or more
    # entries nobody could ever publish (and a None whitelist raised a
    # TypeError); membership is the intended check.
    if self._valid_block_publishers \
            and block_header.signer_public_key \
            not in self._valid_block_publishers:
        return False

    return proof_of_work(block_header.state_hash, self._nonce)
def _build_candidate_block(self, chain_head):
    """Build a candidate block and construct the consensus object to
    validate it.

    The candidate is stored on ``self._candidate_block`` and seeded with
    the pending batches; note this method returns ``None`` on every path
    (explicit None when consensus refuses, implicit None otherwise).

    :param chain_head: The block to build on top of.
    :return: None.
    """
    state_view = BlockWrapper.state_view_for_block(
        chain_head, self._state_view_factory)
    consensus_module = ConsensusFactory.get_configured_consensus_module(
        chain_head.header_signature, state_view)

    # using chain_head's state root so we can use the settings cache
    max_batches = int(
        self._settings_cache.get_setting(
            'sawtooth.publisher.max_batches_per_block',
            chain_head.state_root_hash,
            default_value=0))

    public_key = self._identity_signer.get_public_key().as_hex()
    consensus = consensus_module.\
        BlockPublisher(block_cache=self._block_cache,
                       state_view_factory=self._state_view_factory,
                       batch_publisher=self._batch_publisher,
                       data_dir=self._data_dir,
                       config_dir=self._config_dir,
                       validator_id=public_key)

    batch_injectors = []
    if self._batch_injector_factory is not None:
        batch_injectors = self._batch_injector_factory.create_injectors(
            chain_head.identifier)
        if batch_injectors:
            LOGGER.debug("Loaded batch injectors: %s", batch_injectors)

    block_header = BlockHeader(
        block_num=chain_head.block_num + 1,
        previous_block_id=chain_head.header_signature,
        signer_public_key=public_key)
    block_builder = BlockBuilder(block_header)

    if not consensus.initialize_block(block_builder.block_header):
        # Log the not-ready transition only once (edge-triggered).
        if not self._logging_states.consensus_not_ready:
            self._logging_states.consensus_not_ready = True
            LOGGER.debug("Consensus not ready to build candidate block.")
        return None

    # Log the recovery transition only once as well.
    if self._logging_states.consensus_not_ready:
        self._logging_states.consensus_not_ready = False
        LOGGER.debug("Consensus is ready to build candidate block.")

    # create a new scheduler
    scheduler = self._transaction_executor.create_scheduler(
        self._squash_handler, chain_head.state_root_hash)

    # build the TransactionCommitCache
    committed_txn_cache = TransactionCommitCache(
        self._block_cache.block_store)

    self._transaction_executor.execute(scheduler)
    self._candidate_block = _CandidateBlock(
        self._block_cache.block_store,
        consensus, scheduler,
        committed_txn_cache,
        block_builder,
        max_batches,
        batch_injectors,
        SettingsView(state_view),
        public_key)

    # NOTE(review): `can_add_batch` is truth-tested as an attribute here
    # but called as a method in sibling variants -- verify it is a
    # property on this _CandidateBlock.
    for batch in self._pending_batches:
        if self._candidate_block.can_add_batch:
            self._candidate_block.add_batch(batch)
        else:
            break
def initialize_block(self, block_header):
    """Do initialization necessary for the consensus to claim a block,
    this may include initiating voting activities, starting proof of work
    hash generation, or create a PoET wait timer.

    For PoET this walks a series of eligibility gates (signup/key state,
    freshness, block claim limit, claim delay, zTest) and, if all pass,
    creates and stores the wait timer used by later publish checks.

    Args:
        block_header (BlockHeader): The BlockHeader to initialize.

    Returns:
        Boolean: True if the candidate block should be built. False if
        no candidate should be built.
    """
    # If the previous block ID matches our cached one, that means that we
    # have already determined that even if we initialize the requested
    # block we would not be able to claim it.  So, instead of wasting time
    # doing all of the checking again, simply short-circuit the failure so
    # that the validator can go do something more useful.
    if block_header.previous_block_id == \
            PoetBlockPublisher._previous_block_id:
        return False
    PoetBlockPublisher._previous_block_id = block_header.previous_block_id

    # Using the current chain head, we need to create a state view so we
    # can create a PoET enclave.
    state_view = \
        BlockWrapper.state_view_for_block(
            block_wrapper=self._block_cache.block_store.chain_head,
            state_view_factory=self._state_view_factory)

    poet_enclave_module = \
        factory.PoetEnclaveFactory.get_poet_enclave_module(
            state_view=state_view,
            config_dir=self._config_dir,
            data_dir=self._data_dir)

    # Get our validator registry entry to see what PoET public key
    # other validators think we are using.
    validator_registry_view = ValidatorRegistryView(state_view)
    validator_info = None

    try:
        validator_id = block_header.signer_public_key
        validator_info = \
            validator_registry_view.get_validator_info(
                validator_id=validator_id)
    except KeyError:
        # Not registered (yet) -- handled below.
        pass

    # If we don't have a validator registry entry, then check the active
    # key.  If we don't have one, then we need to sign up.  If we do have
    # one, then our validator registry entry has not percolated through
    # the system, so nothing to do but wait.
    active_poet_public_key = self._poet_key_state_store.active_key
    if validator_info is None:
        if active_poet_public_key is None:
            LOGGER.debug(
                'No public key found, so going to register new signup '
                'information')
            self._register_signup_information(
                block_header=block_header,
                poet_enclave_module=poet_enclave_module)
        else:
            # Check if we need to give up on this registration attempt
            try:
                nonce = self._poet_key_state_store[
                    active_poet_public_key].signup_nonce
            except (ValueError, AttributeError):
                # The stored key state is unreadable: clear the active
                # key so a fresh signup can happen next time.
                self._poet_key_state_store.active_key = None
                LOGGER.warning('Poet Key State Store had inaccessible or '
                               'corrupt active key [%s] clearing '
                               'key.', active_poet_public_key)
                return False

            self._handle_registration_timeout(
                block_header=block_header,
                poet_enclave_module=poet_enclave_module,
                state_view=state_view,
                signup_nonce=nonce,
                poet_public_key=active_poet_public_key
            )
        return False

    # Retrieve the key state corresponding to the PoET public key in our
    # validator registry entry.
    poet_key_state = None
    try:
        poet_key_state = \
            self._poet_key_state_store[
                validator_info.signup_info.poet_public_key]
    except (ValueError, KeyError):
        pass

    # If there is no key state associated with the PoET public key that
    # other validators think we should be using, then we need to create
    # new signup information as we have no way whatsoever to publish
    # blocks that other validators will accept.
    if poet_key_state is None:
        LOGGER.debug(
            'PoET public key %s...%s in validator registry not found in '
            'key state store. Sign up again',
            validator_info.signup_info.poet_public_key[:8],
            validator_info.signup_info.poet_public_key[-8:])
        self._register_signup_information(
            block_header=block_header,
            poet_enclave_module=poet_enclave_module)

        # We need to put fake information in the key state store for the
        # PoET public key the other validators think we are using so that
        # we don't try to keep signing up.  However, we are going to mark
        # that key state store entry as being refreshed so that we will
        # never actually try to use it.
        dummy_data = b64encode(b'No sealed signup data').decode('utf-8')
        self._poet_key_state_store[
            validator_info.signup_info.poet_public_key] = \
            PoetKeyState(
                sealed_signup_data=dummy_data,
                has_been_refreshed=True,
                signup_nonce='unknown')

        return False

    # Check the key state.  If it is marked as being refreshed, then we
    # are waiting until our PoET public key is updated in the validator
    # registry and therefore we cannot publish any blocks.
    if poet_key_state.has_been_refreshed:
        LOGGER.debug(
            'PoET public key %s...%s has been refreshed. Wait for new '
            'key to show up in validator registry.',
            validator_info.signup_info.poet_public_key[:8],
            validator_info.signup_info.poet_public_key[-8:])

        # Check if we need to give up on this registration attempt
        self._handle_registration_timeout(
            block_header=block_header,
            poet_enclave_module=poet_enclave_module,
            state_view=state_view,
            signup_nonce=poet_key_state.signup_nonce,
            poet_public_key=active_poet_public_key
        )
        return False

    # If the PoET public key in the validator registry is not the active
    # one, then we need to switch the active key in the key state store.
    if validator_info.signup_info.poet_public_key != \
            active_poet_public_key:
        active_poet_public_key = validator_info.signup_info.poet_public_key
        self._poet_key_state_store.active_key = active_poet_public_key

    # Ensure that the enclave is using the appropriate keys
    try:
        unsealed_poet_public_key = \
            SignupInfo.unseal_signup_data(
                poet_enclave_module=poet_enclave_module,
                sealed_signup_data=poet_key_state.sealed_signup_data)
    except SystemError:
        # Signup data is unuseable
        LOGGER.error(
            'Could not unseal signup data associated with PPK: %s..%s',
            active_poet_public_key[:8],
            active_poet_public_key[-8:])
        self._poet_key_state_store.active_key = None
        return False

    assert active_poet_public_key == unsealed_poet_public_key

    LOGGER.debug(
        'Using PoET public key: %s...%s',
        active_poet_public_key[:8],
        active_poet_public_key[-8:])
    LOGGER.debug(
        'Unseal signup data: %s...%s',
        poet_key_state.sealed_signup_data[:8],
        poet_key_state.sealed_signup_data[-8:])

    consensus_state = \
        ConsensusState.consensus_state_for_block_id(
            block_id=block_header.previous_block_id,
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            consensus_state_store=self._consensus_state_store,
            poet_enclave_module=poet_enclave_module)
    poet_settings_view = PoetSettingsView(state_view)

    # If our signup information does not pass the freshness test, then we
    # know that other validators will reject any blocks we try to claim so
    # we need to try to sign up again.
    if consensus_state.validator_signup_was_committed_too_late(
            validator_info=validator_info,
            poet_settings_view=poet_settings_view,
            block_cache=self._block_cache):
        LOGGER.info(
            'Reject building on block %s: Validator signup information '
            'not committed in a timely manner.',
            block_header.previous_block_id[:8])
        self._register_signup_information(
            block_header=block_header,
            poet_enclave_module=poet_enclave_module)
        return False

    # Using the consensus state for the block upon which we want to
    # build, check to see how many blocks we have claimed on this chain
    # with this PoET key.  If we have hit the key block claim limit, then
    # we need to check if the key has been refreshed.
    if consensus_state.validator_has_claimed_block_limit(
            validator_info=validator_info,
            poet_settings_view=poet_settings_view):
        # Because we have hit the limit, check to see if we have already
        # submitted a validator registry transaction with new signup
        # information, and therefore a new PoET public key.  If not, then
        # mark this PoET public key in the store as having been refreshed
        # and register new signup information.  Regardless, since we have
        # hit the key block claim limit, we won't even bother initializing
        # a block on this chain as it will be rejected by other
        # validators.
        poet_key_state = self._poet_key_state_store[active_poet_public_key]
        if not poet_key_state.has_been_refreshed:
            LOGGER.info(
                'Reached block claim limit for key: %s...%s',
                active_poet_public_key[:8], active_poet_public_key[-8:])

            sealed_signup_data = poet_key_state.sealed_signup_data
            signup_nonce = poet_key_state.signup_nonce
            self._poet_key_state_store[active_poet_public_key] = \
                PoetKeyState(
                    sealed_signup_data=sealed_signup_data,
                    has_been_refreshed=True,
                    signup_nonce=signup_nonce)

            # Release enclave resources for this identity.
            # This signup will be invalid on all forks that use it,
            # even if there is a rollback to a point it should be valid.
            # A more sophisticated policy would be to release signups
            # only at a block depth where finality probability
            # is high.
            SignupInfo.release_signup_data(
                poet_enclave_module=poet_enclave_module,
                sealed_signup_data=sealed_signup_data)

            self._register_signup_information(
                block_header=block_header,
                poet_enclave_module=poet_enclave_module)

        LOGGER.info(
            'Reject building on block %s: Validator has reached maximum '
            'number of blocks with key pair.',
            block_header.previous_block_id[:8])
        return False

    # Verify that we are abiding by the block claim delay (i.e., waiting
    # a certain number of blocks since our validator registry was added/
    # updated).
    if consensus_state.validator_is_claiming_too_early(
            validator_info=validator_info,
            block_number=block_header.block_num,
            validator_registry_view=validator_registry_view,
            poet_settings_view=poet_settings_view,
            block_store=self._block_cache.block_store):
        LOGGER.info(
            'Reject building on block %s: Validator has not waited long '
            'enough since registering validator information.',
            block_header.previous_block_id[:8])
        return False

    # We need to create a wait timer for the block...this is what we
    # will check when we are asked if it is time to publish the block
    poet_key_state = self._poet_key_state_store[active_poet_public_key]
    sealed_signup_data = poet_key_state.sealed_signup_data
    previous_certificate_id = \
        utils.get_previous_certificate_id(
            block_header=block_header,
            block_cache=self._block_cache,
            poet_enclave_module=poet_enclave_module)
    wait_timer = \
        WaitTimer.create_wait_timer(
            poet_enclave_module=poet_enclave_module,
            sealed_signup_data=sealed_signup_data,
            validator_address=block_header.signer_public_key,
            previous_certificate_id=previous_certificate_id,
            consensus_state=consensus_state,
            poet_settings_view=poet_settings_view)

    # NOTE - we do the zTest after we create the wait timer because we
    # need its population estimate to see if this block would be accepted
    # by other validators based upon the zTest.

    # Check to see if by chance we were to be able to claim this block
    # if it would result in us winning more frequently than statistically
    # expected.  If so, then refuse to initialize the block because other
    # validators will not accept anyway.
    if consensus_state.validator_is_claiming_too_frequently(
            validator_info=validator_info,
            previous_block_id=block_header.previous_block_id,
            poet_settings_view=poet_settings_view,
            population_estimate=wait_timer.population_estimate(
                poet_settings_view=poet_settings_view),
            block_cache=self._block_cache,
            poet_enclave_module=poet_enclave_module):
        LOGGER.info(
            'Reject building on block %s: '
            'Validator (signing public key: %s) is claiming blocks '
            'too frequently.',
            block_header.previous_block_id[:8],
            block_header.signer_public_key)
        return False

    # At this point, we know that if we are able to claim the block we
    # are initializing, we will not be prevented from doing so because of
    # PoET policies.
    self._wait_timer = wait_timer
    PoetBlockPublisher._previous_block_id = None

    LOGGER.debug('Created wait timer: %s', self._wait_timer)

    return True
    def initialize_block(self, block_header):
        """Do initialization necessary for the consensus to claim a block,
        this may include initiating voting activities, starting proof of work
        hash generation, or create a PoET wait timer.

        Args:
            block_header (BlockHeader): The BlockHeader to initialize.

        Returns:
            Boolean: True if the candidate block should be built. False if
            no candidate should be built.
        """
        # If the previous block ID matches our cached one, that means that we
        # have already determined that even if we initialize the requested
        # block we would not be able to claim it.  So, instead of wasting time
        # doing all of the checking again, simply short-circuit the failure so
        # that the validator can go do something more useful.
        # NOTE: _previous_block_id is a class-level cache, so it is shared
        # across instances of this publisher.
        if block_header.previous_block_id == \
                PoetBlockPublisher._previous_block_id:
            return False
        PoetBlockPublisher._previous_block_id = block_header.previous_block_id

        # Using the current chain head, we need to create a state view so we
        # can create a PoET enclave.
        state_view = \
            BlockWrapper.state_view_for_block(
                block_wrapper=self._block_cache.block_store.chain_head,
                state_view_factory=self._state_view_factory)

        poet_enclave_module = \
            factory.PoetEnclaveFactory.get_poet_enclave_module(
                state_view=state_view,
                config_dir=self._config_dir,
                data_dir=self._data_dir)

        # Get our validator registry entry to see what PoET public key
        # other validators think we are using.
        validator_registry_view = ValidatorRegistryView(state_view)
        validator_info = None

        try:
            # The registry is keyed by the block signer's public key.
            validator_id = block_header.signer_pubkey
            validator_info = \
                validator_registry_view.get_validator_info(
                    validator_id=validator_id)
        except KeyError:
            # Not registered (yet) - handled below.
            pass

        # If we don't have a validator registry entry, then check the active
        # key.  If we don't have one, then we need to sign up.  If we do have
        # one, then our validator registry entry has not percolated through
        # the system, so nothing to do but wait.
        active_poet_public_key = self._poet_key_state_store.active_key
        if validator_info is None:
            if active_poet_public_key is None:
                LOGGER.debug(
                    'No public key found, so going to register new signup '
                    'information')
                self._register_signup_information(
                    block_header=block_header,
                    poet_enclave_module=poet_enclave_module)

            return False

        # Otherwise, we have a current validator registry entry.  In that
        # case, we need to make sure that we are using the same PPK that the
        # other validators think we are using.  If not, then we need to switch
        # the PoET enclave to using the correct keys.
        elif validator_info.signup_info.poet_public_key != \
                active_poet_public_key:
            # Retrieve the key state corresponding to the PoET public key and
            # use it to re-establish the key used by the enclave.  Also update
            # the active PoET public key.
            poet_key_state = \
                self._poet_key_state_store[
                    validator_info.signup_info.poet_public_key]

            active_poet_public_key = \
                SignupInfo.unseal_signup_data(
                    poet_enclave_module=poet_enclave_module,
                    sealed_signup_data=poet_key_state.sealed_signup_data)
            self._poet_key_state_store.active_key = active_poet_public_key

            # Unsealing should reproduce exactly the key recorded in the
            # registry; anything else indicates corrupted key state.
            assert active_poet_public_key == \
                validator_info.signup_info.poet_public_key

            LOGGER.debug(
                'Switched to public key: %s...%s',
                active_poet_public_key[:8],
                active_poet_public_key[-8:])
            LOGGER.debug(
                'Unseal signup data: %s...%s',
                poet_key_state.sealed_signup_data[:8],
                poet_key_state.sealed_signup_data[-8:])

        # Build the consensus state as of the block we want to build on.
        consensus_state = \
            ConsensusState.consensus_state_for_block_id(
                block_id=block_header.previous_block_id,
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                consensus_state_store=self._consensus_state_store,
                poet_enclave_module=poet_enclave_module)
        poet_settings_view = PoetSettingsView(state_view)

        # If our signup information does not pass the freshness test, then we
        # know that other validators will reject any blocks we try to claim so
        # we need to try to sign up again.
        if consensus_state.validator_signup_was_committed_too_late(
                validator_info=validator_info,
                poet_settings_view=poet_settings_view,
                block_cache=self._block_cache):
            LOGGER.info(
                'Reject building on block %s: Validator signup information '
                'not committed in a timely manner.',
                block_header.previous_block_id[:8])
            self._register_signup_information(
                block_header=block_header,
                poet_enclave_module=poet_enclave_module)
            return False

        # Using the consensus state for the block upon which we want to
        # build, check to see how many blocks we have claimed on this chain
        # with this PoET key.  If we have hit the key block claim limit, then
        # we need to check if the key has been refreshed.
        if consensus_state.validator_has_claimed_block_limit(
                validator_info=validator_info,
                poet_settings_view=poet_settings_view):
            # Because we have hit the limit, check to see if we have already
            # submitted a validator registry transaction with new signup
            # information, and therefore a new PoET public key.  If not, then
            # mark this PoET public key in the store as having been refreshed
            # and register new signup information.  Regardless, since we have
            # hit the key block claim limit, we won't even bother initializing
            # a block on this chain as it will be rejected by other
            # validators.
            poet_key_state = self._poet_key_state_store[active_poet_public_key]
            if not poet_key_state.has_been_refreshed:
                LOGGER.info(
                    'Reached block claim limit for key: %s...%s',
                    active_poet_public_key[:8],
                    active_poet_public_key[-8:])

                sealed_signup_data = poet_key_state.sealed_signup_data
                self._poet_key_state_store[active_poet_public_key] = \
                    PoetKeyState(
                        sealed_signup_data=sealed_signup_data,
                        has_been_refreshed=True)

                self._register_signup_information(
                    block_header=block_header,
                    poet_enclave_module=poet_enclave_module)

            LOGGER.info(
                'Reject building on block %s: Validator has reached maximum '
                'number of blocks with key pair.',
                block_header.previous_block_id[:8])
            return False

        # Verify that we are abiding by the block claim delay (i.e., waiting a
        # certain number of blocks since our validator registry was added/
        # updated).
        if consensus_state.validator_is_claiming_too_early(
                validator_info=validator_info,
                block_number=block_header.block_num,
                validator_registry_view=validator_registry_view,
                poet_settings_view=poet_settings_view,
                block_store=self._block_cache.block_store):
            LOGGER.info(
                'Reject building on block %s: Validator has not waited long '
                'enough since registering validator information.',
                block_header.previous_block_id[:8])
            return False

        # We need to create a wait timer for the block...this is what we
        # will check when we are asked if it is time to publish the block
        previous_certificate_id = \
            utils.get_previous_certificate_id(
                block_header=block_header,
                block_cache=self._block_cache,
                poet_enclave_module=poet_enclave_module)
        wait_timer = \
            WaitTimer.create_wait_timer(
                poet_enclave_module=poet_enclave_module,
                validator_address=block_header.signer_pubkey,
                previous_certificate_id=previous_certificate_id,
                consensus_state=consensus_state,
                poet_settings_view=poet_settings_view)

        # NOTE - we do the zTest after we create the wait timer because we
        # need its population estimate to see if this block would be accepted
        # by other validators based upon the zTest.

        # Check to see if by chance we were to be able to claim this block
        # if it would result in us winning more frequently than statistically
        # expected.  If so, then refuse to initialize the block because other
        # validators will not accept anyway.
        if consensus_state.validator_is_claiming_too_frequently(
                validator_info=validator_info,
                previous_block_id=block_header.previous_block_id,
                poet_settings_view=poet_settings_view,
                population_estimate=wait_timer.population_estimate(
                    poet_settings_view=poet_settings_view),
                block_cache=self._block_cache,
                poet_enclave_module=poet_enclave_module):
            LOGGER.info(
                'Reject building on block %s: Validator is claiming blocks '
                'too frequently.',
                block_header.previous_block_id[:8])
            return False

        # At this point, we know that if we are able to claim the block we are
        # initializing, we will not be prevented from doing so because of PoET
        # policies.  Clear the cached previous block ID so a subsequent
        # initialize attempt on the same predecessor is evaluated afresh.
        self._wait_timer = wait_timer
        PoetBlockPublisher._previous_block_id = None

        LOGGER.debug('Created wait timer: %s', self._wait_timer)

        return True
    def compare_forks(self, cur_fork_head, new_fork_head):
        """Given the head of two forks, return which should be the fork that
        the validator chooses.  When this is called both forks consist of
        only valid blocks.

        Args:
            cur_fork_head (Block): The current head of the block chain.
            new_fork_head (Block): The head of the fork that is being
                evaluated.

        Returns:
            Boolean: True if the new chain should replace the current chain.
            False if the new chain should be discarded.

        Raises:
            TypeError: If the new fork head is not a PoET block, or if a
                PoET block is compared against a non-PoET block that is not
                its direct predecessor.
        """
        chosen_fork_head = None

        # Build a PoET enclave off the state as of the current fork head so
        # we can deserialize and compare wait certificates.
        state_view = \
            BlockWrapper.state_view_for_block(
                block_wrapper=cur_fork_head,
                state_view_factory=self._state_view_factory)
        poet_enclave_module = \
            factory.PoetEnclaveFactory.get_poet_enclave_module(
                state_view=state_view,
                config_dir=self._config_dir,
                data_dir=self._data_dir)

        # A None certificate means the block was not produced by PoET.
        current_fork_wait_certificate = \
            utils.deserialize_wait_certificate(
                block=cur_fork_head,
                poet_enclave_module=poet_enclave_module)
        new_fork_wait_certificate = \
            utils.deserialize_wait_certificate(
                block=new_fork_head,
                poet_enclave_module=poet_enclave_module)

        # If we ever get a new fork head that is not a PoET block, then bail
        # out.  This should never happen, but defensively protect against it.
        if new_fork_wait_certificate is None:
            raise \
                TypeError(
                    'New fork head {} is not a PoET block'.format(
                        new_fork_head.identifier[:8]))

        # Criterion #1: If the current fork head is not PoET, then check to
        # see if the new fork head is building on top of it.  That would be
        # okay.  However if not, then we don't have a good deterministic way
        # of choosing a winner.  Again, the latter should never happen, but
        # defensively protect against it.
        if current_fork_wait_certificate is None:
            if new_fork_head.previous_block_id == cur_fork_head.identifier:
                LOGGER.info(
                    'Choose new fork %s over current fork %s: '
                    'New fork head switches consensus to PoET',
                    new_fork_head.header_signature[:8],
                    cur_fork_head.header_signature[:8])
                chosen_fork_head = new_fork_head
            else:
                raise \
                    TypeError(
                        'Trying to compare a PoET block {} to a non-PoET '
                        'block {} that is not the direct predecessor'.format(
                            new_fork_head.identifier[:8],
                            cur_fork_head.identifier[:8]))

        # Criterion #2: If they share the same immediate previous block,
        # then the one with the smaller wait duration is chosen
        elif cur_fork_head.previous_block_id == \
                new_fork_head.previous_block_id:
            if current_fork_wait_certificate.duration < \
                    new_fork_wait_certificate.duration:
                LOGGER.info(
                    'Choose current fork %s over new fork %s: '
                    'Current fork wait duration (%f) less than new fork wait '
                    'duration (%f)',
                    cur_fork_head.header_signature[:8],
                    new_fork_head.header_signature[:8],
                    current_fork_wait_certificate.duration,
                    new_fork_wait_certificate.duration)
                chosen_fork_head = cur_fork_head
            elif new_fork_wait_certificate.duration < \
                    current_fork_wait_certificate.duration:
                LOGGER.info(
                    'Choose new fork %s over current fork %s: '
                    'New fork wait duration (%f) less than current fork wait '
                    'duration (%f)',
                    new_fork_head.header_signature[:8],
                    cur_fork_head.header_signature[:8],
                    new_fork_wait_certificate.duration,
                    current_fork_wait_certificate.duration)
                chosen_fork_head = new_fork_head
            # NOTE: equal durations fall through to Criterion #4 below.

        # Criterion #3: If they don't share the same immediate previous
        # block, then the one with the higher aggregate local mean wins
        else:
            # Get the consensus state for the current fork head and the
            # block immediately before the new fork head (as we haven't
            # committed to the block yet).  So that the new fork doesn't
            # have to fight with one hand tied behind its back, add the
            # new fork head's wait certificate's local mean to the
            # aggregate local mean for the predecessor block's consensus
            # state for the comparison.
            current_fork_consensus_state = \
                ConsensusState.consensus_state_for_block_id(
                    block_id=cur_fork_head.identifier,
                    block_cache=self._block_cache,
                    state_view_factory=self._state_view_factory,
                    consensus_state_store=self._consensus_state_store,
                    poet_enclave_module=poet_enclave_module)
            new_fork_consensus_state = \
                ConsensusState.consensus_state_for_block_id(
                    block_id=new_fork_head.previous_block_id,
                    block_cache=self._block_cache,
                    state_view_factory=self._state_view_factory,
                    consensus_state_store=self._consensus_state_store,
                    poet_enclave_module=poet_enclave_module)
            new_fork_aggregate_local_mean = \
                new_fork_consensus_state.aggregate_local_mean + \
                new_fork_wait_certificate.local_mean

            if current_fork_consensus_state.aggregate_local_mean > \
                    new_fork_aggregate_local_mean:
                LOGGER.info(
                    'Choose current fork %s over new fork %s: '
                    'Current fork aggregate local mean (%f) greater than new '
                    'fork aggregate local mean (%f)',
                    cur_fork_head.header_signature[:8],
                    new_fork_head.header_signature[:8],
                    current_fork_consensus_state.aggregate_local_mean,
                    new_fork_aggregate_local_mean)
                chosen_fork_head = cur_fork_head
            elif new_fork_aggregate_local_mean > \
                    current_fork_consensus_state.aggregate_local_mean:
                LOGGER.info(
                    'Choose new fork %s over current fork %s: '
                    'New fork aggregate local mean (%f) greater than current '
                    'fork aggregate local mean (%f)',
                    new_fork_head.header_signature[:8],
                    cur_fork_head.header_signature[:8],
                    new_fork_aggregate_local_mean,
                    current_fork_consensus_state.aggregate_local_mean)
                chosen_fork_head = new_fork_head

        # Criterion #4: If we have gotten to this point and we have not chosen
        # yet, we are going to fall back on using the block identifiers
        # (header signatures).  The lexicographically larger one will be the
        # chosen one.  The chance that they are equal are infinitesimally
        # small.
        if chosen_fork_head is None:
            if cur_fork_head.header_signature > \
                    new_fork_head.header_signature:
                LOGGER.info(
                    'Choose current fork %s over new fork %s: '
                    'Current fork header signature (%s) greater than new fork '
                    'header signature (%s)',
                    cur_fork_head.header_signature[:8],
                    new_fork_head.header_signature[:8],
                    cur_fork_head.header_signature[:8],
                    new_fork_head.header_signature[:8])
                chosen_fork_head = cur_fork_head
            else:
                LOGGER.info(
                    'Choose new fork %s over current fork %s: '
                    'New fork header signature (%s) greater than current fork '
                    'header signature (%s)',
                    new_fork_head.header_signature[:8],
                    cur_fork_head.header_signature[:8],
                    new_fork_head.header_signature[:8],
                    cur_fork_head.header_signature[:8])
                chosen_fork_head = new_fork_head

        # Now that we have chosen a fork for the chain head, if we chose the
        # new fork and it is a PoET block (i.e., it has a wait certificate),
        # we need to create consensus state store information for the new
        # fork's chain head.
        if chosen_fork_head == new_fork_head:
            # Get the state view for the previous block in the chain so we can
            # create a PoET enclave
            previous_block = None
            try:
                previous_block = \
                    self._block_cache[new_fork_head.previous_block_id]
            except KeyError:
                # Predecessor not in the cache; state_view_for_block is
                # expected to cope with a None block wrapper.
                pass

            state_view = \
                BlockWrapper.state_view_for_block(
                    block_wrapper=previous_block,
                    state_view_factory=self._state_view_factory)

            validator_registry_view = ValidatorRegistryView(state_view)
            try:
                # Get the validator info for the validator that claimed the
                # fork head
                validator_info = \
                    validator_registry_view.get_validator_info(
                        new_fork_head.header.signer_public_key)

                # Get the consensus state for the new fork head's previous
                # block, let the consensus state update itself appropriately
                # based upon the validator claiming a block, and then
                # associate the consensus state with the new block in the
                # store.
                consensus_state = \
                    ConsensusState.consensus_state_for_block_id(
                        block_id=new_fork_head.previous_block_id,
                        block_cache=self._block_cache,
                        state_view_factory=self._state_view_factory,
                        consensus_state_store=self._consensus_state_store,
                        poet_enclave_module=poet_enclave_module)
                consensus_state.validator_did_claim_block(
                    validator_info=validator_info,
                    wait_certificate=new_fork_wait_certificate,
                    poet_settings_view=PoetSettingsView(state_view))
                self._consensus_state_store[new_fork_head.identifier] = \
                    consensus_state

                LOGGER.debug('Create consensus state: BID=%s, ALM=%f, TBCC=%d',
                             new_fork_head.identifier[:8],
                             consensus_state.aggregate_local_mean,
                             consensus_state.total_block_claim_count)
            except KeyError:
                # This _should_ never happen.  The new potential fork head
                # has to have been a PoET block and for it to be verified
                # by the PoET block verifier, it must have been signed by
                # validator in the validator registry.  If not found, we
                # are going to just stick with the current fork head.
                LOGGER.error(
                    'New fork head claimed by validator not in validator '
                    'registry: %s...%s',
                    new_fork_head.header.signer_public_key[:8],
                    new_fork_head.header.signer_public_key[-8:])
                chosen_fork_head = cur_fork_head

        return chosen_fork_head == new_fork_head
def initialize_block(self, block_header): """Do initialization necessary for the consensus to claim a block, this may include initiating voting activities, starting proof of work hash generation, or create a PoET wait timer. Args: block_header (BlockHeader): The BlockHeader to initialize. Returns: Boolean: True if the candidate block should be built. False if no candidate should be built. """ # Using the current chain head, we need to create a state view so we # can create a PoET enclave. state_view = \ BlockWrapper.state_view_for_block( block_wrapper=self._block_cache.block_store.chain_head, state_view_factory=self._state_view_factory) poet_enclave_module = \ factory.PoetEnclaveFactory.get_poet_enclave_module(state_view) # Check the consensus state to see if we have current sealed signup # information. consensus_state = \ utils.get_consensus_state_for_block_id( block_id=block_header.previous_block_id, block_cache=self._block_cache, state_view_factory=self._state_view_factory, consensus_state_store=self._consensus_state_store, poet_enclave_module=poet_enclave_module) if consensus_state is not None and \ consensus_state.sealed_signup_data is not None: # Check to see if the sealed signup data we have cached is the # same as what is in the consensus store. If not, then it is # stale and so we need to unseal it and update our cached copy. 
if consensus_state.sealed_signup_data != \ PoetBlockPublisher._sealed_signup_data: LOGGER.debug('Unseal signup data %s...%s', consensus_state.sealed_signup_data[:8], consensus_state.sealed_signup_data[-8:]) PoetBlockPublisher._sealed_signup_data = \ consensus_state.sealed_signup_data PoetBlockPublisher._poet_public_key = \ SignupInfo.unseal_signup_data( poet_enclave_module=poet_enclave_module, validator_address=block_header.signer_pubkey, sealed_signup_data=consensus_state.sealed_signup_data) # Otherwise, if we don't already have a public key, we need to create # signup information and create a transaction to add it to the # validator registry. elif PoetBlockPublisher._poet_public_key is None: LOGGER.debug( 'No public key found, so going to register new signup ' 'information') self._register_signup_information( block_header=block_header, poet_enclave_module=poet_enclave_module) return False # Otherwise, at this point we need to check the validator registry to # see if our _current_ validator registry information was added to the # validator registry. else: validator_registry_view = ValidatorRegistryView(state_view) try: validator_id = block_header.signer_pubkey validator_info = \ validator_registry_view.get_validator_info( validator_id=validator_id) LOGGER.debug( 'Our Validator Registry Entry: Name=%s, ID=%s...%s, PoET ' 'public key=%s...%s', validator_info.name, validator_info.id[:8], validator_info.id[-8:], validator_info.signup_info.poet_public_key[:8], validator_info.signup_info.poet_public_key[-8:]) # We need to verify that our validator registry entry is # current - basically this means verifying that the PoET # public key that will be used to verify the validity of # our wait certificates is the PoET public key matching our # current private key. 
if validator_info.signup_info.poet_public_key != \ PoetBlockPublisher._poet_public_key: LOGGER.debug( 'Our Validator Registry Entry PoET public key ' '(%s...%s) doesn' 't match the PoET public key ' 'expected (%s...%s)', validator_info.signup_info.poet_public_key[:8], validator_info.signup_info.poet_public_key[-8:], PoetBlockPublisher._poet_public_key[:8], PoetBlockPublisher._poet_public_key[-8:]) return False # At this point, we know that we are in the validator registry # and the entry is current. We can save the sealed signup # data to the consensus state for the previous block and can # clear out our cached copies of the sealed signup data and # the PoET public key. if consensus_state is None: consensus_state = ConsensusState() consensus_state.sealed_signup_data = \ PoetBlockPublisher._sealed_signup_data self._consensus_state_store[block_header.previous_block_id] = \ consensus_state PoetBlockPublisher._sealed_signup_data = None PoetBlockPublisher._poet_public_key = None except KeyError: LOGGER.debug( 'We cannot initialize the block because our PoET signup ' 'information is not in the validator registry') return False # Since we are registering, don't bother trying to initialize # the block return False # Create a list of certificates for the wait timer. This seems to have # a little too much knowledge of the WaitTimer implementation, but # there is no use getting more than # WaitTimer.certificate_sample_length wait certificates. 
certificates = \ utils.build_certificate_list( block_header=block_header, block_cache=self._block_cache, poet_enclave_module=poet_enclave_module, maximum_number=WaitTimer.certificate_sample_length) # We need to create a wait timer for the block...this is what we # will check when we are asked if it is time to publish the block self._wait_timer = \ WaitTimer.create_wait_timer( poet_enclave_module=poet_enclave_module, validator_address=block_header.signer_pubkey, certificates=list(certificates)) LOGGER.debug('Created wait timer: %s', self._wait_timer) return True
    def compare_forks(self, cur_fork_head, new_fork_head):
        """Given the head of two forks, return which should be the fork that
        the validator chooses.  When this is called both forks consist of
        only valid blocks.

        Args:
            cur_fork_head (Block): The current head of the block chain.
            new_fork_head (Block): The head of the fork that is being
                evaluated.

        Returns:
            Boolean: True if the new chain should replace the current chain.
            False if the new chain should be discarded.
        """
        chosen_fork_head = None

        # Fork choice: longest chain wins; ties are broken deterministically
        # by the lexicographically larger header signature.
        if new_fork_head.block_num > cur_fork_head.block_num:
            LOGGER.info(
                'Chain with new fork head %s...%s longer (%d) than current '
                'chain head %s...%s (%d)',
                new_fork_head.header_signature[:8],
                new_fork_head.header_signature[-8:],
                new_fork_head.block_num,
                cur_fork_head.header_signature[:8],
                cur_fork_head.header_signature[-8:],
                cur_fork_head.block_num)
            chosen_fork_head = new_fork_head
        elif new_fork_head.block_num < cur_fork_head.block_num:
            LOGGER.info(
                'Chain with current head %s...%s longer (%d) than new fork '
                'head %s...%s (%d)',
                cur_fork_head.header_signature[:8],
                cur_fork_head.header_signature[-8:],
                cur_fork_head.block_num,
                new_fork_head.header_signature[:8],
                new_fork_head.header_signature[-8:],
                new_fork_head.block_num)
            chosen_fork_head = cur_fork_head
        elif new_fork_head.header_signature > cur_fork_head.header_signature:
            LOGGER.info(
                'Signature of new fork head (%s...%s) > than current '
                '(%s...%s)',
                new_fork_head.header_signature[:8],
                new_fork_head.header_signature[-8:],
                cur_fork_head.header_signature[:8],
                cur_fork_head.header_signature[-8:])
            chosen_fork_head = new_fork_head
        else:
            LOGGER.info(
                'Signature of current fork head (%s...%s) >= than new '
                '(%s...%s)',
                cur_fork_head.header_signature[:8],
                cur_fork_head.header_signature[-8:],
                new_fork_head.header_signature[:8],
                new_fork_head.header_signature[-8:])
            chosen_fork_head = cur_fork_head

        # Now that we have chosen a fork for the chain head, if we chose the
        # new fork, we need to create consensus state store information for
        # the new fork's chain head.
        if chosen_fork_head == new_fork_head:
            # Get the state view for the previous block in the chain so we can
            # create a PoET enclave
            previous_block = None
            try:
                previous_block = \
                    self._block_cache[new_fork_head.previous_block_id]
            except KeyError:
                # Predecessor not cached; state_view_for_block is expected
                # to handle a None block wrapper.
                pass

            state_view = \
                BlockWrapper.state_view_for_block(
                    block_wrapper=previous_block,
                    state_view_factory=self._state_view_factory)
            poet_enclave_module = \
                factory.PoetEnclaveFactory.get_poet_enclave_module(state_view)

            validator_registry_view = ValidatorRegistryView(state_view)
            try:
                # Get the validator info for the validator that claimed the
                # fork head
                validator_info = \
                    validator_registry_view.get_validator_info(
                        new_fork_head.header.signer_pubkey)

                # Get the consensus state for the new fork head's previous
                # block and update the consensus-wide statistics for the new
                # fork head.
                consensus_state = \
                    utils.get_consensus_state_for_block_id(
                        block_id=new_fork_head.previous_block_id,
                        block_cache=self._block_cache,
                        state_view_factory=self._state_view_factory,
                        consensus_state_store=self._consensus_state_store,
                        poet_enclave_module=poet_enclave_module)
                consensus_state.total_block_claim_count += 1

                # Get and update the validator state/statistics for the
                # validator that claimed the new fork head.
                validator_state = \
                    utils.get_current_validator_state(
                        validator_info=validator_info,
                        consensus_state=consensus_state,
                        block_cache=self._block_cache)
                consensus_state.set_validator_state(
                    validator_id=validator_info.id,
                    validator_state=utils.create_next_validator_state(
                        validator_info=validator_info,
                        current_validator_state=validator_state))

                # Store the updated consensus state for this block.
                self._consensus_state_store[new_fork_head.identifier] = \
                    consensus_state

                LOGGER.debug('Create consensus state: BID=%s, TBCC=%d',
                             new_fork_head.identifier[:8],
                             consensus_state.total_block_claim_count)
            except KeyError:
                # This _should_ never happen.  The new potential fork head
                # has to have been a PoET block and for it to be verified
                # by the PoET block verifier, it must have been signed by
                # validator in the validator registry.  If not found, we
                # are going to just stick with the current fork head.
                LOGGER.error(
                    'New fork head claimed by validator not in validator '
                    'registry: %s...%s',
                    new_fork_head.header.signer_pubkey[:8],
                    new_fork_head.header.signer_pubkey[-8:])
                chosen_fork_head = cur_fork_head

        return chosen_fork_head == new_fork_head
    def verify_block(self, block_wrapper):
        """Check that the block received conforms to the consensus rules.

        Args:
            block_wrapper (BlockWrapper): The block to validate.

        Returns:
            Boolean: True if the Block is valid, False if the block is
            invalid.
        """
        # Get the state view for the previous block in the chain so we can
        # create a PoET enclave and validator registry view
        previous_block = None
        try:
            previous_block = \
                self._block_cache[block_wrapper.previous_block_id]
        except KeyError:
            # Predecessor not cached; state_view_for_block is expected to
            # handle a None block wrapper.
            pass

        state_view = \
            BlockWrapper.state_view_for_block(
                block_wrapper=previous_block,
                state_view_factory=self._state_view_factory)

        poet_enclave_module = \
            factory.PoetEnclaveFactory.get_poet_enclave_module(
                state_view=state_view,
                config_dir=self._config_dir)

        validator_registry_view = ValidatorRegistryView(state_view)
        # Grab the validator info based upon the block signer's public
        # key
        try:
            validator_info = \
                validator_registry_view.get_validator_info(
                    block_wrapper.header.signer_pubkey)
        except KeyError:
            LOGGER.error(
                'Block %s rejected: Received block from an unregistered '
                'validator %s...%s',
                block_wrapper.identifier[:8],
                block_wrapper.header.signer_pubkey[:8],
                block_wrapper.header.signer_pubkey[-8:])
            return False

        LOGGER.debug(
            'Block Signer Name=%s, ID=%s...%s, PoET public key='
            '%s...%s',
            validator_info.name,
            validator_info.id[:8],
            validator_info.id[-8:],
            validator_info.signup_info.poet_public_key[:8],
            validator_info.signup_info.poet_public_key[-8:])

        # For the candidate block, reconstitute the wait certificate
        # and verify that it is valid
        wait_certificate = \
            utils.deserialize_wait_certificate(
                block=block_wrapper,
                poet_enclave_module=poet_enclave_module)
        if wait_certificate is None:
            LOGGER.error(
                'Block %s rejected: Block from validator %s (ID=%s...%s) was '
                'not created by PoET consensus module',
                block_wrapper.identifier[:8],
                validator_info.name,
                validator_info.id[:8],
                validator_info.id[-8:])
            return False

        # Get the consensus state and PoET configuration view for the block
        # that is being built upon
        consensus_state = \
            ConsensusState.consensus_state_for_block_id(
                block_id=block_wrapper.previous_block_id,
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                consensus_state_store=self._consensus_state_store,
                poet_enclave_module=poet_enclave_module)
        poet_config_view = PoetConfigView(state_view=state_view)

        previous_certificate_id = \
            utils.get_previous_certificate_id(
                block_header=block_wrapper.header,
                block_cache=self._block_cache,
                poet_enclave_module=poet_enclave_module)
        try:
            # Raises ValueError if the certificate fails validation.
            wait_certificate.check_valid(
                poet_enclave_module=poet_enclave_module,
                previous_certificate_id=previous_certificate_id,
                poet_public_key=validator_info.signup_info.poet_public_key,
                consensus_state=consensus_state,
                poet_config_view=poet_config_view)
        except ValueError as error:
            LOGGER.error(
                'Block %s rejected: Wait certificate check failed - %s',
                block_wrapper.identifier[:8],
                error)
            return False

        # Reject the block if the validator signup information fails the
        # freshness check.
        if consensus_state.validator_signup_was_committed_too_late(
                validator_info=validator_info,
                poet_config_view=poet_config_view,
                block_cache=self._block_cache):
            LOGGER.error(
                'Block %s rejected: Validator signup information not '
                'committed in a timely manner.',
                block_wrapper.identifier[:8])
            return False

        # Reject the block if the validator has already claimed the key block
        # limit for its current PoET key pair.
        if consensus_state.validator_has_claimed_block_limit(
                validator_info=validator_info,
                poet_config_view=poet_config_view):
            LOGGER.error(
                'Block %s rejected: Validator has reached maximum number of '
                'blocks with key pair.',
                block_wrapper.identifier[:8])
            return False

        # Reject the block if the validator has not waited the required number
        # of blocks between when the block containing its validator registry
        # transaction was committed to the chain and trying to claim this
        # block
        if consensus_state.validator_is_claiming_too_early(
                validator_info=validator_info,
                block_number=block_wrapper.block_num,
                validator_registry_view=validator_registry_view,
                poet_config_view=poet_config_view,
                block_store=self._block_cache.block_store):
            LOGGER.error(
                'Block %s rejected: Validator has not waited long enough '
                'since registering validator information.',
                block_wrapper.identifier[:8])
            return False

        # Reject the block if the validator is claiming blocks at a rate that
        # is more frequent than is statistically allowed (i.e., zTest)
        if consensus_state.validator_is_claiming_too_frequently(
                validator_info=validator_info,
                previous_block_id=block_wrapper.previous_block_id,
                poet_config_view=poet_config_view,
                population_estimate=wait_certificate.population_estimate(
                    poet_config_view=poet_config_view),
                block_cache=self._block_cache,
                poet_enclave_module=poet_enclave_module):
            LOGGER.error(
                'Block %s rejected: Validator is claiming blocks too '
                'frequently.',
                block_wrapper.identifier[:8])
            return False

        return True
def initialize_block(self, block_header):
    """Do initialization necessary for the consensus to claim a
    block, this may include initiating voting activities, starting
    proof of work hash generation, or create a PoET wait timer.

    Args:
        block_header (BlockHeader): The BlockHeader to initialize.

    Returns:
        Boolean: True if the candidate block should be built. False if
        no candidate should be built.
    """
    # Using the current chain head, we need to create a state view so we
    # can create a PoET enclave.
    state_view = \
        BlockWrapper.state_view_for_block(
            block_wrapper=self._block_cache.block_store.chain_head,
            state_view_factory=self._state_view_factory)

    poet_enclave_module = \
        factory.PoetEnclaveFactory.get_poet_enclave_module(state_view)

    # Get our validator registry entry to see what PoET public key
    # other validators think we are using.
    validator_registry_view = ValidatorRegistryView(state_view)
    validator_info = None

    try:
        validator_id = block_header.signer_pubkey
        validator_info = \
            validator_registry_view.get_validator_info(
                validator_id=validator_id)
    except KeyError:
        # Not registered (yet); handled by the None check below.
        pass

    # If we don't have a validator registry entry, then check our cached
    # PoET public key.  If we don't have one, then we need to sign up.
    # If we do have one, then our validator registry entry has not
    # percolated through the system, so nothing to do but wait.
    if validator_info is None:
        if PoetBlockPublisher._poet_public_key is None:
            LOGGER.debug(
                'No public key found, so going to register new signup '
                'information')
            self._register_signup_information(
                block_header=block_header,
                poet_enclave_module=poet_enclave_module)

        return False

    # Otherwise, we have a current validator registry entry.  In that
    # case, we need to make sure that we are using the same PPK that the
    # other validators think we are using.  If not, then we need to switch
    # the PoET enclave to using the correct keys.
    elif validator_info.signup_info.poet_public_key != \
            PoetBlockPublisher._poet_public_key:
        # Retrieve the key state corresponding to the PoET public key and
        # use it to re-establish the key used by the enclave.
        poet_key_state = \
            self._poet_key_state_store[
                validator_info.signup_info.poet_public_key]

        PoetBlockPublisher._poet_public_key = \
            SignupInfo.unseal_signup_data(
                poet_enclave_module=poet_enclave_module,
                validator_address=block_header.signer_pubkey,
                sealed_signup_data=poet_key_state.sealed_signup_data)

        assert PoetBlockPublisher._poet_public_key == \
            validator_info.signup_info.poet_public_key

        LOGGER.debug(
            'Switched to public key: %s...%s',
            PoetBlockPublisher._poet_public_key[:8],
            PoetBlockPublisher._poet_public_key[-8:])
        LOGGER.debug(
            'Unseal signup data: %s...%s',
            poet_key_state.sealed_signup_data[:8],
            poet_key_state.sealed_signup_data[-8:])

    consensus_state = \
        utils.get_consensus_state_for_block_id(
            block_id=block_header.previous_block_id,
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            consensus_state_store=self._consensus_state_store,
            poet_enclave_module=poet_enclave_module)
    validator_state = \
        utils.get_current_validator_state(
            validator_info=validator_info,
            consensus_state=consensus_state,
            block_cache=self._block_cache)
    poet_config_view = PoetConfigView(state_view)

    # Using the consensus state for the block upon which we want to
    # build, check to see how many blocks we have claimed on this chain
    # with this PoET key.  If we have hit the key block claim limit, then
    # we need to check if the key has been refreshed.
    key_block_claim_limit = poet_config_view.key_block_claim_limit
    if validator_state.poet_public_key == \
            PoetBlockPublisher._poet_public_key and \
            validator_state.key_block_claim_count >= \
            key_block_claim_limit:
        # Because we have hit the limit, check to see if we have already
        # submitted a validator registry transaction with new signup
        # information, and therefore a new PoET public key.  If not, then
        # mark this PoET public key in the store as having been refreshed
        # and register new signup information.  Regardless, since we have
        # hit the key block claim limit, we won't even bother initializing
        # a block on this chain as it will be rejected by other
        # validators.
        poet_key_state = \
            self._poet_key_state_store[
                PoetBlockPublisher._poet_public_key]
        if not poet_key_state.has_been_refreshed:
            # Fix: message previously read '... for key for key: ...'
            # (duplicated phrase); it now matches the wording used by the
            # other initialize_block implementation.
            LOGGER.info(
                'Reached block claim limit (%d) for key: %s...%s',
                key_block_claim_limit,
                PoetBlockPublisher._poet_public_key[:8],
                PoetBlockPublisher._poet_public_key[-8:])

            sealed_signup_data = poet_key_state.sealed_signup_data
            self._poet_key_state_store[
                PoetBlockPublisher._poet_public_key] = \
                PoetKeyState(
                    sealed_signup_data=sealed_signup_data,
                    has_been_refreshed=True)

            self._register_signup_information(
                block_header=block_header,
                poet_enclave_module=poet_enclave_module)

        return False

    # Verify that we are abiding by the block claim delay (i.e., waiting a
    # certain number of blocks since our validator registry was added/
    # updated).
    #
    # While having a block claim delay is nice, it turns out that in
    # practice the claim delay should not be more than one less than
    # the number of validators.  It helps to imagine the scenario
    # where each validator hits their block claim limit in sequential
    # blocks and their new validator registry information is updated
    # in the following block by another validator, assuming that there
    # were no forks.  If there are N validators, once all N validators
    # have updated their validator registry information, there will
    # have been N-1 block commits and the Nth validator will only be
    # able to get its updated validator registry information updated
    # if the first validator that kicked this off is now able to claim
    # a block.  If the block claim delay was greater than or equal to
    # the number of validators, at this point no validators would be
    # able to claim a block.
    number_of_validators = \
        len(validator_registry_view.get_validators())
    block_claim_delay = \
        min(
            poet_config_view.block_claim_delay,
            number_of_validators - 1)

    # While a validator network is starting up, we need to be careful
    # about applying the block claim delay because if we are too
    # aggressive we will get ourselves into a situation where the
    # block claim delay will prevent any validators from claiming
    # blocks.  So, until we get at least block_claim_delay blocks
    # we are going to choose not to enforce the delay.
    if consensus_state.total_block_claim_count > block_claim_delay:
        blocks_since_registration = \
            block_header.block_num - \
            validator_state.commit_block_number - 1
        if block_claim_delay > blocks_since_registration:
            return False

    # Create a list of certificates for the wait timer.  This seems to
    # have a little too much knowledge of the WaitTimer implementation,
    # but there is no use getting more than
    # WaitTimer.certificate_sample_length wait certificates.
    certificates = \
        utils.build_certificate_list(
            block_header=block_header,
            block_cache=self._block_cache,
            poet_enclave_module=poet_enclave_module,
            maximum_number=WaitTimer.certificate_sample_length)

    # We need to create a wait timer for the block...this is what we
    # will check when we are asked if it is time to publish the block
    self._wait_timer = \
        WaitTimer.create_wait_timer(
            poet_enclave_module=poet_enclave_module,
            validator_address=block_header.signer_pubkey,
            certificates=list(certificates))

    LOGGER.debug('Created wait timer: %s', self._wait_timer)

    return True
def verify_block(self, block_wrapper):
    """Check that the block received conforms to the consensus rules.

    Args:
        block_wrapper (BlockWrapper): The block to validate.

    Returns:
        Boolean: True if the Block is valid, False if the block is
        invalid.
    """
    # The enclave and the validator registry are derived from the state
    # as of the block being built upon, so fetch that block first (it may
    # legitimately be missing from the cache).
    try:
        prior_block = self._block_cache[block_wrapper.previous_block_id]
    except KeyError:
        prior_block = None

    state_view = \
        BlockWrapper.state_view_for_block(
            block_wrapper=prior_block,
            state_view_factory=self._state_view_factory)

    enclave = \
        factory.PoetEnclaveFactory.get_poet_enclave_module(
            state_view=state_view,
            config_dir=self._config_dir,
            data_dir=self._data_dir)

    registry_view = ValidatorRegistryView(state_view)

    # Look up the block's signer in the validator registry; a block from
    # an unregistered signer cannot be accepted.
    try:
        signer_info = \
            registry_view.get_validator_info(
                block_wrapper.header.signer_pubkey)
    except KeyError:
        LOGGER.error(
            'Block %s rejected: Received block from an unregistered '
            'validator %s...%s',
            block_wrapper.identifier[:8],
            block_wrapper.header.signer_pubkey[:8],
            block_wrapper.header.signer_pubkey[-8:])
        return False

    LOGGER.debug(
        'Block Signer Name=%s, ID=%s...%s, PoET public key='
        '%s...%s',
        signer_info.name,
        signer_info.id[:8],
        signer_info.id[-8:],
        signer_info.signup_info.poet_public_key[:8],
        signer_info.signup_info.poet_public_key[-8:])

    # Reconstitute the candidate block's wait certificate; its absence
    # means the block was not produced by PoET at all.
    wait_cert = \
        utils.deserialize_wait_certificate(
            block=block_wrapper,
            poet_enclave_module=enclave)
    if wait_cert is None:
        LOGGER.error(
            'Block %s rejected: Block from validator %s (ID=%s...%s) was '
            'not created by PoET consensus module',
            block_wrapper.identifier[:8],
            signer_info.name,
            signer_info.id[:8],
            signer_info.id[-8:])
        return False

    # Pull the consensus state and PoET settings for the block that is
    # being built upon.
    consensus = \
        ConsensusState.consensus_state_for_block_id(
            block_id=block_wrapper.previous_block_id,
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            consensus_state_store=self._consensus_state_store,
            poet_enclave_module=enclave)
    settings = PoetSettingsView(state_view=state_view)

    prior_cert_id = \
        utils.get_previous_certificate_id(
            block_header=block_wrapper.header,
            block_cache=self._block_cache,
            poet_enclave_module=enclave)

    # An invalid wait certificate surfaces as a ValueError - reject.
    try:
        wait_cert.check_valid(
            poet_enclave_module=enclave,
            previous_certificate_id=prior_cert_id,
            poet_public_key=signer_info.signup_info.poet_public_key,
            consensus_state=consensus,
            poet_settings_view=settings)
    except ValueError as error:
        LOGGER.error(
            'Block %s rejected: Wait certificate check failed - %s',
            block_wrapper.identifier[:8],
            error)
        return False

    # Reject stale signup information (freshness check).
    if consensus.validator_signup_was_committed_too_late(
            validator_info=signer_info,
            poet_settings_view=settings,
            block_cache=self._block_cache):
        LOGGER.error(
            'Block %s rejected: Validator signup information not '
            'committed in a timely manner.',
            block_wrapper.identifier[:8])
        return False

    # Reject if the signer has exhausted the block claim limit for its
    # current PoET key pair.
    if consensus.validator_has_claimed_block_limit(
            validator_info=signer_info,
            poet_settings_view=settings):
        LOGGER.error(
            'Block %s rejected: Validator has reached maximum number of '
            'blocks with key pair.',
            block_wrapper.identifier[:8])
        return False

    # Reject if the signer has not waited the required number of blocks
    # between the commit of its validator registry transaction and this
    # claim.
    if consensus.validator_is_claiming_too_early(
            validator_info=signer_info,
            block_number=block_wrapper.block_num,
            validator_registry_view=registry_view,
            poet_settings_view=settings,
            block_store=self._block_cache.block_store):
        LOGGER.error(
            'Block %s rejected: Validator has not waited long enough '
            'since registering validator information.',
            block_wrapper.identifier[:8])
        return False

    # Reject if the signer wins more often than is statistically
    # plausible (i.e., zTest).
    if consensus.validator_is_claiming_too_frequently(
            validator_info=signer_info,
            previous_block_id=block_wrapper.previous_block_id,
            poet_settings_view=settings,
            population_estimate=wait_cert.population_estimate(
                poet_settings_view=settings),
            block_cache=self._block_cache,
            poet_enclave_module=enclave):
        LOGGER.error(
            'Block %s rejected: Validator is claiming blocks too '
            'frequently.',
            block_wrapper.identifier[:8])
        return False

    return True
def verify_block(self, block_wrapper):
    """Check that the block received conforms to the consensus rules.

    Args:
        block_wrapper (BlockWrapper): The block to validate.

    Returns:
        Boolean: True if the Block is valid, False if the block is
        invalid.
    """
    # Get the state view for the previous block in the chain so we can
    # create a PoET enclave and validator registry view
    previous_block = None
    try:
        previous_block = \
            self._block_cache[block_wrapper.previous_block_id]
    except KeyError:
        pass

    state_view = \
        BlockWrapper.state_view_for_block(
            block_wrapper=previous_block,
            state_view_factory=self._state_view_factory)

    poet_enclave_module = \
        factory.PoetEnclaveFactory.get_poet_enclave_module(state_view)

    validator_registry_view = ValidatorRegistryView(state_view)
    # Grab the validator info based upon the block signer's public
    # key
    try:
        validator_info = \
            validator_registry_view.get_validator_info(
                block_wrapper.header.signer_pubkey)
    except KeyError:
        LOGGER.error(
            'Block %s rejected: Received block from an unregistered '
            'validator %s...%s',
            block_wrapper.identifier[:8],
            block_wrapper.header.signer_pubkey[:8],
            block_wrapper.header.signer_pubkey[-8:])
        return False

    LOGGER.debug(
        'Block Signer Name=%s, ID=%s...%s, PoET public key='
        '%s...%s',
        validator_info.name,
        validator_info.id[:8],
        validator_info.id[-8:],
        validator_info.signup_info.poet_public_key[:8],
        validator_info.signup_info.poet_public_key[-8:])

    # Create a list of certificates leading up to this block.
    # This seems to have a little too much knowledge of the
    # WaitTimer implementation, but there is no use getting more
    # than WaitTimer.certificate_sample_length wait certificates.
    certificates = \
        utils.build_certificate_list(
            block_header=block_wrapper.header,
            block_cache=self._block_cache,
            poet_enclave_module=poet_enclave_module,
            maximum_number=WaitTimer.certificate_sample_length)

    # For the candidate block, reconstitute the wait certificate
    # and verify that it is valid
    wait_certificate = \
        utils.deserialize_wait_certificate(
            block=block_wrapper,
            poet_enclave_module=poet_enclave_module)
    if wait_certificate is None:
        LOGGER.error(
            'Block %s rejected: Block from validator %s (ID=%s...%s) was '
            'not created by PoET consensus module',
            block_wrapper.identifier[:8],
            validator_info.name,
            validator_info.id[:8],
            validator_info.id[-8:])
        return False

    # Fix: previously an invalid wait certificate raised out of this
    # method instead of rejecting the block.  Catch the validation error
    # and reject, matching the behavior of the other verify_block
    # implementations in this file.
    try:
        wait_certificate.check_valid(
            poet_enclave_module=poet_enclave_module,
            certificates=certificates,
            poet_public_key=validator_info.signup_info.poet_public_key)
    except ValueError as error:
        LOGGER.error(
            'Block %s rejected: Wait certificate check failed - %s',
            block_wrapper.identifier[:8],
            error)
        return False

    # Get the consensus state for the block that is being built upon and
    # then fetch the validator state for this validator
    consensus_state = \
        utils.get_consensus_state_for_block_id(
            block_id=block_wrapper.previous_block_id,
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            consensus_state_store=self._consensus_state_store,
            poet_enclave_module=poet_enclave_module)
    validator_state = \
        utils.get_current_validator_state(
            validator_info=validator_info,
            consensus_state=consensus_state,
            block_cache=self._block_cache)
    poet_config_view = PoetConfigView(state_view=state_view)

    # Reject the block if the validator has already claimed the key block
    # limit for its current PoET key pair.
    key_block_claim_limit = poet_config_view.key_block_claim_limit
    if utils.validator_has_claimed_maximum_number_of_blocks(
            validator_info=validator_info,
            validator_state=validator_state,
            key_block_claim_limit=key_block_claim_limit):
        LOGGER.error(
            'Block %s rejected: Validator has reached maximum number of '
            'blocks with key pair.',
            block_wrapper.identifier[:8])
        return False

    # Reject the block if the validator has not waited the required number
    # of blocks between when the block containing its validator registry
    # transaction was committed to the chain and trying to claim this
    # block
    if utils.validator_has_claimed_too_early(
            validator_info=validator_info,
            consensus_state=consensus_state,
            block_number=block_wrapper.block_num,
            validator_registry_view=validator_registry_view,
            poet_config_view=poet_config_view,
            block_store=self._block_cache.block_store):
        LOGGER.error(
            'Block %s rejected: Validator has not waited long enough '
            'since registering validator information.',
            block_wrapper.identifier[:8])
        return False

    # Reject the block if the validator is claiming blocks at a rate that
    # is more frequent than is statistically allowed (i.e., zTest)
    if utils.validator_has_claimed_too_frequently(
            validator_info=validator_info,
            previous_block_id=block_wrapper.previous_block_id,
            consensus_state=consensus_state,
            poet_config_view=poet_config_view,
            population_estimate=wait_certificate.population_estimate,
            block_cache=self._block_cache,
            poet_enclave_module=poet_enclave_module):
        LOGGER.error(
            'Block %s rejected: Validator is claiming blocks too '
            'frequently.',
            block_wrapper.identifier[:8])
        return False

    return True
def initialize_block(self, block_header):
    """Do initialization necessary for the consensus to claim a
    block, this may include initiating voting activities, starting
    proof of work hash generation, or create a PoET wait timer.

    Args:
        block_header (BlockHeader): The BlockHeader to initialize.

    Returns:
        Boolean: True if the candidate block should be built. False if
        no candidate should be built.
    """
    # If the previous block ID matches our cached one, that means that we
    # have already determined that even if we initialize the requested
    # block we would not be able to claim it.  So, instead of wasting time
    # doing all of the checking again, simply short-circuit the failure so
    # that the validator can go do something more useful.
    if block_header.previous_block_id == \
            PoetBlockPublisher._previous_block_id:
        return False
    PoetBlockPublisher._previous_block_id = block_header.previous_block_id

    # Using the current chain head, we need to create a state view so we
    # can create a PoET enclave.
    state_view = \
        BlockWrapper.state_view_for_block(
            block_wrapper=self._block_cache.block_store.chain_head,
            state_view_factory=self._state_view_factory)

    poet_enclave_module = \
        factory.PoetEnclaveFactory.get_poet_enclave_module(state_view)

    # Get our validator registry entry to see what PoET public key
    # other validators think we are using.
    validator_registry_view = ValidatorRegistryView(state_view)
    validator_info = None

    try:
        validator_id = block_header.signer_pubkey
        validator_info = \
            validator_registry_view.get_validator_info(
                validator_id=validator_id)
    except KeyError:
        # Not registered (yet); handled by the None check below.
        pass

    # If we don't have a validator registry entry, then check our cached
    # PoET public key.  If we don't have one, then we need to sign up.
    # If we do have one, then our validator registry entry has not
    # percolated through the system, so nothing to do but wait.
    if validator_info is None:
        if PoetBlockPublisher._poet_public_key is None:
            LOGGER.debug(
                'No public key found, so going to register new signup '
                'information')
            self._register_signup_information(
                block_header=block_header,
                poet_enclave_module=poet_enclave_module)

        return False

    # Otherwise, we have a current validator registry entry.  In that
    # case, we need to make sure that we are using the same PPK that the
    # other validators think we are using.  If not, then we need to switch
    # the PoET enclave to using the correct keys.
    elif validator_info.signup_info.poet_public_key != \
            PoetBlockPublisher._poet_public_key:
        # Retrieve the key state corresponding to the PoET public key and
        # use it to re-establish the key used by the enclave.
        poet_key_state = \
            self._poet_key_state_store[
                validator_info.signup_info.poet_public_key]

        PoetBlockPublisher._poet_public_key = \
            SignupInfo.unseal_signup_data(
                poet_enclave_module=poet_enclave_module,
                validator_address=block_header.signer_pubkey,
                sealed_signup_data=poet_key_state.sealed_signup_data)

        # Unsealing is expected to re-establish exactly the key the
        # registry advertises for us.
        assert PoetBlockPublisher._poet_public_key == \
            validator_info.signup_info.poet_public_key

        LOGGER.debug(
            'Switched to public key: %s...%s',
            PoetBlockPublisher._poet_public_key[:8],
            PoetBlockPublisher._poet_public_key[-8:])
        LOGGER.debug(
            'Unseal signup data: %s...%s',
            poet_key_state.sealed_signup_data[:8],
            poet_key_state.sealed_signup_data[-8:])

    # Fetch the consensus state as of the block we want to build upon and
    # our validator state within it.
    consensus_state = \
        utils.get_consensus_state_for_block_id(
            block_id=block_header.previous_block_id,
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            consensus_state_store=self._consensus_state_store,
            poet_enclave_module=poet_enclave_module)
    validator_state = \
        utils.get_current_validator_state(
            validator_info=validator_info,
            consensus_state=consensus_state,
            block_cache=self._block_cache)
    poet_config_view = PoetConfigView(state_view)

    # Using the consensus state for the block upon which we want to
    # build, check to see how many blocks we have claimed on this chain
    # with this PoET key.  If we have hit the key block claim limit, then
    # we need to check if the key has been refreshed.
    key_block_claim_limit = poet_config_view.key_block_claim_limit
    if utils.validator_has_claimed_maximum_number_of_blocks(
            validator_info=validator_info,
            validator_state=validator_state,
            key_block_claim_limit=key_block_claim_limit):
        # Because we have hit the limit, check to see if we have already
        # submitted a validator registry transaction with new signup
        # information, and therefore a new PoET public key.  If not, then
        # mark this PoET public key in the store as having been refreshed
        # and register new signup information.  Regardless, since we have
        # hit the key block claim limit, we won't even bother initializing
        # a block on this chain as it will be rejected by other
        # validators.
        poet_key_state = \
            self._poet_key_state_store[
                PoetBlockPublisher._poet_public_key]
        if not poet_key_state.has_been_refreshed:
            LOGGER.info('Reached block claim limit (%d) for key: %s...%s',
                        key_block_claim_limit,
                        PoetBlockPublisher._poet_public_key[:8],
                        PoetBlockPublisher._poet_public_key[-8:])

            sealed_signup_data = poet_key_state.sealed_signup_data
            self._poet_key_state_store[
                PoetBlockPublisher._poet_public_key] = \
                PoetKeyState(
                    sealed_signup_data=sealed_signup_data,
                    has_been_refreshed=True)

            self._register_signup_information(
                block_header=block_header,
                poet_enclave_module=poet_enclave_module)

        LOGGER.error(
            'Reject building on block %s: Validator has reached maximum '
            'number of blocks with key pair.',
            block_header.previous_block_id[:8])
        return False

    # Verify that we are abiding by the block claim delay (i.e., waiting a
    # certain number of blocks since our validator registry was added/
    # updated).
    if utils.validator_has_claimed_too_early(
            validator_info=validator_info,
            consensus_state=consensus_state,
            block_number=block_header.block_num,
            validator_registry_view=validator_registry_view,
            poet_config_view=poet_config_view,
            block_store=self._block_cache.block_store):
        LOGGER.error(
            'Reject building on block %s: Validator has not waited long '
            'enough since registering validator information.',
            block_header.previous_block_id[:8])
        return False

    # Create a list of certificates for the wait timer.  This seems to
    # have a little too much knowledge of the WaitTimer implementation,
    # but there is no use getting more than
    # WaitTimer.certificate_sample_length wait certificates.
    certificates = \
        utils.build_certificate_list(
            block_header=block_header,
            block_cache=self._block_cache,
            poet_enclave_module=poet_enclave_module,
            maximum_number=WaitTimer.certificate_sample_length)

    # We need to create a wait timer for the block...this is what we
    # will check when we are asked if it is time to publish the block
    wait_timer = \
        WaitTimer.create_wait_timer(
            poet_enclave_module=poet_enclave_module,
            validator_address=block_header.signer_pubkey,
            certificates=list(certificates))

    # NOTE - we do the zTest after we create the wait timer because we
    # need its population estimate to see if this block would be accepted
    # by other validators based upon the zTest.

    # Check to see if by chance we were to be able to claim this block
    # if it would result in us winning more frequently than statistically
    # expected.  If so, then refuse to initialize the block because other
    # validators will not accept anyway.
    if utils.validator_has_claimed_too_frequently(
            validator_info=validator_info,
            previous_block_id=block_header.previous_block_id,
            consensus_state=consensus_state,
            poet_config_view=poet_config_view,
            population_estimate=wait_timer.population_estimate,
            block_cache=self._block_cache,
            poet_enclave_module=poet_enclave_module):
        LOGGER.error(
            'Reject building on block %s: Validator is claiming blocks '
            'too frequently.',
            block_header.previous_block_id[:8])
        return False

    # At this point, we know that if we are able to claim the block we are
    # initializing, we will not be prevented from doing so because of PoET
    # policies.  Clear the cached previous block ID so the next attempt is
    # evaluated afresh.
    self._wait_timer = wait_timer
    PoetBlockPublisher._previous_block_id = None

    LOGGER.debug('Created wait timer: %s', self._wait_timer)

    return True
def initialize_block(self, block_header):
    """Do initialization necessary for the consensus to claim a
    block, this may include initiating voting activities, starting
    proof of work hash generation, or create a PoET wait timer.

    Args:
        block_header (BlockHeader): The BlockHeader to initialize.

    Returns:
        Boolean: True if the candidate block should be built. False if
        no candidate should be built.
    """
    # If the previous block ID matches our cached one, that means that we
    # have already determined that even if we initialize the requested
    # block we would not be able to claim it.  So, instead of wasting time
    # doing all of the checking again, simply short-circuit the failure so
    # that the validator can go do something more useful.
    if block_header.previous_block_id == \
            PoetBlockPublisher._previous_block_id:
        return False
    PoetBlockPublisher._previous_block_id = block_header.previous_block_id

    # Using the current chain head, we need to create a state view so we
    # can create a PoET enclave.
    state_view = \
        BlockWrapper.state_view_for_block(
            block_wrapper=self._block_cache.block_store.chain_head,
            state_view_factory=self._state_view_factory)

    poet_enclave_module = \
        factory.PoetEnclaveFactory.get_poet_enclave_module(
            state_view=state_view,
            config_dir=self._config_dir,
            data_dir=self._data_dir)

    # Get our validator registry entry to see what PoET public key
    # other validators think we are using.
    validator_registry_view = ValidatorRegistryView(state_view)
    validator_info = None

    try:
        validator_id = block_header.signer_pubkey
        validator_info = \
            validator_registry_view.get_validator_info(
                validator_id=validator_id)
    except KeyError:
        # Not registered (yet); handled by the None check below.
        pass

    # If we don't have a validator registry entry, then check the active
    # key.  If we don't have one, then we need to sign up.  If we do have
    # one, then our validator registry entry has not percolated through
    # the system, so nothing to do but wait.
    active_poet_public_key = self._poet_key_state_store.active_key
    if validator_info is None:
        if active_poet_public_key is None:
            LOGGER.debug(
                'No public key found, so going to register new signup '
                'information')
            self._register_signup_information(
                block_header=block_header,
                poet_enclave_module=poet_enclave_module)

        return False

    # Retrieve the key state corresponding to the PoET public key in our
    # validator registry entry.
    poet_key_state = None
    try:
        poet_key_state = \
            self._poet_key_state_store[
                validator_info.signup_info.poet_public_key]
    except (ValueError, KeyError):
        # Missing or unparseable key state; handled by the None check
        # below.
        pass

    # If there is no key state associated with the PoET public key that
    # other validators think we should be using, then we need to create
    # new signup information as we have no way whatsoever to publish
    # blocks that other validators will accept.
    if poet_key_state is None:
        LOGGER.debug(
            'PoET public key %s...%s in validator registry not found in '
            'key state store. Sign up again',
            validator_info.signup_info.poet_public_key[:8],
            validator_info.signup_info.poet_public_key[-8:])
        self._register_signup_information(
            block_header=block_header,
            poet_enclave_module=poet_enclave_module)

        # We need to put fake information in the key state store for the
        # PoET public key the other validators think we are using so that
        # we don't try to keep signing up.  However, we are going to mark
        # that key state store entry as being refreshed so that we will
        # never actually try to use it.
        dummy_data = b64encode(b'No sealed signup data').decode('utf-8')
        self._poet_key_state_store[
            validator_info.signup_info.poet_public_key] = \
            PoetKeyState(
                sealed_signup_data=dummy_data,
                has_been_refreshed=True)

        return False

    # Check the key state.  If it is marked as being refreshed, then we
    # are waiting until our PoET public key is updated in the validator
    # registry and therefore we cannot publish any blocks.
    if poet_key_state.has_been_refreshed:
        LOGGER.debug(
            'PoET public key %s...%s has been refreshed. Wait for new '
            'key to show up in validator registry.',
            validator_info.signup_info.poet_public_key[:8],
            validator_info.signup_info.poet_public_key[-8:])
        return False

    # If the PoET public key in the validator registry is not the active
    # one, then we need to switch the active key in the key state store.
    if validator_info.signup_info.poet_public_key != \
            active_poet_public_key:
        active_poet_public_key = validator_info.signup_info.poet_public_key
        self._poet_key_state_store.active_key = active_poet_public_key

    # Ensure that the enclave is using the appropriate keys
    try:
        unsealed_poet_public_key = \
            SignupInfo.unseal_signup_data(
                poet_enclave_module=poet_enclave_module,
                sealed_signup_data=poet_key_state.sealed_signup_data)
    except SystemError:
        # Signup data is unuseable; deactivate the key so a later attempt
        # can sign up again rather than retrying this sealed data.
        LOGGER.error(
            'Could not unseal signup data associated with PPK: %s..%s',
            active_poet_public_key[:8],
            active_poet_public_key[-8:])
        self._poet_key_state_store.active_key = None
        return False

    # Unsealing is expected to re-establish exactly the active key.
    assert active_poet_public_key == unsealed_poet_public_key

    LOGGER.debug(
        'Using PoET public key: %s...%s',
        active_poet_public_key[:8],
        active_poet_public_key[-8:])
    LOGGER.debug(
        'Unseal signup data: %s...%s',
        poet_key_state.sealed_signup_data[:8],
        poet_key_state.sealed_signup_data[-8:])

    # Fetch the consensus state and PoET settings as of the block we want
    # to build upon.
    consensus_state = \
        ConsensusState.consensus_state_for_block_id(
            block_id=block_header.previous_block_id,
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            consensus_state_store=self._consensus_state_store,
            poet_enclave_module=poet_enclave_module)
    poet_settings_view = PoetSettingsView(state_view)

    # If our signup information does not pass the freshness test, then we
    # know that other validators will reject any blocks we try to claim so
    # we need to try to sign up again.
    if consensus_state.validator_signup_was_committed_too_late(
            validator_info=validator_info,
            poet_settings_view=poet_settings_view,
            block_cache=self._block_cache):
        LOGGER.info(
            'Reject building on block %s: Validator signup information '
            'not committed in a timely manner.',
            block_header.previous_block_id[:8])
        self._register_signup_information(
            block_header=block_header,
            poet_enclave_module=poet_enclave_module)
        return False

    # Using the consensus state for the block upon which we want to
    # build, check to see how many blocks we have claimed on this chain
    # with this PoET key.  If we have hit the key block claim limit, then
    # we need to check if the key has been refreshed.
    if consensus_state.validator_has_claimed_block_limit(
            validator_info=validator_info,
            poet_settings_view=poet_settings_view):
        # Because we have hit the limit, check to see if we have already
        # submitted a validator registry transaction with new signup
        # information, and therefore a new PoET public key.  If not, then
        # mark this PoET public key in the store as having been refreshed
        # and register new signup information.  Regardless, since we have
        # hit the key block claim limit, we won't even bother initializing
        # a block on this chain as it will be rejected by other
        # validators.
        poet_key_state = self._poet_key_state_store[active_poet_public_key]
        if not poet_key_state.has_been_refreshed:
            LOGGER.info(
                'Reached block claim limit for key: %s...%s',
                active_poet_public_key[:8],
                active_poet_public_key[-8:])

            sealed_signup_data = poet_key_state.sealed_signup_data
            self._poet_key_state_store[active_poet_public_key] = \
                PoetKeyState(
                    sealed_signup_data=sealed_signup_data,
                    has_been_refreshed=True)

            # Release enclave resources for this identity
            # This signup will be invalid on all forks that use it,
            # even if there is a rollback to a point it should be valid.
            # A more sophisticated policy would be to release signups
            # only at a block depth where finality probability
            # is high.
            SignupInfo.release_signup_data(
                poet_enclave_module=poet_enclave_module,
                sealed_signup_data=sealed_signup_data)

            self._register_signup_information(
                block_header=block_header,
                poet_enclave_module=poet_enclave_module)

        LOGGER.info(
            'Reject building on block %s: Validator has reached maximum '
            'number of blocks with key pair.',
            block_header.previous_block_id[:8])
        return False

    # Verify that we are abiding by the block claim delay (i.e., waiting a
    # certain number of blocks since our validator registry was added/
    # updated).
    if consensus_state.validator_is_claiming_too_early(
            validator_info=validator_info,
            block_number=block_header.block_num,
            validator_registry_view=validator_registry_view,
            poet_settings_view=poet_settings_view,
            block_store=self._block_cache.block_store):
        LOGGER.info(
            'Reject building on block %s: Validator has not waited long '
            'enough since registering validator information.',
            block_header.previous_block_id[:8])
        return False

    # We need to create a wait timer for the block...this is what we
    # will check when we are asked if it is time to publish the block
    poet_key_state = self._poet_key_state_store[active_poet_public_key]
    sealed_signup_data = poet_key_state.sealed_signup_data

    previous_certificate_id = \
        utils.get_previous_certificate_id(
            block_header=block_header,
            block_cache=self._block_cache,
            poet_enclave_module=poet_enclave_module)
    wait_timer = \
        WaitTimer.create_wait_timer(
            poet_enclave_module=poet_enclave_module,
            sealed_signup_data=sealed_signup_data,
            validator_address=block_header.signer_pubkey,
            previous_certificate_id=previous_certificate_id,
            consensus_state=consensus_state,
            poet_settings_view=poet_settings_view)

    # NOTE - we do the zTest after we create the wait timer because we
    # need its population estimate to see if this block would be accepted
    # by other validators based upon the zTest.

    # Check to see if by chance we were to be able to claim this block
    # if it would result in us winning more frequently than statistically
    # expected.  If so, then refuse to initialize the block because other
    # validators will not accept anyway.
    if consensus_state.validator_is_claiming_too_frequently(
            validator_info=validator_info,
            previous_block_id=block_header.previous_block_id,
            poet_settings_view=poet_settings_view,
            population_estimate=wait_timer.population_estimate(
                poet_settings_view=poet_settings_view),
            block_cache=self._block_cache,
            poet_enclave_module=poet_enclave_module):
        LOGGER.info(
            'Reject building on block %s: Validator is claiming blocks '
            'too frequently.',
            block_header.previous_block_id[:8])
        return False

    # At this point, we know that if we are able to claim the block we are
    # initializing, we will not be prevented from doing so because of PoET
    # policies.  Clear the cached previous block ID so the next attempt is
    # evaluated afresh.
    self._wait_timer = wait_timer
    PoetBlockPublisher._previous_block_id = None

    LOGGER.debug('Created wait timer: %s', self._wait_timer)

    return True
def compare_forks(self, cur_fork_head, new_fork_head):
    """Given the head of two forks, return which should be the fork that
    the validator chooses.  When this is called both forks consist of
    only valid blocks.

    Args:
        cur_fork_head (Block): The current head of the block chain.
        new_fork_head (Block): The head of the fork that is being
        evaluated.
    Returns:
        Boolean: True if the new chain should replace the current chain.
        False if the new chain should be discarded.
    """
    chosen_fork_head = None

    state_view = \
        BlockWrapper.state_view_for_block(
            block_wrapper=cur_fork_head,
            state_view_factory=self._state_view_factory)
    poet_enclave_module = \
        factory.PoetEnclaveFactory.get_poet_enclave_module(
            state_view=state_view,
            config_dir=self._config_dir,
            data_dir=self._data_dir)

    current_fork_wait_certificate = \
        utils.deserialize_wait_certificate(
            block=cur_fork_head,
            poet_enclave_module=poet_enclave_module)
    new_fork_wait_certificate = \
        utils.deserialize_wait_certificate(
            block=new_fork_head,
            poet_enclave_module=poet_enclave_module)

    # If we ever get a new fork head that is not a PoET block, then bail
    # out.  This should never happen, but defensively protect against it.
    if new_fork_wait_certificate is None:
        raise \
            TypeError(
                'New fork head {} is not a PoET block'.format(
                    new_fork_head.identifier[:8]))

    # Criterion #1: If the current fork head is not PoET, then check to
    # see if the new fork head is building on top of it.  That would be
    # okay.  However if not, then we don't have a good deterministic way
    # of choosing a winner.  Again, the latter should never happen, but
    # defensively protect against it.
    if current_fork_wait_certificate is None:
        if new_fork_head.previous_block_id == cur_fork_head.identifier:
            LOGGER.info(
                'Choose new fork %s: New fork head switches consensus to '
                'PoET',
                new_fork_head.identifier[:8])
            chosen_fork_head = new_fork_head
        else:
            raise \
                TypeError(
                    'Trying to compare a PoET block {} to a non-PoET '
                    'block {} that is not the direct predecessor'.format(
                        new_fork_head.identifier[:8],
                        cur_fork_head.identifier[:8]))

    # Criterion #2: If they share the same immediate previous block,
    # then the one with the smaller wait duration is chosen
    elif cur_fork_head.previous_block_id == \
            new_fork_head.previous_block_id:
        if current_fork_wait_certificate.duration < \
                new_fork_wait_certificate.duration:
            LOGGER.info(
                'Choose current fork %s: Current fork wait duration '
                '(%f) less than new fork wait duration (%f)',
                cur_fork_head.header_signature[:8],
                current_fork_wait_certificate.duration,
                new_fork_wait_certificate.duration)
            chosen_fork_head = cur_fork_head
        elif new_fork_wait_certificate.duration < \
                current_fork_wait_certificate.duration:
            LOGGER.info(
                'Choose new fork %s: New fork wait duration (%f) '
                'less than current fork wait duration (%f)',
                new_fork_head.header_signature[:8],
                new_fork_wait_certificate.duration,
                current_fork_wait_certificate.duration)
            chosen_fork_head = new_fork_head

    # Criterion #3: If they don't share the same immediate previous
    # block, then the one with the higher aggregate local mean wins
    else:
        # Get the consensus state for the current fork head and the
        # block immediately before the new fork head (as we haven't
        # committed to the block yet).  So that the new fork doesn't
        # have to fight with one hand tied behind its back, add the
        # new fork head's wait certificate's local mean to the
        # aggregate local mean for the predecessor block's consensus
        # state for the comparison.
        current_fork_consensus_state = \
            ConsensusState.consensus_state_for_block_id(
                block_id=cur_fork_head.identifier,
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                consensus_state_store=self._consensus_state_store,
                poet_enclave_module=poet_enclave_module)
        new_fork_consensus_state = \
            ConsensusState.consensus_state_for_block_id(
                block_id=new_fork_head.previous_block_id,
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                consensus_state_store=self._consensus_state_store,
                poet_enclave_module=poet_enclave_module)
        new_fork_aggregate_local_mean = \
            new_fork_consensus_state.aggregate_local_mean + \
            new_fork_wait_certificate.local_mean

        if current_fork_consensus_state.aggregate_local_mean > \
                new_fork_aggregate_local_mean:
            LOGGER.info(
                'Choose current fork %s: Current fork aggregate '
                'local mean (%f) greater than new fork aggregate '
                'local mean (%f)',
                cur_fork_head.header_signature[:8],
                current_fork_consensus_state.aggregate_local_mean,
                new_fork_aggregate_local_mean)
            chosen_fork_head = cur_fork_head
        elif new_fork_aggregate_local_mean > \
                current_fork_consensus_state.aggregate_local_mean:
            LOGGER.info(
                'Choose new fork %s: New fork aggregate local mean '
                '(%f) greater than current fork aggregate local mean '
                '(%f)',
                new_fork_head.header_signature[:8],
                new_fork_aggregate_local_mean,
                current_fork_consensus_state.aggregate_local_mean)
            chosen_fork_head = new_fork_head

    # Criterion #4: If we have gotten to this point and we have not
    # chosen yet, we are going to fall back on using the block
    # identifiers (header signatures).  The lexicographically larger one
    # will be the chosen one.  The chance that they are equal are
    # infinitesimally small.
    if chosen_fork_head is None:
        if cur_fork_head.header_signature > \
                new_fork_head.header_signature:
            # FIX: a trailing space was added after 'signature' below;
            # the implicit literal concatenation previously emitted the
            # malformed message '...header signature(%s)...'.
            LOGGER.info(
                'Choose current fork %s: Current fork header signature '
                '(%s) greater than new fork header signature (%s)',
                cur_fork_head.header_signature[:8],
                cur_fork_head.header_signature[:8],
                new_fork_head.header_signature[:8])
            chosen_fork_head = cur_fork_head
        else:
            LOGGER.info(
                'Choose new fork %s: New fork header signature (%s) '
                'greater than current fork header signature (%s)',
                new_fork_head.header_signature[:8],
                new_fork_head.header_signature[:8],
                cur_fork_head.header_signature[:8])
            chosen_fork_head = new_fork_head

    # Now that we have chosen a fork for the chain head, if we chose the
    # new fork and it is a PoET block (i.e., it has a wait certificate),
    # we need to create consensus state store information for the new
    # fork's chain head.
    if chosen_fork_head == new_fork_head:
        # Get the state view for the previous block in the chain so we
        # can create a PoET enclave
        previous_block = None
        try:
            previous_block = \
                self._block_cache[new_fork_head.previous_block_id]
        except KeyError:
            pass

        state_view = \
            BlockWrapper.state_view_for_block(
                block_wrapper=previous_block,
                state_view_factory=self._state_view_factory)

        validator_registry_view = ValidatorRegistryView(state_view)
        try:
            # Get the validator info for the validator that claimed the
            # fork head
            validator_info = \
                validator_registry_view.get_validator_info(
                    new_fork_head.header.signer_pubkey)

            # Get the consensus state for the new fork head's previous
            # block, let the consensus state update itself appropriately
            # based upon the validator claiming a block, and then
            # associate the consensus state with the new block in the
            # store.
            consensus_state = \
                ConsensusState.consensus_state_for_block_id(
                    block_id=new_fork_head.previous_block_id,
                    block_cache=self._block_cache,
                    state_view_factory=self._state_view_factory,
                    consensus_state_store=self._consensus_state_store,
                    poet_enclave_module=poet_enclave_module)
            consensus_state.validator_did_claim_block(
                validator_info=validator_info,
                wait_certificate=new_fork_wait_certificate,
                poet_settings_view=PoetSettingsView(state_view))
            self._consensus_state_store[new_fork_head.identifier] = \
                consensus_state

            LOGGER.debug(
                'Create consensus state: BID=%s, ALM=%f, TBCC=%d',
                new_fork_head.identifier[:8],
                consensus_state.aggregate_local_mean,
                consensus_state.total_block_claim_count)
        except KeyError:
            # This _should_ never happen.  The new potential fork head
            # has to have been a PoET block and for it to be verified
            # by the PoET block verifier, it must have been signed by
            # validator in the validator registry.  If not found, we
            # are going to just stick with the current fork head.
            LOGGER.error(
                'New fork head claimed by validator not in validator '
                'registry: %s...%s',
                new_fork_head.header.signer_pubkey[:8],
                new_fork_head.header.signer_pubkey[-8:])
            chosen_fork_head = cur_fork_head

    return chosen_fork_head == new_fork_head
def verify_block(self, block_wrapper):
    """Check that the block received conforms to the consensus rules.

    Args:
        block_wrapper (BlockWrapper): The block to validate.
    Returns:
        Boolean: True if the Block is valid, False if the
        block is invalid.  Every rule violation below is raised as a
        ValueError, caught at the bottom, logged, and reported as
        False; no ValueError escapes this method.
    """
    # Get the state view for the previous block in the chain so we can
    # create a PoET enclave and validator registry view
    previous_block = None
    try:
        previous_block = \
            self._block_cache[block_wrapper.previous_block_id]
    except KeyError:
        # Predecessor missing from the cache is tolerated: None is
        # handed to state_view_for_block below (assumed to cope with
        # None -- TODO confirm in BlockWrapper).
        pass

    state_view = \
        BlockWrapper.state_view_for_block(
            block_wrapper=previous_block,
            state_view_factory=self._state_view_factory)

    poet_enclave_module = \
        factory.PoetEnclaveFactory.get_poet_enclave_module(state_view)
    validator_registry_view = ValidatorRegistryView(state_view)
    try:
        # Grab the validator info based upon the block signer's public
        # key
        try:
            validator_info = \
                validator_registry_view.get_validator_info(
                    block_wrapper.header.signer_pubkey)
        except KeyError:
            raise \
                ValueError(
                    'Received block from an unregistered validator '
                    '{}...{}'.format(
                        block_wrapper.header.signer_pubkey[:8],
                        block_wrapper.header.signer_pubkey[-8:]))

        LOGGER.debug(
            'Block Signer Name=%s, ID=%s...%s, PoET public key='
            '%s...%s',
            validator_info.name,
            validator_info.id[:8],
            validator_info.id[-8:],
            validator_info.signup_info.poet_public_key[:8],
            validator_info.signup_info.poet_public_key[-8:])

        # Create a list of certificates leading up to this block.
        # This seems to have a little too much knowledge of the
        # WaitTimer implementation, but there is no use getting more
        # than WaitTimer.certificate_sample_length wait certificates.
        certificates = \
            utils.build_certificate_list(
                block_header=block_wrapper.header,
                block_cache=self._block_cache,
                poet_enclave_module=poet_enclave_module,
                maximum_number=WaitTimer.certificate_sample_length)

        # For the candidate block, reconstitute the wait certificate
        # and verify that it is valid
        wait_certificate = \
            utils.deserialize_wait_certificate(
                block=block_wrapper,
                poet_enclave_module=poet_enclave_module)
        if wait_certificate is None:
            raise \
                ValueError(
                    'Being asked to verify a block that was not '
                    'created by PoET consensus module')

        poet_public_key = \
            validator_info.signup_info.poet_public_key
        wait_certificate.check_valid(
            poet_enclave_module=poet_enclave_module,
            certificates=certificates,
            poet_public_key=poet_public_key)

        # Get the consensus state for the block that is being built
        # upon, fetch the validator state for this validator, and then
        # see if that validator has already claimed the key block limit
        # for its current PoET key pair.  If so, then we reject the
        # block.
        consensus_state = \
            utils.get_consensus_state_for_block_id(
                block_id=block_wrapper.previous_block_id,
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                consensus_state_store=self._consensus_state_store,
                poet_enclave_module=poet_enclave_module)
        validator_state = \
            utils.get_current_validator_state(
                validator_info=validator_info,
                consensus_state=consensus_state,
                block_cache=self._block_cache)
        poet_config_view = PoetConfigView(state_view=state_view)
        if validator_state.poet_public_key == poet_public_key and \
                validator_state.key_block_claim_count >= \
                poet_config_view.key_block_claim_limit:
            raise \
                ValueError(
                    'Validator {} has already reached claim block limit '
                    'for current PoET key pair: {} >= {}'.format(
                        validator_info.name,
                        validator_state.key_block_claim_count,
                        poet_config_view.key_block_claim_limit))

        # While having a block claim delay is nice, it turns out that in
        # practice the claim delay should not be more than one less than
        # the number of validators.  It helps to imagine the scenario
        # where each validator hits their block claim limit in sequential
        # blocks and their new validator registry information is updated
        # in the following block by another validator, assuming that there
        # were no forks.  If there are N validators, once all N validators
        # have updated their validator registry information, there will
        # have been N-1 block commits and the Nth validator will only be
        # able to get its updated validator registry information updated
        # if the first validator that kicked this off is now able to claim
        # a block.  If the block claim delay was greater than or equal to
        # the number of validators, at this point no validators would be
        # able to claim a block.
        number_of_validators = \
            len(validator_registry_view.get_validators())
        block_claim_delay = \
            min(
                poet_config_view.block_claim_delay,
                number_of_validators - 1)

        # While a validator network is starting up, we need to be careful
        # about applying the block claim delay because if we are too
        # aggressive we will get ourselves into a situation where the
        # block claim delay will prevent any validators from claiming
        # blocks.  So, until we get at least block_claim_delay blocks
        # we are going to choose not to enforce the delay.
        if consensus_state.total_block_claim_count <= block_claim_delay:
            # NOTE: the early True return deliberately skips the
            # remaining claim-delay checks below.
            LOGGER.debug(
                'Skipping block claim delay check. Only %d block(s) in '
                'the chain. Claim delay is %d block(s). %d validator(s) '
                'registered.',
                consensus_state.total_block_claim_count,
                block_claim_delay,
                number_of_validators)
            return True

        # Number of commits since the validator's registry entry was
        # committed (the -1 presumably excludes the registration block
        # itself -- TODO confirm against validator_state semantics).
        blocks_since_registration = \
            block_wrapper.block_num - \
            validator_state.commit_block_number - 1

        if block_claim_delay > blocks_since_registration:
            raise \
                ValueError(
                    'Validator {} claiming too early. Block: {}, '
                    'registered in: {}, wait until after: {}.'.format(
                        validator_info.name,
                        block_wrapper.block_num,
                        validator_state.commit_block_number,
                        validator_state.commit_block_number +
                        block_claim_delay))

        LOGGER.debug(
            '%d block(s) claimed since %s was registered and block '
            'claim delay is %d block(s). Check passed.',
            blocks_since_registration,
            validator_info.name,
            block_claim_delay)
    except ValueError as error:
        LOGGER.error('Failed to verify block: %s', error)
        return False

    return True
def verify_block(self, block_wrapper):
    """Check that the block received conforms to the consensus rules.

    Args:
        block_wrapper (BlockWrapper): The block to validate.
    Returns:
        Boolean: True if the Block is valid, False if the
        block is invalid.
    """
    # A state view anchored on the block's predecessor is needed so the
    # PoET enclave and the validator registry view reflect the state
    # the candidate block was built on.  A predecessor missing from the
    # cache results in None being passed along.
    try:
        prev_block = self._block_cache[block_wrapper.previous_block_id]
    except KeyError:
        prev_block = None

    view = BlockWrapper.state_view_for_block(
        block_wrapper=prev_block,
        state_view_factory=self._state_view_factory)

    enclave = factory.PoetEnclaveFactory.get_poet_enclave_module(view)
    registry = ValidatorRegistryView(view)

    try:
        signer_key = block_wrapper.header.signer_pubkey

        # The block signer must have an entry in the validator
        # registry; otherwise the block is rejected.
        try:
            info = registry.get_validator_info(signer_key)
        except KeyError:
            raise ValueError(
                'Received block from an unregistered validator '
                '{}...{}'.format(signer_key[:8], signer_key[-8:]))

        LOGGER.debug(
            'Block Signer Name=%s, ID=%s...%s, PoET public key='
            '%s...%s',
            info.name,
            info.id[:8],
            info.id[-8:],
            info.signup_info.poet_public_key[:8],
            info.signup_info.poet_public_key[-8:])

        # Gather the wait certificates leading up to this block.  There
        # is no use fetching more than the sample length the WaitTimer
        # implementation works with.
        cert_chain = utils.build_certificate_list(
            block_header=block_wrapper.header,
            block_cache=self._block_cache,
            poet_enclave_module=enclave,
            maximum_number=WaitTimer.certificate_sample_length)

        # Reconstitute the candidate block's own wait certificate; its
        # absence means the block did not come from this consensus
        # module.
        wait_cert = utils.deserialize_wait_certificate(
            block=block_wrapper,
            poet_enclave_module=enclave)
        if wait_cert is None:
            raise ValueError(
                'Being asked to verify a block that was not '
                'created by PoET consensus module')

        # Validate the certificate against the preceding chain and the
        # signer's registered PoET public key.
        wait_cert.check_valid(
            poet_enclave_module=enclave,
            certificates=cert_chain,
            poet_public_key=info.signup_info.poet_public_key)
    except ValueError as error:
        LOGGER.error('Failed to verify block: %s', error)
        return False

    return True
def _build_candidate_block(self, chain_head):
    """ Build a candidate block and construct the consensus object to
    validate it.
    :param chain_head: The block to build on top of.
    :return: (BlockBuilder) - The candidate block in a BlockBuilder
    wrapper.

    NOTE(review): in the code visible here the candidate is stored on
    self._candidate_block rather than returned (consensus-not-ready
    returns None) -- confirm the :return: note against the full method.
    """
    # Derive the state view and consensus module from the current chain
    # head so the candidate is built under the rules in effect there.
    state_view = BlockWrapper.state_view_for_block(
        chain_head, self._state_view_factory)
    consensus_module = ConsensusFactory.get_configured_consensus_module(
        chain_head.header_signature, state_view)

    # using chain_head's state root so we can use the settings_cache
    max_batches = int(self._settings_cache.get_setting(
        'sawtooth.publisher.max_batches_per_block',
        chain_head.state_root_hash,
        default_value=0))

    public_key = self._identity_signer.get_public_key().as_hex()
    consensus = consensus_module.\
        BlockPublisher(block_cache=self._block_cache,
                       state_view_factory=self._state_view_factory,
                       batch_publisher=self._batch_publisher,
                       data_dir=self._data_dir,
                       config_dir=self._config_dir,
                       validator_id=public_key)

    batch_injectors = []
    if self._batch_injector_factory is not None:
        batch_injectors = self._batch_injector_factory.create_injectors(
            chain_head.identifier)
        if batch_injectors:
            LOGGER.debug("Loaded batch injectors: %s", batch_injectors)

    block_header = BlockHeader(
        block_num=chain_head.block_num + 1,
        previous_block_id=chain_head.header_signature,
        signer_public_key=public_key)
    block_builder = BlockBuilder(block_header)

    # Consensus may decline to initialize the block; the _logging_states
    # flag ensures the not-ready/ready transition is logged only once
    # instead of on every attempt.
    if not consensus.initialize_block(block_builder.block_header):
        if not self._logging_states.consensus_not_ready:
            self._logging_states.consensus_not_ready = True
            LOGGER.debug("Consensus not ready to build candidate block.")
        return None

    if self._logging_states.consensus_not_ready:
        self._logging_states.consensus_not_ready = False
        LOGGER.debug("Consensus is ready to build candidate block.")

    # create a new scheduler
    scheduler = self._transaction_executor.create_scheduler(
        self._squash_handler, chain_head.state_root_hash)

    # build the TransactionCommitCache
    committed_txn_cache = TransactionCommitCache(
        self._block_cache.block_store)

    self._transaction_executor.execute(scheduler)
    self._candidate_block = _CandidateBlock(
        self._block_cache.block_store,
        consensus, scheduler,
        committed_txn_cache,
        block_builder,
        max_batches,
        batch_injectors,
        SettingsView(state_view),
        public_key)

    # Seed the candidate with the already-pending batches, stopping as
    # soon as it reports it cannot accept more (presumably the
    # max_batches limit -- see _CandidateBlock.can_add_batch).
    for batch in self._pending_batches:
        if self._candidate_block.can_add_batch:
            self._candidate_block.add_batch(batch)
        else:
            break