def _handle_registration_timeout(self, block_header, poet_enclave_module,
                                 state_view, signup_nonce,
                                 poet_public_key):
    """Re-register signup information if a pending registration attempt
    has timed out.

    Assumes the caller has already checked for a committed registration
    and did not find one.  If the attempt has timed out, the stale key
    state is dropped and new signup information is registered.

    Args:
        block_header (BlockHeader): Header of the block being built upon.
        poet_enclave_module (module): The PoET enclave module in use.
        state_view (StateView): State view used to read PoET settings.
        signup_nonce (str): Nonce of the outstanding signup attempt.
        poet_public_key (str): PoET public key of the pending signup.
    """
    consensus_state = ConsensusState.consensus_state_for_block_id(
        block_id=block_header.previous_block_id,
        block_cache=self._block_cache,
        state_view_factory=self._state_view_factory,
        consensus_state_store=self._consensus_state_store,
        poet_enclave_module=poet_enclave_module)

    # Nothing to do unless the signup attempt has actually timed out
    if not consensus_state.signup_attempt_timed_out(
            signup_nonce, PoetSettingsView(state_view), self._block_cache):
        return

    LOGGER.error('My poet registration using PPK %s has not '
                 'committed by block %s. Create new registration',
                 poet_public_key,
                 block_header.previous_block_id)

    # Drop the stale key state before signing up again
    del self._poet_key_state_store[poet_public_key]
    self._register_signup_information(
        block_header=block_header,
        poet_enclave_module=poet_enclave_module)
def test_ztest_maximum_win_deviation(self, mock_settings_view):
    """Verify that retrieving zTest maximum win deviation works for invalid
    cases (missing, invalid format, invalid value) as well as valid case.
    """
    expected_default = \
        TestPoetSettingsView._EXPECTED_DEFAULT_ZTEST_MAXIMUM_WIN_DEVIATION_
    get_setting = mock_settings_view.return_value.get_setting

    # Simulate an underlying error parsing value
    view = PoetSettingsView(state_view=None)
    get_setting.side_effect = ValueError('bad value')

    self.assertEqual(view.ztest_maximum_win_deviation, expected_default)

    _, kwargs = get_setting.call_args
    self.assertEqual(
        kwargs['key'],
        'sawtooth.poet.ztest_maximum_win_deviation')
    self.assertEqual(kwargs['default_value'], expected_default)
    self.assertEqual(kwargs['value_type'], float)

    # Underlying config setting is not a valid value
    get_setting.side_effect = None
    for invalid in \
            [-100.0, -1.0, 0.0,
             float('nan'), float('inf'), float('-inf')]:
        get_setting.return_value = invalid
        view = PoetSettingsView(state_view=None)
        self.assertEqual(
            view.ztest_maximum_win_deviation, expected_default)

    # Underlying config setting is a valid value
    get_setting.return_value = 2.575
    view = PoetSettingsView(state_view=None)
    self.assertEqual(view.ztest_maximum_win_deviation, 2.575)
def test_population_estimate_sample_size(self, mock_settings_view):
    """Verify that retrieving population estimate sample size works for
    invalid cases (missing, invalid format, invalid value) as well as
    valid case.
    """
    expected_default = \
        TestPoetSettingsView. \
        _EXPECTED_DEFAULT_POPULATION_ESTIMATE_SAMPLE_SIZE_
    get_setting = mock_settings_view.return_value.get_setting

    # Simulate an underlying error parsing value
    view = PoetSettingsView(state_view=None)
    get_setting.side_effect = ValueError('bad value')

    self.assertEqual(
        view.population_estimate_sample_size, expected_default)

    _, kwargs = get_setting.call_args
    self.assertEqual(
        kwargs['key'],
        'sawtooth.poet.population_estimate_sample_size')
    self.assertEqual(kwargs['default_value'], expected_default)
    self.assertEqual(kwargs['value_type'], int)

    # Underlying config setting is not a valid value
    get_setting.side_effect = None
    for invalid in [-100, -1, 0]:
        get_setting.return_value = invalid
        view = PoetSettingsView(state_view=None)
        self.assertEqual(
            view.population_estimate_sample_size, expected_default)

    # Underlying config setting is a valid value
    get_setting.return_value = 1
    view = PoetSettingsView(state_view=None)
    self.assertEqual(view.population_estimate_sample_size, 1)
def test_signup_commit_maximum_delay(self, mock_settings_view):
    """Verify that retrieving signup commit maximum delay works for invalid
    cases (missing, invalid format, invalid value) as well as valid case.
    """
    expected_default = \
        TestPoetSettingsView._EXPECTED_DEFAULT_SIGNUP_COMMIT_MAXIMUM_DELAY_
    get_setting = mock_settings_view.return_value.get_setting

    # Simulate an underlying error parsing value
    view = PoetSettingsView(state_view=None)
    get_setting.side_effect = ValueError('bad value')

    self.assertEqual(view.signup_commit_maximum_delay, expected_default)

    _, kwargs = get_setting.call_args
    self.assertEqual(
        kwargs['key'],
        'sawtooth.poet.signup_commit_maximum_delay')
    self.assertEqual(kwargs['default_value'], expected_default)
    self.assertEqual(kwargs['value_type'], int)

    # Underlying config setting is not a valid value
    get_setting.side_effect = None
    for invalid in [-100, -1]:
        get_setting.return_value = invalid
        view = PoetSettingsView(state_view=None)
        self.assertEqual(
            view.signup_commit_maximum_delay, expected_default)

    # Underlying config setting is a valid value
    get_setting.return_value = 123
    view = PoetSettingsView(state_view=None)
    self.assertEqual(view.signup_commit_maximum_delay, 123)
def test_ztest_minimum_win_count(self, mock_settings_view):
    """Verify that retrieving zTest minimum win observations works for
    invalid cases (missing, invalid format, invalid value) as well as
    valid case.
    """
    expected_default = \
        TestPoetSettingsView._EXPECTED_DEFAULT_ZTEST_MINIMUM_WIN_COUNT_
    get_setting = mock_settings_view.return_value.get_setting

    # Simulate an underlying error parsing value
    view = PoetSettingsView(state_view=None)
    get_setting.side_effect = ValueError('bad value')

    self.assertEqual(view.ztest_minimum_win_count, expected_default)

    _, kwargs = get_setting.call_args
    self.assertEqual(
        kwargs['key'],
        'sawtooth.poet.ztest_minimum_win_count')
    self.assertEqual(kwargs['default_value'], expected_default)
    self.assertEqual(kwargs['value_type'], int)

    # Underlying config setting is not a valid value
    get_setting.side_effect = None
    for invalid in [-100, -1]:
        get_setting.return_value = invalid
        view = PoetSettingsView(state_view=None)
        self.assertEqual(
            view.ztest_minimum_win_count, expected_default)

    # Underlying config setting is a valid value
    get_setting.return_value = 0
    view = PoetSettingsView(state_view=None)
    self.assertEqual(view.ztest_minimum_win_count, 0)
def test_block_claim_delay(self, mock_settings_view):
    """Verify that retrieving block claim delay works for invalid
    cases (missing, invalid format, invalid value) as well as valid case.
    """
    poet_settings_view = PoetSettingsView(state_view=None)

    # Simulate an underlying error parsing value
    mock_settings_view.return_value.get_setting.side_effect = \
        ValueError('bad value')

    self.assertEqual(
        poet_settings_view.block_claim_delay,
        TestPoetSettingsView._EXPECTED_DEFAULT_BLOCK_CLAIM_DELAY_)

    _, kwargs = \
        mock_settings_view.return_value.get_setting.call_args

    self.assertEqual(kwargs['key'], 'sawtooth.poet.block_claim_delay')
    self.assertEqual(
        kwargs['default_value'],
        TestPoetSettingsView._EXPECTED_DEFAULT_BLOCK_CLAIM_DELAY_)
    self.assertEqual(kwargs['value_type'], int)

    # Underlying config setting is not a valid value
    mock_settings_view.return_value.get_setting.side_effect = None
    for bad_value in [-100, -1]:
        mock_settings_view.return_value.get_setting.return_value = \
            bad_value
        poet_settings_view = PoetSettingsView(state_view=None)
        self.assertEqual(
            poet_settings_view.block_claim_delay,
            TestPoetSettingsView._EXPECTED_DEFAULT_BLOCK_CLAIM_DELAY_)

    # Underlying config setting is a valid value.
    # NOTE(fix): set the mock's return value BEFORE constructing the
    # settings view.  The original created the view first, which only
    # worked because the property reads the setting lazily; every sibling
    # test sets the mock first, and this ordering keeps the test correct
    # even if the view ever reads/caches settings at construction time.
    mock_settings_view.return_value.get_setting.return_value = 0
    poet_settings_view = PoetSettingsView(state_view=None)
    self.assertEqual(poet_settings_view.block_claim_delay, 0)

    mock_settings_view.return_value.get_setting.return_value = 1
    poet_settings_view = PoetSettingsView(state_view=None)
    self.assertEqual(poet_settings_view.block_claim_delay, 1)
def test_target_wait_time(self, mock_settings_view):
    """Verify that retrieving target wait time works for invalid cases
    (missing, invalid format, invalid value) as well as valid case.
    """
    expected_default = \
        TestPoetSettingsView._EXPECTED_DEFAULT_TARGET_WAIT_TIME_
    get_setting = mock_settings_view.return_value.get_setting

    # Simulate an underlying error parsing value
    view = PoetSettingsView(state_view=None)
    get_setting.side_effect = ValueError('bad value')

    self.assertEqual(view.target_wait_time, expected_default)

    _, kwargs = get_setting.call_args
    self.assertEqual(kwargs['key'], 'sawtooth.poet.target_wait_time')
    self.assertEqual(kwargs['default_value'], expected_default)
    self.assertEqual(kwargs['value_type'], float)

    # Underlying config setting is not a valid value
    get_setting.side_effect = None
    for invalid in \
            [-100.0, -1.0, 0.0,
             float('nan'), float('inf'), float('-inf')]:
        get_setting.return_value = invalid
        view = PoetSettingsView(state_view=None)
        self.assertEqual(view.target_wait_time, expected_default)

    # Underlying config setting is a valid value
    get_setting.return_value = 3.1415
    view = PoetSettingsView(state_view=None)
    self.assertEqual(view.target_wait_time, 3.1415)
def test_enclave_module_name(self, mock_settings_view):
    """Verify that retrieving enclave module name works for invalid
    cases (missing, invalid format, invalid value) as well as valid case.
    """
    expected_default = \
        TestPoetSettingsView._EXPECTED_DEFAULT_ENCLAVE_MODULE_NAME_
    get_setting = mock_settings_view.return_value.get_setting

    # Simulate an underlying error parsing value
    view = PoetSettingsView(state_view=None)
    get_setting.side_effect = ValueError('bad value')

    self.assertEqual(view.enclave_module_name, expected_default)

    _, kwargs = get_setting.call_args
    self.assertEqual(kwargs['key'], 'sawtooth.poet.enclave_module_name')
    self.assertEqual(kwargs['default_value'], expected_default)
    self.assertEqual(kwargs['value_type'], str)

    # Underlying config setting is not a valid value (empty string)
    get_setting.side_effect = None
    get_setting.return_value = ''
    view = PoetSettingsView(state_view=None)
    self.assertEqual(view.enclave_module_name, expected_default)

    # Underlying config setting is a valid value
    get_setting.return_value = 'valid value'
    view = PoetSettingsView(state_view=None)
    self.assertEqual(view.enclave_module_name, 'valid value')
def get_poet_enclave_module(cls, state_view, config_dir, data_dir):
    """Returns the PoET enclave module based upon the corresponding value
    set by the sawtooth_settings transaction family.  If no PoET enclave
    module has been set in the configuration, it defaults to the PoET
    enclave simulator.

    Args:
        state_view (StateView): The current state view.
        config_dir (str): path to location where configuration for the
            poet enclave module can be found.
        data_dir (str): path to location where data for the
            poet enclave module can be found.

    Returns:
        module: The configured PoET enclave module, or the PoET enclave
            simulator module if none configured.

    Raises:
        ImportError: Raised if the given module_name does not correspond
            to a consensus implementation.
    """
    # Load the enclave at most once; afterwards hand back the cached
    # module.  All of this happens under the class lock so concurrent
    # callers cannot race the load/initialize sequence.
    with cls._lock:
        if cls._poet_enclave_module is not None:
            return cls._poet_enclave_module

        # Get the configured PoET enclave module name.
        settings_view = PoetSettingsView(state_view)
        module_name = settings_view.enclave_module_name

        LOGGER.info(
            'Load PoET enclave module: %s; '
            'Target wait time: %f; '
            'Initial wait time: %f; '
            'Population estimate sample size: %d; '
            'Minimum wait time: %f',
            module_name,
            settings_view.target_wait_time,
            settings_view.initial_wait_time,
            settings_view.population_estimate_sample_size,
            settings_view.minimum_wait_time)

        # Load and initialize the module, then cache it for future calls
        enclave = importlib.import_module(module_name)
        enclave.initialize(config_dir, data_dir)
        cls._poet_enclave_module = enclave

        return cls._poet_enclave_module
def consensus_state_for_block_id(block_id, block_cache, state_view_factory,
                                 consensus_state_store,
                                 poet_enclave_module):
    """Returns the consensus state for the block referenced by block ID,
        creating it from the consensus state history if necessary.

    Args:
        block_id (str): The ID of the block for which consensus state will
            be returned.
        block_cache (BlockCache): The block store cache
        state_view_factory (StateViewFactory): A factory that can be used
            to create state view object corresponding to blocks
        consensus_state_store (ConsensusStateStore): The consensus state
            store that is used to store interim consensus state created
            up to resulting consensus state
        poet_enclave_module (module): The PoET enclave module

    Returns:
        ConsensusState object representing the consensus state for the
            block referenced by block_id
    """
    consensus_state = None
    previous_wait_certificate = None
    # Ordered newest-to-oldest as we walk back; replayed in reverse below
    blocks = collections.OrderedDict()

    # Starting at the chain head, walk the block store backwards until we
    # either get to the root or we get a block for which we have already
    # created consensus state
    current_id = block_id
    while True:
        block = \
            ConsensusState._block_for_id(
                block_id=current_id,
                block_cache=block_cache)
        if block is None:
            break

        # Try to fetch the consensus state.  If that succeeds, we can
        # stop walking back as we can now build on that consensus
        # state.
        consensus_state = consensus_state_store.get(block_id=current_id)
        if consensus_state is not None:
            break

        wait_certificate = \
            utils.deserialize_wait_certificate(
                block=block,
                poet_enclave_module=poet_enclave_module)

        # If this is a PoET block (i.e., it has a wait certificate), get
        # the validator info for the validator that signed this block and
        # add the block information we will need to set validator state in
        # the block's consensus state.
        if wait_certificate is not None:
            state_view = \
                state_view_factory.create_view(
                    state_root_hash=block.state_root_hash)
            validator_registry_view = \
                ValidatorRegistryView(state_view=state_view)
            validator_info = \
                validator_registry_view.get_validator_info(
                    validator_id=block.header.signer_pubkey)

            LOGGER.debug(
                'We need to build consensus state for block: %s...%s',
                current_id[:8],
                current_id[-8:])

            blocks[current_id] = \
                ConsensusState._BlockInfo(
                    wait_certificate=wait_certificate,
                    validator_info=validator_info,
                    poet_settings_view=PoetSettingsView(state_view))

        # Otherwise, this is a non-PoET block.  If we don't have any blocks
        # yet or the last block we processed was a PoET block, put a
        # placeholder in the list so that when we get to it we know that we
        # need to reset the statistics.
        elif not blocks or previous_wait_certificate is not None:
            blocks[current_id] = \
                ConsensusState._BlockInfo(
                    wait_certificate=None,
                    validator_info=None,
                    poet_settings_view=None)

        previous_wait_certificate = wait_certificate

        # Move to the previous block
        current_id = block.previous_block_id

    # At this point, if we have not found any consensus state, we need to
    # create default state from which we can build upon
    if consensus_state is None:
        consensus_state = ConsensusState()

    # Now, walk through the blocks for which we were supposed to create
    # consensus state, from oldest to newest (i.e., in the reverse order in
    # which they were added), and store state for PoET blocks so that the
    # next time we don't have to walk so far back through the block chain.
    for current_id, block_info in reversed(blocks.items()):
        # If the block was not a PoET block (i.e., didn't have a wait
        # certificate), reset the consensus state statistics.  We are not
        # going to store this in the consensus state store, but we will use
        # it as the starting point for the next PoET block.
        if block_info.wait_certificate is None:
            consensus_state = ConsensusState()

        # Otherwise, let the consensus state update itself appropriately
        # based upon the validator claiming a block, and then associate the
        # consensus state with the new block in the consensus state store.
        else:
            consensus_state.validator_did_claim_block(
                validator_info=block_info.validator_info,
                wait_certificate=block_info.wait_certificate,
                poet_settings_view=block_info.poet_settings_view)
            consensus_state_store[current_id] = consensus_state

            LOGGER.debug('Create consensus state: BID=%s, ALM=%f, TBCC=%d',
                         current_id[:8],
                         consensus_state.aggregate_local_mean,
                         consensus_state.total_block_claim_count)

    return consensus_state
def initialize_block(self, block_header):
    """Do initialization necessary for the consensus to claim a block,
    this may include initiating voting activities, starting proof of work
    hash generation, or create a PoET wait timer.

    Args:
        block_header (BlockHeader): The BlockHeader to initialize.

    Returns:
        Boolean: True if the candidate block should be built. False if
        no candidate should be built.
    """
    # If the previous block ID matches our cached one, that means that we
    # have already determined that even if we initialize the requested
    # block we would not be able to claim it.  So, instead of wasting time
    # doing all of the checking again, simply short-circuit the failure so
    # that the validator can go do something more useful.
    if block_header.previous_block_id == \
            PoetBlockPublisher._previous_block_id:
        return False
    PoetBlockPublisher._previous_block_id = block_header.previous_block_id

    # Using the current chain head, we need to create a state view so we
    # can create a PoET enclave.
    state_view = \
        BlockWrapper.state_view_for_block(
            block_wrapper=self._block_cache.block_store.chain_head,
            state_view_factory=self._state_view_factory)

    poet_enclave_module = \
        factory.PoetEnclaveFactory.get_poet_enclave_module(
            state_view=state_view,
            config_dir=self._config_dir,
            data_dir=self._data_dir)

    # Get our validator registry entry to see what PoET public key
    # other validators think we are using.
    validator_registry_view = ValidatorRegistryView(state_view)
    validator_info = None

    try:
        validator_id = block_header.signer_pubkey
        validator_info = \
            validator_registry_view.get_validator_info(
                validator_id=validator_id)
    except KeyError:
        # No registry entry yet for this validator; handled below
        pass

    # If we don't have a validator registry entry, then check the active
    # key.  If we don't have one, then we need to sign up.  If we do have
    # one, then our validator registry entry has not percolated through the
    # system, so nothing to do but wait.
    active_poet_public_key = self._poet_key_state_store.active_key
    if validator_info is None:
        if active_poet_public_key is None:
            LOGGER.debug(
                'No public key found, so going to register new signup '
                'information')
            self._register_signup_information(
                block_header=block_header,
                poet_enclave_module=poet_enclave_module)

        # Either way, we cannot claim a block until the registry entry
        # shows up.
        return False

    # Retrieve the key state corresponding to the PoET public key in our
    # validator registry entry.
    poet_key_state = None
    try:
        poet_key_state = \
            self._poet_key_state_store[
                validator_info.signup_info.poet_public_key]
    except (ValueError, KeyError):
        pass

    # If there is no key state associated with the PoET public key that
    # other validators think we should be using, then we need to create
    # new signup information as we have no way whatsoever to publish
    # blocks that other validators will accept.
    if poet_key_state is None:
        LOGGER.debug(
            'PoET public key %s...%s in validator registry not found in '
            'key state store. Sign up again',
            validator_info.signup_info.poet_public_key[:8],
            validator_info.signup_info.poet_public_key[-8:])
        self._register_signup_information(
            block_header=block_header,
            poet_enclave_module=poet_enclave_module)

        # We need to put fake information in the key state store for the
        # PoET public key the other validators think we are using so that
        # we don't try to keep signing up.  However, we are going to mark
        # that key state store entry as being refreshed so that we will
        # never actually try to use it.
        dummy_data = b64encode(b'No sealed signup data').decode('utf-8')
        self._poet_key_state_store[
            validator_info.signup_info.poet_public_key] = \
            PoetKeyState(
                sealed_signup_data=dummy_data,
                has_been_refreshed=True)

        return False

    # Check the key state.  If it is marked as being refreshed, then we are
    # waiting until our PoET public key is updated in the validator
    # registry and therefore we cannot publish any blocks.
    if poet_key_state.has_been_refreshed:
        LOGGER.debug(
            'PoET public key %s...%s has been refreshed. Wait for new '
            'key to show up in validator registry.',
            validator_info.signup_info.poet_public_key[:8],
            validator_info.signup_info.poet_public_key[-8:])
        return False

    # If the PoET public key in the validator registry is not the active
    # one, then we need to switch the active key in the key state store.
    if validator_info.signup_info.poet_public_key != \
            active_poet_public_key:
        active_poet_public_key = validator_info.signup_info.poet_public_key
        self._poet_key_state_store.active_key = active_poet_public_key

    # Ensure that the enclave is using the appropriate keys
    try:
        unsealed_poet_public_key = \
            SignupInfo.unseal_signup_data(
                poet_enclave_module=poet_enclave_module,
                sealed_signup_data=poet_key_state.sealed_signup_data)
    except SystemError:
        # Signup data is unuseable
        LOGGER.error(
            'Could not unseal signup data associated with PPK: %s..%s',
            active_poet_public_key[:8],
            active_poet_public_key[-8:])
        self._poet_key_state_store.active_key = None
        return False

    assert active_poet_public_key == unsealed_poet_public_key

    LOGGER.debug(
        'Using PoET public key: %s...%s',
        active_poet_public_key[:8],
        active_poet_public_key[-8:])
    LOGGER.debug(
        'Unseal signup data: %s...%s',
        poet_key_state.sealed_signup_data[:8],
        poet_key_state.sealed_signup_data[-8:])

    consensus_state = \
        ConsensusState.consensus_state_for_block_id(
            block_id=block_header.previous_block_id,
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            consensus_state_store=self._consensus_state_store,
            poet_enclave_module=poet_enclave_module)
    poet_settings_view = PoetSettingsView(state_view)

    # If our signup information does not pass the freshness test, then we
    # know that other validators will reject any blocks we try to claim so
    # we need to try to sign up again.
    if consensus_state.validator_signup_was_committed_too_late(
            validator_info=validator_info,
            poet_settings_view=poet_settings_view,
            block_cache=self._block_cache):
        LOGGER.info(
            'Reject building on block %s: Validator signup information '
            'not committed in a timely manner.',
            block_header.previous_block_id[:8])
        self._register_signup_information(
            block_header=block_header,
            poet_enclave_module=poet_enclave_module)
        return False

    # Using the consensus state for the block upon which we want to
    # build, check to see how many blocks we have claimed on this chain
    # with this PoET key.  If we have hit the key block claim limit, then
    # we need to check if the key has been refreshed.
    if consensus_state.validator_has_claimed_block_limit(
            validator_info=validator_info,
            poet_settings_view=poet_settings_view):
        # Because we have hit the limit, check to see if we have already
        # submitted a validator registry transaction with new signup
        # information, and therefore a new PoET public key.  If not, then
        # mark this PoET public key in the store as having been refreshed
        # and register new signup information.  Regardless, since we have
        # hit the key block claim limit, we won't even bother initializing
        # a block on this chain as it will be rejected by other
        # validators.
        poet_key_state = self._poet_key_state_store[active_poet_public_key]
        if not poet_key_state.has_been_refreshed:
            LOGGER.info(
                'Reached block claim limit for key: %s...%s',
                active_poet_public_key[:8],
                active_poet_public_key[-8:])

            sealed_signup_data = poet_key_state.sealed_signup_data
            self._poet_key_state_store[active_poet_public_key] = \
                PoetKeyState(
                    sealed_signup_data=sealed_signup_data,
                    has_been_refreshed=True)

            # Release enclave resources for this identity
            # This signup will be invalid on all forks that use it,
            # even if there is a rollback to a point it should be valid.
            # A more sophisticated policy would be to release signups
            # only at a block depth where finality probability
            # is high.
            SignupInfo.release_signup_data(
                poet_enclave_module=poet_enclave_module,
                sealed_signup_data=sealed_signup_data)

            self._register_signup_information(
                block_header=block_header,
                poet_enclave_module=poet_enclave_module)

        LOGGER.info(
            'Reject building on block %s: Validator has reached maximum '
            'number of blocks with key pair.',
            block_header.previous_block_id[:8])
        return False

    # Verify that we are abiding by the block claim delay (i.e., waiting a
    # certain number of blocks since our validator registry was added/
    # updated).
    if consensus_state.validator_is_claiming_too_early(
            validator_info=validator_info,
            block_number=block_header.block_num,
            validator_registry_view=validator_registry_view,
            poet_settings_view=poet_settings_view,
            block_store=self._block_cache.block_store):
        LOGGER.info(
            'Reject building on block %s: Validator has not waited long '
            'enough since registering validator information.',
            block_header.previous_block_id[:8])
        return False

    # We need to create a wait timer for the block...this is what we
    # will check when we are asked if it is time to publish the block
    poet_key_state = self._poet_key_state_store[active_poet_public_key]
    sealed_signup_data = poet_key_state.sealed_signup_data
    previous_certificate_id = \
        utils.get_previous_certificate_id(
            block_header=block_header,
            block_cache=self._block_cache,
            poet_enclave_module=poet_enclave_module)
    wait_timer = \
        WaitTimer.create_wait_timer(
            poet_enclave_module=poet_enclave_module,
            sealed_signup_data=sealed_signup_data,
            validator_address=block_header.signer_pubkey,
            previous_certificate_id=previous_certificate_id,
            consensus_state=consensus_state,
            poet_settings_view=poet_settings_view)

    # NOTE - we do the zTest after we create the wait timer because we
    # need its population estimate to see if this block would be accepted
    # by other validators based upon the zTest.

    # Check to see if by chance we were to be able to claim this block
    # if it would result in us winning more frequently than statistically
    # expected.  If so, then refuse to initialize the block because other
    # validators will not accept anyway.
    if consensus_state.validator_is_claiming_too_frequently(
            validator_info=validator_info,
            previous_block_id=block_header.previous_block_id,
            poet_settings_view=poet_settings_view,
            population_estimate=wait_timer.population_estimate(
                poet_settings_view=poet_settings_view),
            block_cache=self._block_cache,
            poet_enclave_module=poet_enclave_module):
        LOGGER.info(
            'Reject building on block %s: Validator is claiming blocks '
            'too frequently.',
            block_header.previous_block_id[:8])
        return False

    # At this point, we know that if we are able to claim the block we are
    # initializing, we will not be prevented from doing so because of PoET
    # policies.
    self._wait_timer = wait_timer
    PoetBlockPublisher._previous_block_id = None

    LOGGER.debug('Created wait timer: %s', self._wait_timer)

    return True
def compare_forks(self, cur_fork_head, new_fork_head):
    """Given the head of two forks, return which should be the fork that
    the validator chooses.  When this is called both forks consist of
    only valid blocks.

    Args:
        cur_fork_head (Block): The current head of the block chain.
        new_fork_head (Block): The head of the fork that is being
        evaluated.

    Returns:
        Boolean: True if the new chain should replace the current chain.
        False if the new chain should be discarded.
    """
    chosen_fork_head = None

    state_view = \
        BlockWrapper.state_view_for_block(
            block_wrapper=cur_fork_head,
            state_view_factory=self._state_view_factory)
    poet_enclave_module = \
        factory.PoetEnclaveFactory.get_poet_enclave_module(
            state_view=state_view,
            config_dir=self._config_dir,
            data_dir=self._data_dir)

    current_fork_wait_certificate = \
        utils.deserialize_wait_certificate(
            block=cur_fork_head,
            poet_enclave_module=poet_enclave_module)
    new_fork_wait_certificate = \
        utils.deserialize_wait_certificate(
            block=new_fork_head,
            poet_enclave_module=poet_enclave_module)

    # If we ever get a new fork head that is not a PoET block, then bail
    # out.  This should never happen, but defensively protect against it.
    if new_fork_wait_certificate is None:
        raise \
            TypeError(
                'New fork head {} is not a PoET block'.format(
                    new_fork_head.identifier[:8]))

    # Criterion #1: If the current fork head is not PoET, then check to see
    # if the new fork head is building on top of it.  That would be okay.
    # However if not, then we don't have a good deterministic way of
    # choosing a winner.  Again, the latter should never happen, but
    # defensively protect against it.
    if current_fork_wait_certificate is None:
        if new_fork_head.previous_block_id == cur_fork_head.identifier:
            LOGGER.info(
                'Choose new fork %s over current fork %s: '
                'New fork head switches consensus to PoET',
                new_fork_head.header_signature[:8],
                cur_fork_head.header_signature[:8])
            chosen_fork_head = new_fork_head
        else:
            raise \
                TypeError(
                    'Trying to compare a PoET block {} to a non-PoET '
                    'block {} that is not the direct predecessor'.format(
                        new_fork_head.identifier[:8],
                        cur_fork_head.identifier[:8]))

    # Criterion #2: If they share the same immediate previous block,
    # then the one with the smaller wait duration is chosen
    elif cur_fork_head.previous_block_id == \
            new_fork_head.previous_block_id:
        if current_fork_wait_certificate.duration < \
                new_fork_wait_certificate.duration:
            LOGGER.info(
                'Choose current fork %s over new fork %s: '
                'Current fork wait duration (%f) less than new fork wait '
                'duration (%f)',
                cur_fork_head.header_signature[:8],
                new_fork_head.header_signature[:8],
                current_fork_wait_certificate.duration,
                new_fork_wait_certificate.duration)
            chosen_fork_head = cur_fork_head
        elif new_fork_wait_certificate.duration < \
                current_fork_wait_certificate.duration:
            LOGGER.info(
                'Choose new fork %s over current fork %s: '
                'New fork wait duration (%f) less than current fork wait '
                'duration (%f)',
                new_fork_head.header_signature[:8],
                cur_fork_head.header_signature[:8],
                new_fork_wait_certificate.duration,
                current_fork_wait_certificate.duration)
            chosen_fork_head = new_fork_head
        # NOTE: equal durations fall through to criterion #4 below

    # Criterion #3: If they don't share the same immediate previous
    # block, then the one with the higher aggregate local mean wins
    else:
        # Get the consensus state for the current fork head and the
        # block immediately before the new fork head (as we haven't
        # committed to the block yet).  So that the new fork doesn't
        # have to fight with one hand tied behind its back, add the
        # new fork head's wait certificate's local mean to the
        # aggregate local mean for the predecessor block's consensus
        # state for the comparison.
        current_fork_consensus_state = \
            ConsensusState.consensus_state_for_block_id(
                block_id=cur_fork_head.identifier,
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                consensus_state_store=self._consensus_state_store,
                poet_enclave_module=poet_enclave_module)
        new_fork_consensus_state = \
            ConsensusState.consensus_state_for_block_id(
                block_id=new_fork_head.previous_block_id,
                block_cache=self._block_cache,
                state_view_factory=self._state_view_factory,
                consensus_state_store=self._consensus_state_store,
                poet_enclave_module=poet_enclave_module)
        new_fork_aggregate_local_mean = \
            new_fork_consensus_state.aggregate_local_mean + \
            new_fork_wait_certificate.local_mean

        if current_fork_consensus_state.aggregate_local_mean > \
                new_fork_aggregate_local_mean:
            LOGGER.info(
                'Choose current fork %s over new fork %s: '
                'Current fork aggregate local mean (%f) greater than new '
                'fork aggregate local mean (%f)',
                cur_fork_head.header_signature[:8],
                new_fork_head.header_signature[:8],
                current_fork_consensus_state.aggregate_local_mean,
                new_fork_aggregate_local_mean)
            chosen_fork_head = cur_fork_head
        elif new_fork_aggregate_local_mean > \
                current_fork_consensus_state.aggregate_local_mean:
            LOGGER.info(
                'Choose new fork %s over current fork %s: '
                'New fork aggregate local mean (%f) greater than current '
                'fork aggregate local mean (%f)',
                new_fork_head.header_signature[:8],
                cur_fork_head.header_signature[:8],
                new_fork_aggregate_local_mean,
                current_fork_consensus_state.aggregate_local_mean)
            chosen_fork_head = new_fork_head

    # Criterion #4: If we have gotten to this point and we have not chosen
    # yet, we are going to fall back on using the block identifiers
    # (header signatures).  The lexicographically larger one will be the
    # chosen one.  The chance that they are equal are infinitesimally
    # small.
    if chosen_fork_head is None:
        if cur_fork_head.header_signature > \
                new_fork_head.header_signature:
            LOGGER.info(
                'Choose current fork %s over new fork %s: '
                'Current fork header signature (%s) greater than new fork '
                'header signature (%s)',
                cur_fork_head.header_signature[:8],
                new_fork_head.header_signature[:8],
                cur_fork_head.header_signature[:8],
                new_fork_head.header_signature[:8])
            chosen_fork_head = cur_fork_head
        else:
            LOGGER.info(
                'Choose new fork %s over current fork %s: '
                'New fork header signature (%s) greater than current fork '
                'header signature (%s)',
                new_fork_head.header_signature[:8],
                cur_fork_head.header_signature[:8],
                new_fork_head.header_signature[:8],
                cur_fork_head.header_signature[:8])
            chosen_fork_head = new_fork_head

    # Now that we have chosen a fork for the chain head, if we chose the
    # new fork and it is a PoET block (i.e., it has a wait certificate),
    # we need to create consensus state store information for the new
    # fork's chain head.
    if chosen_fork_head == new_fork_head:
        # Get the state view for the previous block in the chain so we can
        # create a PoET enclave
        previous_block = None
        try:
            previous_block = \
                self._block_cache[new_fork_head.previous_block_id]
        except KeyError:
            pass

        state_view = \
            BlockWrapper.state_view_for_block(
                block_wrapper=previous_block,
                state_view_factory=self._state_view_factory)

        validator_registry_view = ValidatorRegistryView(state_view)
        try:
            # Get the validator info for the validator that claimed the
            # fork head
            validator_info = \
                validator_registry_view.get_validator_info(
                    new_fork_head.header.signer_public_key)

            # Get the consensus state for the new fork head's previous
            # block, let the consensus state update itself appropriately
            # based upon the validator claiming a block, and then
            # associate the consensus state with the new block in the
            # store.
            consensus_state = \
                ConsensusState.consensus_state_for_block_id(
                    block_id=new_fork_head.previous_block_id,
                    block_cache=self._block_cache,
                    state_view_factory=self._state_view_factory,
                    consensus_state_store=self._consensus_state_store,
                    poet_enclave_module=poet_enclave_module)
            consensus_state.validator_did_claim_block(
                validator_info=validator_info,
                wait_certificate=new_fork_wait_certificate,
                poet_settings_view=PoetSettingsView(state_view))
            self._consensus_state_store[new_fork_head.identifier] = \
                consensus_state

            LOGGER.debug('Create consensus state: BID=%s, ALM=%f, TBCC=%d',
                         new_fork_head.identifier[:8],
                         consensus_state.aggregate_local_mean,
                         consensus_state.total_block_claim_count)
        except KeyError:
            # This _should_ never happen.  The new potential fork head
            # has to have been a PoET block and for it to be verified
            # by the PoET block verifier, it must have been signed by
            # validator in the validator registry.  If not found, we
            # are going to just stick with the current fork head.
            LOGGER.error(
                'New fork head claimed by validator not in validator '
                'registry: %s...%s',
                new_fork_head.header.signer_public_key[:8],
                new_fork_head.header.signer_public_key[-8:])
            chosen_fork_head = cur_fork_head

    return chosen_fork_head == new_fork_head
def verify_block(self, block_wrapper):
    """Check that the block received conforms to the consensus rules.

    Args:
        block_wrapper (BlockWrapper): The block to validate.

    Returns:
        Boolean: True if the Block is valid, False if the block is
        invalid.
    """
    # The PoET enclave and validator registry are anchored to the state
    # of the block this candidate builds upon, so resolve that block
    # first.  It may legitimately be absent from the cache (e.g., for
    # the genesis block), in which case None is used.
    try:
        prior_block = self._block_cache[block_wrapper.previous_block_id]
    except KeyError:
        prior_block = None

    prior_state_view = \
        BlockWrapper.state_view_for_block(
            block_wrapper=prior_block,
            state_view_factory=self._state_view_factory)

    enclave = \
        factory.PoetEnclaveFactory.get_poet_enclave_module(
            state_view=prior_state_view,
            config_dir=self._config_dir,
            data_dir=self._data_dir)

    registry_view = ValidatorRegistryView(prior_state_view)

    # The block signer must be a registered validator; reject otherwise.
    try:
        signer_info = \
            registry_view.get_validator_info(
                block_wrapper.header.signer_public_key)
    except KeyError:
        LOGGER.error(
            'Block %s rejected: Received block from an unregistered '
            'validator %s...%s',
            block_wrapper.identifier[:8],
            block_wrapper.header.signer_public_key[:8],
            block_wrapper.header.signer_public_key[-8:])
        return False

    LOGGER.debug(
        'Block Signer Name=%s, ID=%s...%s, PoET public key='
        '%s...%s',
        signer_info.name,
        signer_info.id[:8],
        signer_info.id[-8:],
        signer_info.signup_info.poet_public_key[:8],
        signer_info.signup_info.poet_public_key[-8:])

    # A valid PoET block must carry a deserializable wait certificate.
    wait_cert = \
        utils.deserialize_wait_certificate(
            block=block_wrapper,
            poet_enclave_module=enclave)
    if wait_cert is None:
        LOGGER.error(
            'Block %s rejected: Block from validator %s (ID=%s...%s) was '
            'not created by PoET consensus module',
            block_wrapper.identifier[:8],
            signer_info.name,
            signer_info.id[:8],
            signer_info.id[-8:])
        return False

    # Consensus state and PoET settings are those of the block being
    # built upon.
    consensus_state = \
        ConsensusState.consensus_state_for_block_id(
            block_id=block_wrapper.previous_block_id,
            block_cache=self._block_cache,
            state_view_factory=self._state_view_factory,
            consensus_state_store=self._consensus_state_store,
            poet_enclave_module=enclave)
    settings_view = PoetSettingsView(state_view=prior_state_view)

    prior_cert_id = \
        utils.get_previous_certificate_id(
            block_header=block_wrapper.header,
            block_cache=self._block_cache,
            poet_enclave_module=enclave)

    # Cryptographic validity of the wait certificate itself.
    try:
        wait_cert.check_valid(
            poet_enclave_module=enclave,
            previous_certificate_id=prior_cert_id,
            poet_public_key=signer_info.signup_info.poet_public_key,
            consensus_state=consensus_state,
            poet_settings_view=settings_view)
    except ValueError as error:
        LOGGER.error(
            'Block %s rejected: Wait certificate check failed - %s',
            block_wrapper.identifier[:8],
            error)
        return False

    # Policy check: the validator's signup information must have been
    # committed in a timely manner (freshness check).
    if consensus_state.validator_signup_was_committed_too_late(
            validator_info=signer_info,
            poet_settings_view=settings_view,
            block_cache=self._block_cache):
        LOGGER.error(
            'Block %s rejected: Validator signup information not '
            'committed in a timely manner.',
            block_wrapper.identifier[:8])
        return False

    # Policy check: the validator may not exceed the key block claim
    # limit for its current PoET key pair.
    if consensus_state.validator_has_claimed_block_limit(
            validator_info=signer_info,
            poet_settings_view=settings_view):
        LOGGER.error(
            'Block %s rejected: Validator has reached maximum number of '
            'blocks with key pair.',
            block_wrapper.identifier[:8])
        return False

    # Policy check: enough blocks must have elapsed between the commit
    # of the validator's registry transaction and this claim attempt.
    if consensus_state.validator_is_claiming_too_early(
            validator_info=signer_info,
            block_number=block_wrapper.block_num,
            validator_registry_view=registry_view,
            poet_settings_view=settings_view,
            block_store=self._block_cache.block_store):
        LOGGER.error(
            'Block %s rejected: Validator has not waited long enough '
            'since registering validator information.',
            block_wrapper.identifier[:8])
        return False

    # Policy check: the claim rate must pass the statistical (zTest)
    # frequency limit.
    if consensus_state.validator_is_claiming_too_frequently(
            validator_info=signer_info,
            previous_block_id=block_wrapper.previous_block_id,
            poet_settings_view=settings_view,
            population_estimate=wait_cert.population_estimate(
                poet_settings_view=settings_view),
            block_cache=self._block_cache,
            poet_enclave_module=enclave):
        LOGGER.error(
            'Block %s rejected: Validator is claiming blocks too '
            'frequently.',
            block_wrapper.identifier[:8])
        return False

    return True