def test_get_validators(self):
    """Given a state view with multiple validators, and the
    'validator_map' entry, verify that get_validators returns the list
    of just ValidatorInfo instances.
    """
    my_info = ValidatorInfo(
        name='my_validator',
        id='my_id',
        signup_info=SignUpInfo(
            poet_public_key='my_pubkey',
            proof_data='beleive me',
            anti_sybil_id='no sybil'),
        transaction_id="signature")
    your_info = ValidatorInfo(
        name='your_validator',
        id='another_id',
        signup_info=SignUpInfo(
            poet_public_key='your_pubkey',
            proof_data='you betcha',
            anti_sybil_id='poor sybil'),
        transaction_id="signature")

    # The 'validator_map' entry must be filtered out of the result set
    state_view = MockStateView({
        to_address('validator_map'): b'this should be ignored',
        to_address('my_id'): my_info.SerializeToString(),
        to_address('another_id'): your_info.SerializeToString()
    })

    registry_view = ValidatorRegistryView(state_view)
    infos = registry_view.get_validators()

    self.assertEqual(2, len(infos))
    self.assertEqual('my_validator', infos['my_id'].name)
    self.assertEqual('your_validator', infos['another_id'].name)
def test_block_claim_limit(self):
    """Verify that consensus state properly indicates whether or not a
    validator has reached the block claim limit
    """
    wait_certificate = mock.Mock()
    wait_certificate.duration = 3.14
    wait_certificate.local_mean = 5.0

    settings_view = mock.Mock()
    settings_view.key_block_claim_limit = 10
    settings_view.population_estimate_sample_size = 50

    validator_info = ValidatorInfo(
        id='validator_001',
        signup_info=SignUpInfo(poet_public_key='key_001'))

    state = consensus_state.ConsensusState()

    # While the validator is under the limit the check must not
    # trigger; "claim" a block each iteration to walk up to the limit.
    for _ in range(settings_view.key_block_claim_limit):
        self.assertFalse(
            state.validator_has_claimed_block_limit(
                validator_info=validator_info,
                poet_settings_view=settings_view))
        state.validator_did_claim_block(
            validator_info=validator_info,
            wait_certificate=wait_certificate,
            poet_settings_view=settings_view)

    # The key has now claimed its limit, so the check must trigger
    self.assertTrue(
        state.validator_has_claimed_block_limit(
            validator_info=validator_info,
            poet_settings_view=settings_view))

    # A fresh signup key means a fresh per-key count, so the check
    # must no longer trigger
    validator_info = ValidatorInfo(
        id='validator_001',
        signup_info=SignUpInfo(poet_public_key='key_002'))
    self.assertFalse(
        state.validator_has_claimed_block_limit(
            validator_info=validator_info,
            poet_settings_view=settings_view))
def test_z_policy(self, mock_utils, mock_validator_registry_view,
                  mock_consensus_state, mock_poet_enclave_factory,
                  mock_poet_settings_view, mock_block_wrapper,
                  mock_consensus_state_store):
    """ Test verifies the Z Policy: that PoET Block Verifier
    fails if a validator attempts to claim more blocks
    frequently than is allowed
    """

    # create a mock_validator_registry_view that does nothing
    # in get_validator_info
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # create a mock_wait_certificate that does nothing in check_valid
    mock_wait_certificate = mock.Mock()
    mock_wait_certificate.check_valid.return_value = None

    mock_utils.deserialize_wait_certificate.return_value = \
        mock_wait_certificate

    # create a mock_consensus_state that returns a mock with
    # the following settings: only the "claiming too frequently"
    # policy check reports a violation, so a verify_block failure
    # can only be attributed to the Z policy.
    mock_state = MockConsensusState.create_mock_consensus_state(
        claiming_too_frequently=True)

    mock_consensus_state.consensus_state_for_block_id.return_value = \
        mock_state

    # check test
    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()
    mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')

    # Patch the verifier module's LOGGER so the failure reason can be
    # inspected from the logged error message below.
    with mock.patch('sawtooth_poet.poet_consensus.poet_block_verifier.'
                    'LOGGER') as mock_logger:
        block_verifier = \
            PoetBlockVerifier(
                block_cache=mock_block_cache,
                state_view_factory=mock_state_view_factory,
                data_dir=self._temp_dir,
                config_dir=self._temp_dir,
                validator_id='validator_deadbeef')
        self.assertFalse(
            block_verifier.verify_block(block_wrapper=mock_block))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else.  I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.error.call_args
        self.assertTrue('Validator is claiming blocks too '
                        'frequently' in message)
def test_new_fork_head_not_poet_block( self, mock_utils, mock_validator_registry_view, mock_consensus_state, mock_poet_enclave_factory, mock_consensus_state_store): """ Test verifies that if the new fork head is not a valid block, raises appropriate exception """ # create a mock_validator_registry_view mock_validator_registry_view.return_value.get_validator_info. \ return_value = \ ValidatorInfo( name='validator_001', id='validator_deadbeef', signup_info=SignUpInfo( poet_public_key='00112233445566778899aabbccddeeff')) # Make utils pretend it cannot deserialize the wait certificate # of the new fork head mock_utils.deserialize_wait_certificate.return_value = None mock_block_cache = mock.MagicMock() mock_state_view_factory = mock.Mock() # create mock_cur_fork_head mock_cur_fork_header = \ mock.Mock( identifier='0123456789abcdefedcba9876543210', signer_public_key='90834587139405781349807435098745', previous_block_id='2', header_signature='00112233445566778899aabbccddeeff') # create mock_new_fork_head mock_new_fork_header = \ mock.Mock( identifier='0123456789abcdefedcba9876543211', signer_public_key='90834587139405781349807435098745', previous_block_id='2', header_signature='00112233445566778899aabbccddeeff') # check test fork_resolver = \ poet_fork_resolver.PoetForkResolver( block_cache=mock_block_cache, state_view_factory=mock_state_view_factory, data_dir=self._temp_dir, config_dir=self._temp_dir, validator_id='validator_deadbeef') with self.assertRaises(TypeError) as cm: fork_resolver.compare_forks( cur_fork_head=mock_cur_fork_header, new_fork_head=mock_new_fork_header) self.assertEqual( 'New fork head {} is not a PoET block', str(cm.exception))
def test_get_validator_info(self):
    """Given a state view that contains a state entry for a given
    validator info, verify that the validator registry returns a
    ValidatorInfo when get_validator_info is called with the
    validator's id."""
    serialized_info = ValidatorInfo(
        registered='sure',
        name='my_validator',
        id='my_id',
        signup_info=SignUpInfo(
            poet_public_key='my_pubkey',
            proof_data='beleive me',
            anti_sybil_id='no sybil'),
        transaction_id="signature").SerializeToString()
    state_view = MockStateView({to_address('my_id'): serialized_info})

    registry_view = ValidatorRegistryView(state_view)
    info = registry_view.get_validator_info('my_id')

    # Every serialized field should round-trip through the view
    self.assertEqual('my_id', info.id)
    self.assertEqual('my_validator', info.name)
    self.assertEqual('sure', info.registered)
    self.assertEqual("signature", info.transaction_id)
    self.assertEqual('my_pubkey', info.signup_info.poet_public_key)
    self.assertEqual('beleive me', info.signup_info.proof_data)
    self.assertEqual('no sybil', info.signup_info.anti_sybil_id)
def test_non_poet_block(self, mock_utils, mock_validator_registry_view,
                        mock_consensus_state, mock_poet_enclave_factory,
                        mock_poet_config_view, mock_block_wrapper,
                        mock_consensus_state_store):
    """Verify that the PoET block verifier indicates failure if the
    block is not a PoET block (i.e., the consensus field in the block
    header is not a serialized wait certificate).
    """
    # Ensure that the consensus state does not generate failures that would
    # allow this test to pass
    mock_state = mock.Mock()
    mock_state.validator_signup_was_committed_too_late.return_value = False
    mock_state.validator_has_claimed_block_limit.return_value = False
    mock_state.validator_is_claiming_too_early.return_value = False
    mock_state.validator_is_claiming_too_frequently.return_value = False
    mock_consensus_state.consensus_state_for_block_id.return_value = \
        mock_state

    # Make utils pretend it cannot deserialize the wait certificate
    mock_utils.deserialize_wait_certificate.return_value = None

    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()
    mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')

    mock_validator_registry_view.return_value.get_validator_info.\
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # Patch the verifier module's LOGGER so the failure reason can be
    # inspected from the logged error message below.
    with mock.patch('sawtooth_poet.poet_consensus.poet_block_verifier.'
                    'LOGGER') as mock_logger:
        block_verifier = \
            PoetBlockVerifier(
                block_cache=mock_block_cache,
                state_view_factory=mock_state_view_factory,
                data_dir=self._temp_dir,
                # Fix: config_dir was missing here, unlike every other
                # PoetBlockVerifier construction in this file; pass it
                # for consistency with the sibling tests.
                config_dir=self._temp_dir,
                validator_id='validator_deadbeef')
        self.assertFalse(
            block_verifier.verify_block(
                block_wrapper=mock_block))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else.  I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.error.call_args
        self.assertTrue(
            'was not created by PoET consensus module' in message)
def test_block_verifier_valid_block_claim(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_poet_config_view, mock_block_wrapper,
        mock_consensus_state_store):
    """ Test verifies that PoET Block Verifier succeeds if a validator
    successfully passes all criteria necessary to claim a block
    """
    # Registry lookup returns a canned validator record
    mock_validator_registry_view.return_value.get_validator_info.\
        return_value = ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # Wait certificate deserializes and its validity check is a no-op
    wait_certificate = mock.Mock()
    wait_certificate.check_valid.return_value = None
    mock_utils.deserialize_wait_certificate.return_value = \
        wait_certificate

    # Consensus state reports no policy violation of any kind
    passing_state = mock.Mock()
    passing_state.validator_signup_was_committed_too_late.return_value = \
        False
    passing_state.validator_has_claimed_block_limit.return_value = False
    passing_state.validator_is_claiming_too_early.return_value = False
    passing_state.validator_is_claiming_too_frequently.return_value = \
        False
    mock_consensus_state.consensus_state_for_block_id.return_value = \
        passing_state

    # With every criterion satisfied, verification must succeed
    block_verifier = PoetBlockVerifier(
        block_cache=mock.MagicMock(),
        state_view_factory=mock.Mock(),
        data_dir=self._temp_dir,
        config_dir=self._temp_dir,
        validator_id='validator_deadbeef')
    candidate_block = \
        mock.Mock(identifier='0123456789abcdefedcba9876543210')
    self.assertTrue(
        block_verifier.verify_block(block_wrapper=candidate_block))
def test_invalid_wait_certificate(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_poet_settings_view, mock_block_wrapper,
        mock_consensus_state_store):
    """Verify that block verification fails when the wait
    certificate's validity check raises.
    """
    # Consensus state raises no policy objections, so any failure must
    # come from the wait certificate itself
    mock_consensus_state.consensus_state_for_block_id.return_value = \
        MockConsensusState.create_mock_consensus_state()

    # The certificate deserializes fine but fails its validity check
    bad_certificate = mock.Mock()
    bad_certificate.check_valid.side_effect = \
        ValueError('Unit test fake failure')
    mock_utils.deserialize_wait_certificate.return_value = \
        bad_certificate

    mock_validator_registry_view.return_value.get_validator_info.\
        return_value = ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    candidate_block = \
        mock.Mock(identifier='0123456789abcdefedcba9876543210')

    with mock.patch('sawtooth_poet.poet_consensus.poet_block_verifier.'
                    'LOGGER') as mock_logger:
        block_verifier = PoetBlockVerifier(
            block_cache=mock.MagicMock(),
            state_view_factory=mock.Mock(),
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            validator_id='validator_deadbeef')
        self.assertFalse(
            block_verifier.verify_block(block_wrapper=candidate_block))

        # Admittedly fragile, but check the logged error so we have
        # some faith the failure happened for the reason under test
        # and not something unrelated.
        (message, *_), _ = mock_logger.error.call_args
        self.assertTrue('Wait certificate check failed' in message)
def do_get_missing_validator_state():
    """Verify that retrieving missing validator state returns
    appropriate default values.
    """
    state = consensus_state.ConsensusState()

    # Try to get a non-existent validator ID and verify it returns default
    # value
    validator_info = \
        ValidatorInfo(
            id='validator_001',
            signup_info=SignUpInfo(
                poet_public_key='key_001'))
    validator_state = \
        state.get_validator_state(validator_info=validator_info)

    # Fix: the original body fetched the state but asserted nothing,
    # so the "verify" promised by the docstring never happened.
    assert validator_state.key_block_claim_count == 0
    assert validator_state.poet_public_key == 'key_001'
    assert validator_state.total_block_claim_count == 0
def do_get_missing_validator_state():
    """Verify that a lookup for an unknown validator yields the
    default validator state values.
    """
    state = consensus_state.ConsensusState()

    # An ID the state has never seen must come back with defaults
    unknown_validator = ValidatorInfo(
        id='validator_001',
        signup_info=SignUpInfo(poet_public_key='key_001'))
    result = state.get_validator_state(validator_info=unknown_validator)

    assert result.key_block_claim_count == 0
    assert result.poet_public_key == 'key_001'
    assert result.total_block_claim_count == 0
def test_has_validator_info(self):
    """Given a state view that contains a state entry for a given
    validator info, verify that the validator registry returns a true
    when has_validator_info is called with the validator's id."""
    serialized_info = ValidatorInfo(
        name='my_validator',
        id='my_id',
        signup_info=SignUpInfo(
            poet_public_key='my_pubkey',
            proof_data='beleive me',
            anti_sybil_id='no sybil'),
        transaction_id="signature").SerializeToString()
    state_view = MockStateView(
        {to_address('my_id'): serialized_info})

    registry_view = ValidatorRegistryView(state_view)
    self.assertTrue(registry_view.has_validator_info('my_id'))
def do_validator_did_claim_block():
    """Exercise validator_did_claim_block and verify that the per-key
    and total block-claim statistics are updated properly.
    """
    state = consensus_state.ConsensusState()

    wait_certificate = mock.Mock()
    wait_certificate.duration = 3.1415
    wait_certificate.local_mean = 5.0

    poet_settings_view = mock.Mock()
    poet_settings_view.population_estimate_sample_size = 50

    validator_info = \
        ValidatorInfo(
            id='validator_001',
            signup_info=SignUpInfo(
                poet_public_key='key_001'))

    # Have a non-existent validator claim a block, which should cause the
    # consensus state to add and set statistics appropriately.
    state.validator_did_claim_block(validator_info=validator_info,
                                    wait_certificate=wait_certificate,
                                    poet_settings_view=poet_settings_view)
    validator_state = \
        state.get_validator_state(validator_info=validator_info)
    # Fix: the original fetched the state but never asserted anything.
    # Expected counts follow the defaults verified by the sibling
    # get_missing_validator_state tests (both counters start at 0) —
    # confirm against consensus_state.validator_did_claim_block.
    assert validator_state.key_block_claim_count == 1
    assert validator_state.poet_public_key == 'key_001'
    assert validator_state.total_block_claim_count == 1

    # Have the existing validator claim another block and verify that
    # the consensus and validator statistics are updated properly
    state.validator_did_claim_block(validator_info=validator_info,
                                    wait_certificate=wait_certificate,
                                    poet_settings_view=poet_settings_view)
    validator_state = \
        state.get_validator_state(validator_info=validator_info)
    assert validator_state.key_block_claim_count == 2
    assert validator_state.poet_public_key == 'key_001'
    assert validator_state.total_block_claim_count == 2

    # Have the existing validator claim another block, but with a new key,
    # and verify that the consensus and validator statistics are updated
    # properly.  Per test_block_claim_limit, a new signup key resets the
    # per-key count while the total keeps accumulating.
    validator_info.signup_info.poet_public_key = 'key_002'
    state.validator_did_claim_block(validator_info=validator_info,
                                    wait_certificate=wait_certificate,
                                    poet_settings_view=poet_settings_view)
    validator_state = \
        state.get_validator_state(validator_info=validator_info)
    assert validator_state.key_block_claim_count == 1
    assert validator_state.poet_public_key == 'key_002'
    assert validator_state.total_block_claim_count == 3
def test_get_missing_validator_state(self):
    """Verify that retrieving missing validator state returns
    appropriate default values.
    """
    state = consensus_state.ConsensusState()

    # An ID the state has never seen must yield the default values
    unknown_validator = ValidatorInfo(
        id='validator_001',
        signup_info=SignUpInfo(poet_public_key='key_001'))
    validator_state = \
        state.get_validator_state(validator_info=unknown_validator)

    self.assertEqual(validator_state.key_block_claim_count, 0)
    self.assertEqual(validator_state.poet_public_key, 'key_001')
    self.assertEqual(validator_state.total_block_claim_count, 0)
def test_different_previous_block_id(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store):
    """ When both current and new fork heads are valid PoET blocks
    with different previous block ids, the test verifies that the
    one with the higher aggregate local mean wins
    """

    # create a mock_validator_registry_view
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # create a mock_wait_certificate that does nothing in check_valid
    mock_wait_certificate = mock.Mock()
    mock_wait_certificate.check_valid.return_value = None

    # set new_mock_wait_certificate local_mean and duration
    mock_wait_certificate.local_mean = 0.0

    # set mock_utils.deserialize_wait_certificate
    # to return a specific value for each fork_head
    # with cur_fork_head being deserialized first
    # (ten entries: two certificates per compare_forks call, across
    # the five subtests below)
    mock_utils.deserialize_wait_certificate.side_effect = \
        [mock_wait_certificate,
         mock_wait_certificate,
         mock_wait_certificate,
         mock_wait_certificate,
         mock_wait_certificate,
         mock_wait_certificate,
         mock_wait_certificate,
         mock_wait_certificate,
         mock_wait_certificate,
         mock_wait_certificate
         ]

    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()

    # create mock_cur_fork_head
    mock_cur_fork_header = \
        mock.Mock(
            identifier='0123456789abcdefedcba9876543210',
            signer_public_key='90834587139405781349807435098745',
            previous_block_id='2',
            header_signature='00112233445566778899aabbccddeeff')

    # create mock_new_fork_head
    # (note the different previous_block_id from the current head)
    mock_new_fork_header = \
        mock.Mock(
            identifier='0123456789abcdefedcba9876543211',
            signer_public_key='90834587139405781349807435098745',
            previous_block_id='3',
            header_signature='00112233445566778899aabbccddeeff')

    fork_resolver = \
        poet_fork_resolver.PoetForkResolver(
            block_cache=mock_block_cache,
            state_view_factory=mock_state_view_factory,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            validator_id='validator_deadbeef')

    # Subtest 1: when the current fork head has
    # the higher aggregate local mean

    # create a mock_cur_fork_consensus_state
    mock_cur_fork_consensus_state = mock.Mock()
    mock_cur_fork_consensus_state.aggregate_local_mean = 1.0

    # create a mock_new_fork_consensus_state
    mock_new_fork_consensus_state = mock.Mock()
    mock_new_fork_consensus_state.aggregate_local_mean = 0.0

    # set mock_consensus_state.consensus_state_for_block_id return
    # the current & new fork consensus states
    mock_consensus_state.consensus_state_for_block_id.side_effect = \
        [mock_cur_fork_consensus_state,
         mock_new_fork_consensus_state]

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertFalse(fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_new_fork_header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else.  I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('Current fork aggregate local mean (%f) '
                        'greater than new fork aggregate local mean'
                        in message)

    # Subtest 2: when the new fork head has
    # the higher aggregate local mean

    # set mock_consensus_state.consensus_state_for_block_id return
    # the current & new fork consensus states
    mock_consensus_state.consensus_state_for_block_id.side_effect = \
        [mock_cur_fork_consensus_state,
         mock_new_fork_consensus_state,
         mock_new_fork_consensus_state]

    # change the aggregate_local_mean values
    mock_cur_fork_consensus_state.aggregate_local_mean = 0.0
    mock_new_fork_consensus_state.aggregate_local_mean = 1.0

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertTrue(fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_new_fork_header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else.  I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('New fork aggregate local mean (%f) '
                        'greater than current fork aggregate local mean '
                        in message)

    # Subtest 3: when both the new & current fork heads have
    # the same aggregate local mean

    # set mock_consensus_state.consensus_state_for_block_id return
    # the current & new fork consensus states
    mock_consensus_state.consensus_state_for_block_id.side_effect = \
        [mock_cur_fork_consensus_state,
         mock_new_fork_consensus_state,
         mock_new_fork_consensus_state]

    # set the aggregate_local_mean values equal
    mock_cur_fork_consensus_state.aggregate_local_mean = 1.0
    mock_new_fork_consensus_state.aggregate_local_mean = 1.0

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertTrue(fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_new_fork_header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else.  I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('New fork header signature (%s) '
                        'greater than current fork header signature (%s)'
                        in message)

    # Subset 4: If we have gotten to this point and we have not chosen
    # a fork head yet, we are going to fall back
    # on using the block identifiers (header signatures).
    # The lexicographically larger one will be the chosen one.

    # create mock_new_fork_head with a smaller header_signature
    mock_smaller_header_signature = \
        mock.Mock(
            identifier='0123456789abcdefedcba9876543211',
            signer_public_key='90834587139405781349807435098745',
            previous_block_id='4',
            header_signature='00112233445566778899aabbccddee')

    # create a mock_smaller_header_signature_consensus_state
    mock_smaller_header_signature_consensus_state = mock.Mock()
    mock_smaller_header_signature_consensus_state.\
        aggregate_local_mean = 0.0
    mock_cur_fork_consensus_state.aggregate_local_mean = 0.0

    mock_consensus_state.consensus_state_for_block_id.side_effect = \
        [mock_cur_fork_consensus_state,
         mock_smaller_header_signature_consensus_state]

    # check test when Current fork header signature is greater than
    # the new fork header signature
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertFalse(fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_smaller_header_signature))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else.  I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('Current fork header signature'
                        '(%s) greater than new fork header signature (%s)'
                        in message)

    # Subtest 5: Check when new header signature is greater than
    # the current fork header signature
    mock_consensus_state.consensus_state_for_block_id.side_effect = \
        [mock_smaller_header_signature_consensus_state,
         mock_new_fork_consensus_state,
         mock_new_fork_consensus_state]

    mock_smaller_header_signature_consensus_state.\
        aggregate_local_mean = 0.0
    mock_new_fork_consensus_state.aggregate_local_mean = 0.0

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertTrue(fork_resolver.compare_forks(
            cur_fork_head=mock_smaller_header_signature,
            new_fork_head=mock_new_fork_header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else.  I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('New fork header signature (%s) '
                        'greater than current fork header signature (%s)'
                        in message)
def test_both_valid_poet_blocks(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store):
    """ If both current and new fork heads are valid PoET blocks,
    the test checks if they share the same immediate previous block,
    then the one with the smaller wait duration is chosen
    """

    # create a mock_validator_registry_view
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # create a mock_wait_certificate that does nothing in check_valid
    mock_wait_certificate = mock.Mock()
    mock_wait_certificate.check_valid.return_value = None

    # set new_mock_wait_certificate local_mean and duration
    mock_wait_certificate.local_mean = 0.0
    mock_wait_certificate.duration = 1.0

    # create a new_fork_mock_wait_certificate with a higher duration time
    new_fork_mock_wait_certificate = mock.Mock()
    new_fork_mock_wait_certificate.check_valid.return_value = None
    new_fork_mock_wait_certificate.local_mean = 0.0
    new_fork_mock_wait_certificate.duration = 2.0

    # set mock_utils.deserialize_wait_certificate
    # to return a specific value for each fork_head that is used in
    # poet_fork_resolver.compare()
    # with cur_fork_head being deserialized first
    mock_utils.deserialize_wait_certificate.side_effect = \
        [mock_wait_certificate,
         new_fork_mock_wait_certificate
         ]

    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()

    # create mock_cur_fork_head
    mock_cur_fork_header = \
        mock.Mock(
            identifier='0123456789abcdefedcba9876543210',
            signer_public_key='90834587139405781349807435098745',
            previous_block_id='2',
            header_signature='00112233445566778899aabbccddeeff')

    # create mock_new_fork_head with same previous block id
    mock_new_fork_header = \
        mock.Mock(
            identifier='0123456789abcdefedcba9876543211',
            signer_public_key='90834587139405781349807435098745',
            previous_block_id='2',
            header_signature='00112233445566778899aabbccddeeff')

    fork_resolver = \
        poet_fork_resolver.PoetForkResolver(
            block_cache=mock_block_cache,
            state_view_factory=mock_state_view_factory,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            validator_id='validator_deadbeef')

    # Subtest 1: when current fork head has the smaller wait duration
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertFalse(fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_new_fork_header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else.  I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('Current fork wait duration (%f) '
                        'less than new fork wait duration (%f)'
                        in message)

    # Subtest 2: when new fork head has the smaller wait duration

    # change new_fork_mock_wait_certificate duration to a smaller value
    new_fork_mock_wait_certificate.duration = 0.0

    # set mock_utils.deserialize_wait_certificate
    # to return a specific value for each fork_head
    # with cur_fork_head being deserialized first
    mock_utils.deserialize_wait_certificate.side_effect = \
        [mock_wait_certificate,
         new_fork_mock_wait_certificate]

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertTrue(fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_new_fork_header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else.  I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('New fork wait duration (%f) '
                        'less than current fork wait duration '
                        in message)

    # Subtest 3: when new & current fork heads have
    # the same wait duration
    # (ties fall back to comparing header signatures)

    # change new_fork_mock_wait_certificate duration to a smaller value
    new_fork_mock_wait_certificate.duration = 1.0

    # set mock_utils.deserialize_wait_certificate
    # to return a specific value for each fork_head
    # with cur_fork_head being deserialized first
    mock_utils.deserialize_wait_certificate.side_effect = \
        [mock_wait_certificate,
         new_fork_mock_wait_certificate]

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertTrue(fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_new_fork_header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else.  I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('New fork header signature (%s) '
                        'greater than current fork header signature (%s)'
                        in message)
def test_cur_fork_head_not_poet_block(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store):
    """ Test verifies that if the current fork head is not a valid
    block, and if new_fork_head.previous_block_id ==
    cur_fork_head.identifier then the new fork head switches
    consensus. Otherwise, raises the appropriate exception - trying
    to compare a PoET block to a non-PoET block that is not the
    direct predecessor
    """

    # create a mock_validator_registry_view
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # create a mock_wait_certificate that does nothing in check_valid
    mock_wait_certificate = mock.Mock()
    mock_wait_certificate.check_valid.return_value = None

    # set mock_utils.deserialize_wait_certificate
    # to return a specific value for each fork_head that is used in
    # poet_fork_resolver.compare()
    # with cur_fork_head being deserialized first.
    # None for cur_fork_head makes it look like a non-PoET block; the
    # four entries cover the two compare_forks() calls below (two
    # deserializations per call).
    mock_utils.deserialize_wait_certificate.side_effect = \
        [None, mock_wait_certificate,
         None, mock_wait_certificate]

    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()

    # create mock_cur_fork_head
    mock_cur_fork_header = \
        mock.Mock(
            identifier='0123456789abcdefedcba9876543210',
            signer_public_key='90834587139405781349807435098745',
            previous_block_id='2',
            header_signature='00112233445566778899aabbccddeeff')

    # create mock_new_fork_head
    mock_new_fork_header = \
        mock.Mock(
            identifier='0123456789abcdefedcba9876543211',
            signer_public_key='90834587139405781349807435098745',
            previous_block_id='2',
            header_signature='00112233445566778899aabbccddeeff')

    fork_resolver = \
        poet_fork_resolver.PoetForkResolver(
            block_cache=mock_block_cache,
            state_view_factory=mock_state_view_factory,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            validator_id='validator_deadbeef')

    # Subtest 1: check that the test fails when the current
    # fork head is not a valid PoET block
    with self.assertRaises(TypeError) as cm:
        fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_new_fork_header)
    self.assertEqual(
        'Trying to compare a PoET block to a non-PoET '
        'block that is not the direct predecessor',
        str(cm.exception))

    # Subtest 2: check that if new_fork_head.previous_block_id
    # == cur_fork_head.identifier
    # then the new fork head switches consensus

    # modify mock_cur_fork_header.identifier so the new fork head is a
    # direct successor of the (non-PoET) current fork head
    mock_cur_fork_header.identifier = \
        mock_new_fork_header.previous_block_id

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertTrue(fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_new_fork_header))

        # Could be a hack, but verify that the appropriate log message
        # is generated - so we at least have some faith that the
        # failure was because of what we are testing and not something
        # else.  I know that this is fragile if the log message is
        # changed, so would accept any suggestions on a better way to
        # verify that the function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('New fork head switches consensus to PoET'
                        in message)
def test_block_publisher_doesnt_finalize_block(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store, mock_poet_key_state_store,
        mock_signup_info, mock_wait_certificate,
        mock_poet_settings_view, mock_block_wrapper):
    """ Test verifies that PoET Block Publisher doesn't finalize
    a candidate block that doesn't have a valid wait certificate.
    """

    # create a mock_validator_registry_view with
    # get_validator_info that does nothing
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # create a mock_wait_certificate that pretends to fail
    mock_wait_certificate.create_wait_certificate.side_effect = \
        ValueError('Unit test fake failure')

    # create a mock_consensus_state that returns a mock with
    # the following settings:
    # NOTE(review): this instantiates MockConsensusState before calling
    # create_mock_consensus_state, while test_z_policy calls it directly
    # on the class - confirm which form the helper expects.
    mock_state = MockConsensusState().create_mock_consensus_state()

    mock_consensus_state.consensus_state_for_block_id.return_value = \
        mock_state

    # create mock_batch_publisher
    # NOTE(review): uses the older signing API (identity_signing_key),
    # whereas test_z_policy builds a signer via create_context/
    # CryptoFactory - verify this is intentional.
    mock_batch_publisher = mock.Mock(
        identity_signing_key=signing.generate_private_key())

    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()

    # create mock_block_header with the following fields
    mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
    mock_block.header.signer_public_key = \
        '90834587139405781349807435098745'
    mock_block.header.previous_block_id = '2'
    mock_block.header.block_num = 1
    mock_block.header.state_root_hash = '6'
    mock_block.header.batch_ids = '4'

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_block_publisher.'
                    'LOGGER') as mock_logger:
        block_publisher = \
            poet_block_publisher.PoetBlockPublisher(
                block_cache=mock_block_cache,
                state_view_factory=mock_state_view_factory,
                batch_publisher=mock_batch_publisher,
                data_dir=self._temp_dir,
                config_dir=self._temp_dir,
                validator_id='validator_deadbeef')

        with mock.patch('sawtooth_poet.poet_consensus.'
                        'poet_block_publisher.json') as _:
            self.assertFalse(
                block_publisher.finalize_block(
                    block_header=mock_block.header))

        # Could be a hack, but verify that the appropriate log message
        # is generated - so we at least have some faith that the
        # failure was because of what we are testing and not something
        # else.  I know that this is fragile if the log message is
        # changed, so would accept any suggestions on a better way to
        # verify that the function fails for the reason we expect.
        (message, *_), _ = mock_logger.error.call_args
        self.assertTrue('Failed to create wait certificate: '
                        in message)
def test_local_mean(self):
    """Verify that the consensus state properly computes the local mean
    during both the bootstrapping phase (i.e., before there are enough
    blocks in the chain to satisfy the population estimate sample size)
    and once there are enough blocks in the chain.
    """
    mock_poet_settings_view = mock.Mock()
    mock_poet_settings_view.target_wait_time = 30.0
    mock_poet_settings_view.initial_wait_time = 3000.0
    mock_poet_settings_view.population_estimate_sample_size = 50

    # Test that during bootstrapping, the local means adhere to the
    # following:
    #
    # ratio = 1.0 * blockCount / sampleSize
    # localMean = targetWaitTime*(1-ratio**2) + initialWaitTime*ratio**2

    def _compute_fixed_local_mean(count):
        # Expected local mean while the chain has fewer than
        # sample_size blocks (bootstrap formula above)
        ratio = \
            1.0 * count / \
            mock_poet_settings_view.population_estimate_sample_size
        return \
            (mock_poet_settings_view.target_wait_time *
             (1 - ratio**2)) + \
            (mock_poet_settings_view.initial_wait_time * ratio**2)

    validator_info = \
        ValidatorInfo(
            id='validator_001',
            signup_info=SignUpInfo(
                poet_public_key='key_001'))

    # We are first going to bootstrap the blockchain by claiming exactly
    # population estimate sample size blocks.  Each one should match the
    # corresponding expected fixed local mean.
    wait_certificates = []
    state = consensus_state.ConsensusState()
    sample_size = mock_poet_settings_view.population_estimate_sample_size
    for _ in range(sample_size):
        # Compute a wait certificate with a fixed local mean, add it to
        # our samples, verify that its local mean equals the one
        # computed by the consensus state, and then update the
        # consensus state as if the block with this wait certificate
        # was claimed.
        mock_wait_certificate = mock.Mock()
        mock_wait_certificate.duration = \
            random.uniform(
                TestConsensusState.MINIMUM_WAIT_TIME,
                TestConsensusState.MINIMUM_WAIT_TIME + 10)
        mock_wait_certificate.local_mean = \
            _compute_fixed_local_mean(len(wait_certificates))

        wait_certificates.append(mock_wait_certificate)

        self.assertAlmostEqual(
            first=mock_wait_certificate.local_mean,
            second=state.compute_local_mean(mock_poet_settings_view),
            places=4)

        state.validator_did_claim_block(
            validator_info=validator_info,
            wait_certificate=mock_wait_certificate,
            poet_settings_view=mock_poet_settings_view)

    # Test that after bootstrapping, the local means adhere to the
    # following:
    #
    # sw, sm = 0.0
    # for most recent population estimate sample size blocks:
    #     sw += waitCertificate.duration - minimumWaitTime
    #     sm += waitCertificate.localMean
    # localMean = targetWaitTime * (sm / sw)

    def _compute_historical_local_mean(wcs):
        # Expected local mean once sample_size blocks exist, computed
        # over the most recent sample of wait certificates (wcs)
        sw = 0.0
        sm = 0.0
        for wc in wcs:
            sw += wc.duration - TestConsensusState.MINIMUM_WAIT_TIME
            sm += wc.local_mean
        return mock_poet_settings_view.target_wait_time * (sm / sw)

    # Let's run through another population estimate sample size blocks
    # and verify that we get the local means expected
    sample_size = mock_poet_settings_view.population_estimate_sample_size
    for _ in range(sample_size):
        # Compute a wait certificate with a historical local mean, add
        # it to our samples, evict the oldest sample, verify that its
        # local mean equals the one computed by the consensus state,
        # and then update the consensus state as if the block with this
        # wait certificate was claimed.
        mock_wait_certificate = mock.Mock()
        mock_wait_certificate.duration = \
            random.uniform(
                TestConsensusState.MINIMUM_WAIT_TIME,
                TestConsensusState.MINIMUM_WAIT_TIME + 10)
        mock_wait_certificate.local_mean = \
            _compute_historical_local_mean(wait_certificates)

        wait_certificates.append(mock_wait_certificate)
        # evict the oldest sample to keep a sliding window
        wait_certificates = wait_certificates[1:]

        self.assertAlmostEqual(
            first=mock_wait_certificate.local_mean,
            second=state.compute_local_mean(mock_poet_settings_view),
            places=4)

        state.validator_did_claim_block(
            validator_info=validator_info,
            wait_certificate=mock_wait_certificate,
            poet_settings_view=mock_poet_settings_view)
def test_z_policy(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store, mock_poet_key_state_store,
        mock_signup_info, mock_poet_settings_view, mock_block_wrapper):
    """ Z Policy: Test verifies that PoET Block Publisher fails if
    a validator attempts to claim more blocks frequently than is allowed
    """

    # create a mock_validator_registry_view with
    # get_validator_info that does nothing
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # create a mock_wait_certificate that does nothing in check_valid
    mock_wait_certificate = mock.Mock()
    mock_wait_certificate.check_valid.return_value = None

    mock_utils.deserialize_wait_certificate.return_value = \
        mock_wait_certificate

    # create a mock_consensus_state that returns a mock with
    # the following settings:
    # claiming_too_frequently=True forces the z-test failure path
    mock_state = MockConsensusState.create_mock_consensus_state(
        claiming_too_frequently=True)

    mock_consensus_state.consensus_state_for_block_id.return_value = \
        mock_state

    mock_consensus_state_store.return_value.__getitem__.return_value = \
        mock_consensus_state

    # Create mock key state
    mock_poet_key_state_store.return_value.__getitem__.return_value = \
        mock.Mock(
            sealed_signup_data='sealed signup data',
            has_been_refreshed=False)

    # create mock_signup_info
    mock_signup_info.unseal_signup_data.return_value = \
        '00112233445566778899aabbccddeeff'

    # create mock_batch_publisher with a real secp256k1 signer
    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    signer = crypto_factory.new_signer(private_key)

    mock_batch_publisher = mock.Mock(identity_signer=signer)

    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()

    # create mock_block_header with the following fields
    mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
    mock_block.header.signer_public_key = \
        '90834587139405781349807435098745'
    mock_block.header.previous_block_id = '2'
    mock_block.header.block_num = 1
    mock_block.header.state_root_hash = '6'
    mock_block.header.batch_ids = '4'

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_block_publisher.'
                    'LOGGER') as mock_logger:
        block_publisher = \
            poet_block_publisher.PoetBlockPublisher(
                block_cache=mock_block_cache,
                state_view_factory=mock_state_view_factory,
                batch_publisher=mock_batch_publisher,
                data_dir=self._temp_dir,
                config_dir=self._temp_dir,
                validator_id='validator_deadbeef')

        self.assertFalse(
            block_publisher.initialize_block(
                block_header=mock_block.header))

        # Could be a hack, but verify that the appropriate log message
        # is generated - so we at least have some faith that the
        # failure was because of what we are testing and not something
        # else.  I know that this is fragile if the log message is
        # changed, so would accept any suggestions on a better way to
        # verify that the function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('is claiming blocks too '
                        'frequently' in message)
def test_create(self):
    """Verify that a wait certificate can be created from a wait timer,
    that its fields correlate to the timer, and that a freshly created
    certificate passes check_valid (including a second, chained
    certificate).
    """
    # Need to create signup information and wait timer first
    signup_info = \
        SignupInfo.create_signup_info(
            poet_enclave_module=self.poet_enclave_module,
            originator_public_key_hash=self._originator_public_key_hash,
            nonce=NULL_BLOCK_IDENTIFIER)

    # create mock_poet_enclave_wait_timer
    mock_poet_enclave_wait_timer = \
        mock.Mock(validator_address='1060 W Addison Street',
                  duration=1.0,
                  previous_certificate_id=NULL_BLOCK_IDENTIFIER,
                  local_mean=5.0,
                  signature='00112233445566778899aabbccddeeff',
                  serialized_timer=None,
                  request_time=time.time())

    # create mock_poet_enclave_wait_certificate
    mock_poet_enclave_wait_certificate = \
        mock.Mock(duration=1.0,
                  previous_certificate_id=NULL_BLOCK_IDENTIFIER,
                  local_mean=5.0,
                  request_time=time.time(),
                  validator_address='1060 W Addison Street',
                  nonce=NULL_BLOCK_IDENTIFIER,
                  block_hash="Reader's Digest",
                  signature='00112233445566778899aabbccddeeff',
                  serialized_certificate='001122334455667'
                                         '78899aabbccddeeff')

    # create mock_poet_enclave_module
    mock_poet_enclave_module = mock.Mock()
    mock_poet_enclave_module.create_wait_timer.return_value = \
        mock_poet_enclave_wait_timer

    # set the mock enclave wait certificate and wait timer to
    # have the same request_time
    mock_poet_enclave_wait_certificate.request_time = \
        mock_poet_enclave_wait_timer.request_time

    # set the mock enclave wait certificate and wait timer to
    # have the same previous_certificate_id
    mock_poet_enclave_wait_certificate.previous_certificate_id = \
        mock_poet_enclave_wait_timer.previous_certificate_id

    # set the identifier for mock_poet_enclave_wait_certificate
    mock_poet_enclave_wait_certificate.identifier.return_value = \
        mock_poet_enclave_wait_certificate.previous_certificate_id[:16]

    mock_poet_enclave_module.create_wait_certificate.return_value = \
        mock_poet_enclave_wait_certificate
    mock_poet_enclave_module.deserialize_wait_certificate.return_value = \
        mock_poet_enclave_wait_certificate

    # create wait timer
    wt = \
        WaitTimer.create_wait_timer(
            poet_enclave_module=mock_poet_enclave_module,
            validator_address='1660 Pennsylvania Avenue NW',
            previous_certificate_id=NULL_BLOCK_IDENTIFIER,
            consensus_state=self.mock_consensus_state,
            poet_settings_view=self.mock_poet_settings_view)

    # Now we can create a wait certificate and verify that it correlates
    # to the wait timer we just created
    wc = \
        WaitCertificate.create_wait_certificate(
            poet_enclave_module=mock_poet_enclave_module,
            wait_timer=wt,
            block_hash="Reader's Digest")

    self.assertIsNotNone(wc)

    self.assertEqual(
        wc.previous_certificate_id,
        wt.previous_certificate_id)
    self.assertAlmostEqual(wc.local_mean, wt.local_mean)
    self.assertAlmostEqual(wc.request_time, wt.request_time)
    self.assertAlmostEqual(wc.duration, wt.duration)
    self.assertEqual(wc.validator_address, wt.validator_address)
    self.assertEqual(wc.block_hash, "Reader's Digest")
    self.assertIsNotNone(wc.signature)
    self.assertIsNotNone(wc.identifier)

    # A newly-created wait certificate should be valid
    wc.check_valid(
        poet_enclave_module=mock_poet_enclave_module,
        previous_certificate_id=NULL_BLOCK_IDENTIFIER,
        poet_public_key=signup_info.poet_public_key,
        consensus_state=self.mock_consensus_state,
        poet_settings_view=self.mock_poet_settings_view)

    validator_info = \
        ValidatorInfo(
            id='validator_001',
            signup_info=SignUpInfo(
                poet_public_key='key_001'))

    self.mock_consensus_state.validator_did_claim_block(
        validator_info=validator_info,
        wait_certificate=wc,
        poet_settings_view=self.mock_poet_settings_view)

    # Create another wait certificate and verify it is valid also
    wt = \
        WaitTimer.create_wait_timer(
            poet_enclave_module=mock_poet_enclave_module,
            validator_address='1660 Pennsylvania Avenue NW',
            previous_certificate_id=wc.identifier,
            consensus_state=self.mock_consensus_state,
            poet_settings_view=self.mock_poet_settings_view)

    # Now we can create a wait certificate and verify that it correlates
    # to the wait timer we just created
    another_wc = \
        WaitCertificate.create_wait_certificate(
            poet_enclave_module=mock_poet_enclave_module,
            wait_timer=wt,
            block_hash="Pepto Bismol")

    another_wc.check_valid(
        poet_enclave_module=mock_poet_enclave_module,
        previous_certificate_id=wc.identifier,
        poet_public_key=signup_info.poet_public_key,
        consensus_state=self.mock_consensus_state,
        poet_settings_view=self.mock_poet_settings_view)
def test_signup_commit_maximum_delay(self):
    """Verify that consensus state properly indicates whether or not a
    validator signup was committed before the maximum delay occurred
    """
    # A four-block chain: each block's previous_block_id points at the
    # block before it, with '004' as the chain head.
    chain = {
        '001': mock.Mock(previous_block_id='000', identifier='001'),
        '002': mock.Mock(previous_block_id='001', identifier='002'),
        '003': mock.Mock(previous_block_id='002', identifier='003'),
        '004': mock.Mock(previous_block_id='003', identifier='004')
    }

    cache = mock.MagicMock()
    cache.__getitem__.side_effect = chain.__getitem__
    cache.block_store.get_block_by_transaction_id.\
        return_value = chain['004']

    settings = mock.Mock()
    settings.signup_commit_maximum_delay = 1

    def _make_validator_info(nonce):
        # Build a ValidatorInfo whose signup nonce names the block the
        # validator claims it signed up against.
        return ValidatorInfo(
            id='validator_001',
            signup_info=SignUpInfo(
                poet_public_key='key_002',
                nonce=nonce),
            transaction_id='transaction_001')

    genesis_patch_target = (
        'sawtooth_poet.poet_consensus.consensus_state.utils.'
        'block_id_is_genesis')

    # Case 1: walking back hits the beginning of the chain before the
    # signup block is found -> committed too late.
    info = _make_validator_info('999')
    with mock.patch(genesis_patch_target) as genesis_check:
        genesis_check.return_value = True
        state = consensus_state.ConsensusState()
        self.assertTrue(
            state.validator_signup_was_committed_too_late(
                validator_info=info,
                poet_settings_view=settings,
                block_cache=cache))

    # Case 2: the maximum commit delay is exhausted before the signup
    # block is found, for every delay smaller than the chain walk.
    info = _make_validator_info('999')
    with mock.patch(genesis_patch_target) as genesis_check:
        genesis_check.return_value = False
        state = consensus_state.ConsensusState()
        for max_delay in range(len(chain) - 1):
            settings.signup_commit_maximum_delay = max_delay
            self.assertTrue(
                state.validator_signup_was_committed_too_late(
                    validator_info=info,
                    poet_settings_view=settings,
                    block_cache=cache))

    # Case 3: the signup block is found within the allowed delay ->
    # not committed too late.
    with mock.patch(genesis_patch_target) as genesis_check:
        genesis_check.return_value = False
        state = consensus_state.ConsensusState()
        for nonce, max_delay in zip(['001', '002', '003'], [2, 1, 0]):
            settings.signup_commit_maximum_delay = max_delay
            info = _make_validator_info(nonce)
            self.assertFalse(
                state.validator_signup_was_committed_too_late(
                    validator_info=info,
                    poet_settings_view=settings,
                    block_cache=cache))
def test_block_claim_frequency(self, mock_deserialize):
    """Verify that consensus state properly indicates whether or not a
    validator is trying to claim blocks too frequently
    """
    mock_poet_settings_view = mock.Mock()
    mock_poet_settings_view.target_wait_time = 5.0
    mock_poet_settings_view.key_block_claim_limit = 10
    mock_poet_settings_view.population_estimate_sample_size = 50
    mock_poet_settings_view.ztest_minimum_win_count = 3
    mock_poet_settings_view.ztest_maximum_win_deviation = 3.075

    mock_wait_certificate = mock.Mock()
    mock_wait_certificate.duration = 3.14
    mock_wait_certificate.local_mean = \
        mock_poet_settings_view.target_wait_time * 2
    # population estimate is fixed at 2 so the validator is "expected"
    # to win half of the blocks
    mock_wait_certificate.population_estimate.return_value = 2

    mock_deserialize.return_value = mock_wait_certificate

    mock_block = mock.Mock()
    mock_block.previous_block_id = 'block_000'
    mock_block.header.signer_public_key = 'validator_001_key'

    mock_block_cache = mock.MagicMock()
    mock_block_cache.__getitem__.return_value = mock_block

    validator_info = \
        ValidatorInfo(
            id=mock_block.header.signer_public_key,
            signup_info=SignUpInfo(
                poet_public_key='key_002'))

    # Verify that zTest does not apply while there are fewer than
    # population estimate sample size blocks committed
    state = consensus_state.ConsensusState()
    sample_size = mock_poet_settings_view.population_estimate_sample_size
    for _ in range(sample_size):
        self.assertFalse(state.validator_is_claiming_too_frequently(
            validator_info=validator_info,
            previous_block_id='previous_id',
            poet_settings_view=mock_poet_settings_view,
            population_estimate=2,
            block_cache=mock_block_cache,
            poet_enclave_module=None))
        state.validator_did_claim_block(
            validator_info=validator_info,
            wait_certificate=mock_wait_certificate,
            poet_settings_view=mock_poet_settings_view)

    # Per the spec, a z-score is calculated for each block, beyond the
    # minimum, that the validator has claimed.  The z-score is computed
    # as:
    #
    # zScore = (observed - expected) / stddev
    #
    # Where:
    # observed = the number of blocks won by validator
    # expected = the number statistically expected to be won by
    #   validator, which in the case is 1/2 of the blocks (as
    #   population estimate is fixed at 2)
    # probability = expected / number blocks
    # stddev = square root(number blocks * probability *
    #   (1 - probability))

    # Compute how many more blocks beyond the minimum that the validator
    # can claim without triggering the frequency test.
    # Here `observed` doubles as the number of blocks, since this
    # validator claims every block.
    observed = mock_poet_settings_view.ztest_minimum_win_count
    while True:
        expected = \
            float(observed) / \
            mock_wait_certificate.population_estimate.return_value
        probability = expected / observed
        stddev = math.sqrt(observed * probability * (1 - probability))
        z_score = (observed - expected) / stddev
        if z_score > mock_poet_settings_view.ztest_maximum_win_deviation:
            break
        observed += 1

    # Verify that the validator can claim up to just before the number
    # of blocks calculated above (this would be the blocks before the
    # minimum win count as well as up to just before it triggered the
    # frequency test).
    for _ in range(observed - 1):
        self.assertFalse(state.validator_is_claiming_too_frequently(
            validator_info=validator_info,
            previous_block_id='previous_id',
            poet_settings_view=mock_poet_settings_view,
            population_estimate=2,
            block_cache=mock_block_cache,
            poet_enclave_module=None))
        state.validator_did_claim_block(
            validator_info=validator_info,
            wait_certificate=mock_wait_certificate,
            poet_settings_view=mock_poet_settings_view)

    # Verify that now the validator triggers the frequency test
    self.assertTrue(state.validator_is_claiming_too_frequently(
        validator_info=validator_info,
        previous_block_id='previous_id',
        poet_settings_view=mock_poet_settings_view,
        population_estimate=2,
        block_cache=mock_block_cache,
        poet_enclave_module=None))
def test_block_claim_delay(self):
    """Verify that consensus state properly indicates whether or not a
    validator is trying to claim a block before the block claim delay
    """
    # Registry with nine known validators (note: no validator_007)
    registry_view = mock.Mock()
    registry_view.get_validators.return_value = [
        'validator_001', 'validator_002', 'validator_003',
        'validator_004', 'validator_005', 'validator_006',
        'validator_008', 'validator_009', 'validator_010'
    ]

    wait_cert = mock.Mock(duration=3.14, local_mean=5.0)

    settings = mock.Mock(
        key_block_claim_limit=10000,
        block_claim_delay=2,
        population_estimate_sample_size=50)

    # The validator's signup transaction was committed in block 100
    signup_block = mock.Mock(block_num=100)
    block_store = mock.Mock()
    block_store.get_block_by_transaction_id.return_value = signup_block

    info = ValidatorInfo(
        id='validator_001',
        signup_info=SignUpInfo(
            poet_public_key='key_002'),
        transaction_id='transaction_001')

    # Claim a bunch of blocks to get past the bootstrapping necessary
    # to get to the point where we will actually test the block claim
    # delay
    state = consensus_state.ConsensusState()
    for _ in range(100):
        state.validator_did_claim_block(
            validator_info=info,
            wait_certificate=wait_cert,
            poet_settings_view=settings)

    def _too_early(num):
        # Ask the consensus state whether claiming block `num` would
        # violate the claim delay
        return state.validator_is_claiming_too_early(
            validator_info=info,
            block_number=num,
            validator_registry_view=registry_view,
            poet_settings_view=settings,
            block_store=block_store)

    # Blocks far enough past the signup block satisfy the claim delay
    for candidate in [103, 105, 110, 200, 1000]:
        self.assertFalse(_too_early(candidate))

    # Blocks too close to the signup block violate the claim delay
    for candidate in [100, 101, 102]:
        self.assertTrue(_too_early(candidate))
def test_serialize(self):
    """Verify that deserializing invalid data results in the appropriate
    error.  Verify that serializing state and then deserializing
    results in the same state values.
    """
    poet_settings_view = mock.Mock()
    poet_settings_view.population_estimate_sample_size = 50

    # Simple deserialization check of buffer
    for invalid_state in [None, '', 1, 1.1, (), [], {}]:
        state = consensus_state.ConsensusState()
        with self.assertRaises(ValueError):
            state.parse_from_bytes(cbor.dumps(invalid_state))

    # Missing aggregate local mean
    with mock.patch(
            'sawtooth_poet.poet_consensus.consensus_state.cbor.loads') \
            as mock_loads:
        mock_loads.return_value = {
            '_population_samples': [(2.718, 3.1415), (1.618, 0.618)],
            '_total_block_claim_count': 0,
            '_validators': {}
        }
        with self.assertRaises(ValueError):
            state.parse_from_bytes(b'')

    # Invalid aggregate local mean
    for invalid_alm in [None, 'not a float', (), [], {}, -1,
                        float('nan'), float('inf'), float('-inf')]:
        state = consensus_state.ConsensusState()
        with mock.patch(
                'sawtooth_poet.poet_consensus.consensus_state.cbor.'
                'loads') \
                as mock_loads:
            mock_loads.return_value = {
                '_aggregate_local_mean': invalid_alm,
                '_population_samples': [(2.718, 3.1415), (1.618, 0.618)],
                '_total_block_claim_count': 0,
                '_validators': {}
            }
            with self.assertRaises(ValueError):
                state.parse_from_bytes(b'')

    # Missing population samples
    with mock.patch(
            'sawtooth_poet.poet_consensus.consensus_state.cbor.loads') \
            as mock_loads:
        mock_loads.return_value = {
            '_aggregate_local_mean': 0.0,
            '_total_block_claim_count': 0,
            '_validators': {}
        }
        with self.assertRaises(ValueError):
            state.parse_from_bytes(b'')

    # Invalid population samples (wrong type, wrong arity, or
    # non-finite floats in either position, as tuple or list)
    for invalid_ps in [None, 1, 1.0, 'str', (1,), [1],
                       (1.0, None), (1.0, 'str'), (1.0, ()), (1.0, []),
                       (1.0, {}), (1.0, float('nan')),
                       (1.0, float('inf')), (1.0, float('-inf')),
                       (float('nan'), 1.0), (float('inf'), 1.0),
                       (float('-inf'), 1.0), (None, 1.0), ('str', 1.0),
                       ((), 1.0), ([], 1.0), ({}, 1.0),
                       [1.0, None], [1.0, 'str'], [1.0, ()], [1.0, []],
                       [1.0, {}], [1.0, float('nan')],
                       [1.0, float('inf')], [1.0, float('-inf')],
                       [float('nan'), 1.0], [float('inf'), 1.0],
                       [float('-inf'), 1.0], [None, 1.0], ['str', 1.0],
                       [(), 1.0], [[], 1.0], [{}, 1.0]]:
        state = consensus_state.ConsensusState()
        with mock.patch(
                'sawtooth_poet.poet_consensus.consensus_state.cbor.'
                'loads') \
                as mock_loads:
            mock_loads.return_value = {
                '_aggregate_local_mean': 0.0,
                '_population_samples': invalid_ps,
                '_total_block_claim_count': 0,
                '_validators': {}
            }
            with self.assertRaises(ValueError):
                state.parse_from_bytes(b'')

    # Missing total block claim count
    with mock.patch(
            'sawtooth_poet.poet_consensus.consensus_state.cbor.loads') \
            as mock_loads:
        mock_loads.return_value = {
            '_aggregate_local_mean': 0.0,
            '_population_samples': [(2.718, 3.1415), (1.618, 0.618)],
            '_validators': {}
        }
        with self.assertRaises(ValueError):
            state.parse_from_bytes(b'')

    # Invalid total block claim count
    for invalid_tbcc in [None, 'not an int', (), [], {}, -1]:
        state = consensus_state.ConsensusState()
        with mock.patch(
                'sawtooth_poet.poet_consensus.consensus_state.cbor.'
                'loads') \
                as mock_loads:
            mock_loads.return_value = {
                '_aggregate_local_mean': 0.0,
                '_population_samples': [(2.718, 3.1415), (1.618, 0.618)],
                '_total_block_claim_count': invalid_tbcc,
                '_validators': {}
            }
            with self.assertRaises(ValueError):
                state.parse_from_bytes(b'')

    # Invalid validators
    for invalid_validators in [None, '', 1, 1.1, (), []]:
        state = consensus_state.ConsensusState()
        with mock.patch(
                'sawtooth_poet.poet_consensus.consensus_state.cbor.'
                'loads') \
                as mock_loads:
            mock_loads.return_value = {
                '_aggregate_local_mean': 0.0,
                '_population_samples': [(2.718, 3.1415), (1.618, 0.618)],
                '_total_block_claim_count': 0,
                '_validators': invalid_validators
            }
            with self.assertRaises(ValueError):
                state.parse_from_bytes(b'')

    state = consensus_state.ConsensusState()

    wait_certificate = mock.Mock()
    wait_certificate.duration = 3.14
    wait_certificate.local_mean = 5.0

    validator_info = \
        ValidatorInfo(
            id='validator_001',
            signup_info=SignUpInfo(
                poet_public_key='key_001'))

    state.validator_did_claim_block(
        validator_info=validator_info,
        wait_certificate=wait_certificate,
        poet_settings_view=poet_settings_view)
    doppelganger_state = consensus_state.ConsensusState()

    # Truncate the serialized value on purpose (from either end) and
    # verify that deserialization fails
    with self.assertRaises(ValueError):
        doppelganger_state.parse_from_bytes(
            state.serialize_to_bytes()[:-1])
    with self.assertRaises(ValueError):
        doppelganger_state.parse_from_bytes(
            state.serialize_to_bytes()[1:])

    # Test invalid key block claim counts in validator state
    for invalid_kbcc in [None, (), [], {}, '1', 1.1, -1]:
        state = consensus_state.ConsensusState()
        with mock.patch(
                'sawtooth_poet.poet_consensus.consensus_state.cbor.'
                'loads') as mock_loads:
            mock_loads.return_value = {
                '_aggregate_local_mean': 0.0,
                '_population_samples': [(2.718, 3.1415), (1.618, 0.618)],
                '_total_block_claim_count': 0,
                '_validators': {
                    'validator_001': [invalid_kbcc, 'ppk_001', 0]
                }
            }
            with self.assertRaises(ValueError):
                state.parse_from_bytes(b'')

    # Test invalid PoET public key in validator state
    for invalid_ppk in [None, (), [], {}, 1, 1.1, '']:
        state = consensus_state.ConsensusState()
        with mock.patch(
                'sawtooth_poet.poet_consensus.consensus_state.cbor.'
                'loads') as mock_loads:
            mock_loads.return_value = {
                '_aggregate_local_mean': 0.0,
                '_population_samples': [(2.718, 3.1415), (1.618, 0.618)],
                '_total_block_claim_count': 0,
                '_validators': {
                    'validator_001': [0, invalid_ppk, 0]
                }
            }
            with self.assertRaises(ValueError):
                state.parse_from_bytes(b'')

    # Test total block claim count in validator state
    for invalid_tbcc in [None, (), [], {}, '1', 1.1, -1]:
        state = consensus_state.ConsensusState()
        with mock.patch(
                'sawtooth_poet.poet_consensus.consensus_state.cbor.'
                'loads') as mock_loads:
            mock_loads.return_value = {
                '_aggregate_local_mean': 0.0,
                '_population_samples': [(2.718, 3.1415), (1.618, 0.618)],
                '_total_block_claim_count': 0,
                '_validators': {
                    'validator_001': [0, 'ppk_001', invalid_tbcc]
                }
            }
            with self.assertRaises(ValueError):
                state.parse_from_bytes(b'')

    # Test with total block claim count < key block claim count
    state = consensus_state.ConsensusState()
    with mock.patch(
            'sawtooth_poet.poet_consensus.consensus_state.cbor.'
            'loads') as mock_loads:
        mock_loads.return_value = {
            '_aggregate_local_mean': 0.0,
            '_population_samples': [(2.718, 3.1415), (1.618, 0.618)],
            '_total_block_claim_count': 0,
            '_validators': {
                'validator_001': [2, 'ppk_001', 1]
            }
        }
        with self.assertRaises(ValueError):
            state.parse_from_bytes(b'')

    # Simple serialization of new consensus state and then deserialize
    # and compare
    state = consensus_state.ConsensusState()
    doppelganger_state = consensus_state.ConsensusState()
    doppelganger_state.parse_from_bytes(state.serialize_to_bytes())

    self.assertEqual(
        state.aggregate_local_mean,
        doppelganger_state.aggregate_local_mean)
    self.assertEqual(
        state.total_block_claim_count,
        doppelganger_state.total_block_claim_count)

    # Now put a couple of validators in, serialize, deserialize, and
    # verify they are in deserialized
    wait_certificate_1 = mock.Mock()
    wait_certificate_1.duration = 3.14
    wait_certificate_1.local_mean = 5.0
    wait_certificate_2 = mock.Mock()
    wait_certificate_2.duration = 1.618
    wait_certificate_2.local_mean = 2.718

    mock_poet_settings_view = mock.Mock()
    mock_poet_settings_view.target_wait_time = 30.0
    mock_poet_settings_view.initial_wait_time = 3000.0
    mock_poet_settings_view.population_estimate_sample_size = 50

    validator_info_1 = \
        ValidatorInfo(
            id='validator_001',
            signup_info=SignUpInfo(
                poet_public_key='key_001'))
    validator_info_2 = \
        ValidatorInfo(
            id='validator_002',
            signup_info=SignUpInfo(
                poet_public_key='key_002'))

    state.validator_did_claim_block(
        validator_info=validator_info_1,
        wait_certificate=wait_certificate_1,
        poet_settings_view=poet_settings_view)
    state.validator_did_claim_block(
        validator_info=validator_info_2,
        wait_certificate=wait_certificate_2,
        poet_settings_view=poet_settings_view)

    doppelganger_state.parse_from_bytes(state.serialize_to_bytes())

    self.assertEqual(
        state.aggregate_local_mean,
        doppelganger_state.aggregate_local_mean)
    self.assertAlmostEqual(
        first=state.compute_local_mean(
            poet_settings_view=mock_poet_settings_view),
        second=doppelganger_state.compute_local_mean(
            poet_settings_view=mock_poet_settings_view),
        places=4)
    self.assertEqual(
        state.total_block_claim_count,
        doppelganger_state.total_block_claim_count)

    validator_state = \
        state.get_validator_state(
            validator_info=validator_info_1)
    doppleganger_validator_state = \
        doppelganger_state.get_validator_state(
            validator_info=validator_info_1)

    self.assertEqual(
        validator_state.key_block_claim_count,
        doppleganger_validator_state.key_block_claim_count)
    self.assertEqual(
        validator_state.poet_public_key,
        doppleganger_validator_state.poet_public_key)
    self.assertEqual(
        validator_state.total_block_claim_count,
        doppleganger_validator_state.total_block_claim_count)

    validator_state = \
        state.get_validator_state(
            validator_info=validator_info_2)
    doppleganger_validator_state = \
        doppelganger_state.get_validator_state(
            validator_info=validator_info_2)

    self.assertEqual(
        validator_state.key_block_claim_count,
        doppleganger_validator_state.key_block_claim_count)
    self.assertEqual(
        validator_state.poet_public_key,
        doppleganger_validator_state.poet_public_key)
    self.assertEqual(
        validator_state.total_block_claim_count,
        doppleganger_validator_state.total_block_claim_count)
def create_signup_info(self,
                       originator_public_key_hash,
                       nonce,
                       pse_manifest_status='OK'):
    """Build a fake SignUpInfo for tests without a real SGX enclave.

    Constructs a fake SGX quote (basename, enclave measurement, and
    report data derived from SHA256 of the originator public key hash
    concatenated with this enclave's PoET public key), a fake PSE
    manifest, and a fake IAS-style verification report, then wraps
    them into proof data.

    Args:
        originator_public_key_hash (str): Hash of the originator's
            public key; upper-cased into the report data and reused as
            the anti-sybil ID and epidPseudonym.
        nonce (str): Nonce echoed into the verification report and the
            returned SignUpInfo.
        pse_manifest_status (str): Value placed in the report's
            'pseManifestStatus' field (default 'OK').

    Returns:
        SignUpInfo: signup info carrying this object's PoET public key,
        the fabricated proof data, the anti-sybil ID, and the nonce.
    """
    # currently not used
    # _active_wait_timer = None

    # We are going to fake out the sealing the signup data.
    signup_data = {
        'poet_public_key': self.poet_public_key,
        'poet_private_key': self._poet_private_key
    }

    # Build up a fake SGX quote containing:
    # 1. The basename
    # 2. The report body that contains:
    #    a. The enclave measurement
    #    b. The report data SHA256(SHA256(OPK)|PPK)
    sgx_basename = \
        sgx_structs.SgxBasename(name=self.__VALID_BASENAME__)
    sgx_measurement = \
        sgx_structs.SgxMeasurement(
            m=self.__VALID_ENCLAVE_MEASUREMENT__)

    # Report data binds the originator key hash to the PoET public key
    hash_input = \
        '{0}{1}'.format(
            originator_public_key_hash.upper(),
            self.poet_public_key.upper()).encode()
    report_data = hashlib.sha256(hash_input).digest()
    sgx_report_data = sgx_structs.SgxReportData(d=report_data)
    sgx_report_body = \
        sgx_structs.SgxReportBody(
            mr_enclave=sgx_measurement,
            report_data=sgx_report_data)

    sgx_quote = \
        sgx_structs.SgxQuote(
            basename=sgx_basename,
            report_body=sgx_report_body)

    # Create a fake PSE manifest.  A base64 encoding of the
    # originator public key hash should suffice.
    pse_manifest = \
        base64.b64encode(originator_public_key_hash.encode())

    # Fixed timestamp keeps the fake report deterministic
    timestamp = '2017-02-16T15:21:24.437048'

    # Fake our "proof" data.
    # NOTE(review): 'isvEnclaveQuoteStatus' is hard-coded to 'OK' while
    # 'pseManifestStatus' is parameterized — presumably only the PSE
    # status needs to vary in tests; confirm against callers.
    verification_report = OrderedDict([
        ('epidPseudonym', originator_public_key_hash),
        ('id', base64.b64encode(
            hashlib.sha256(
                timestamp.encode()).hexdigest().encode()).decode()),
        ('isvEnclaveQuoteStatus', 'OK'),
        ('isvEnclaveQuoteBody',
         base64.b64encode(sgx_quote.serialize_to_bytes()).decode()),
        ('pseManifestStatus', pse_manifest_status),
        ('pseManifestHash',
         hashlib.sha256(base64.b64decode(pse_manifest)).hexdigest()),
        ('nonce', nonce),
        ('timestamp', timestamp)
    ])

    proof_data = \
        self.create_proof_data(
            verification_report=verification_report,
            evidence_payload={
                'pse_manifest': pse_manifest.decode()
            })

    return \
        SignUpInfo(
            poet_public_key=signup_data['poet_public_key'],
            proof_data=proof_data,
            anti_sybil_id=originator_public_key_hash,
            nonce=nonce)
def test_consensus_store_set_get(self, mock_lmdb):
    """Verify that externally visible state (len, etc.) of the
    consensus state store after set is expected.  Verify that
    retrieving a previously set consensus state object results
    in the same values set.
    """
    # Make LMDB return empty dict
    my_dict = {}
    mock_lmdb.return_value = my_dict

    mock_poet_settings_view = mock.Mock()
    mock_poet_settings_view.target_wait_time = 30.0
    mock_poet_settings_view.initial_wait_time = 3000.0
    mock_poet_settings_view.minimum_wait_time = 1.0
    mock_poet_settings_view.population_estimate_sample_size = 50

    store = \
        consensus_state_store.ConsensusStateStore(
            data_dir=tempfile.gettempdir(),
            validator_id='0123456789abcdef')

    # Verify the length is zero and doesn't contain key
    self.assertEqual(len(store), 0)
    self.assertTrue('key' not in store)

    # Store consensus state
    state = consensus_state.ConsensusState()
    store['key'] = state

    # Verify the length and contains key
    self.assertEqual(len(store), 1)
    self.assertEqual(len(my_dict), 1)
    self.assertTrue('key' in store)
    self.assertTrue('key' in my_dict)

    # Retrieve the state and verify equality
    retrieved_state = store['key']
    self.assertEqual(
        state.aggregate_local_mean,
        retrieved_state.aggregate_local_mean)
    self.assertEqual(
        state.total_block_claim_count,
        retrieved_state.total_block_claim_count)

    # Have a validator claim a block and update the store
    wait_certificate = mock.Mock()
    wait_certificate.duration = 3.1415
    wait_certificate.local_mean = 5.0
    validator_info = \
        ValidatorInfo(
            id='validator_001',
            signup_info=SignUpInfo(
                poet_public_key='key_001'))
    state.validator_did_claim_block(
        validator_info=validator_info,
        wait_certificate=wait_certificate,
        poet_settings_view=mock_poet_settings_view)
    store['key'] = state

    # Verify the length and contains key
    self.assertEqual(len(store), 1)
    self.assertEqual(len(my_dict), 1)
    self.assertTrue('key' in store)
    self.assertTrue('key' in my_dict)

    # Retrieve the state and verify equality
    retrieved_state = store['key']
    self.assertEqual(
        state.aggregate_local_mean,
        retrieved_state.aggregate_local_mean)
    self.assertEqual(
        state.total_block_claim_count,
        retrieved_state.total_block_claim_count)

    # BUG FIX: the original fetched BOTH validator states from
    # retrieved_state, comparing the retrieved copy against itself —
    # a tautology that could never fail.  Fetch one side from the
    # in-memory state so the round-trip is actually verified.
    validator_state = \
        state.get_validator_state(
            validator_info=validator_info)
    retrieved_validator_state = \
        retrieved_state.get_validator_state(
            validator_info=validator_info)
    self.assertEqual(validator_state.key_block_claim_count,
                     retrieved_validator_state.key_block_claim_count)
    self.assertEqual(validator_state.poet_public_key,
                     retrieved_validator_state.poet_public_key)
    self.assertEqual(validator_state.total_block_claim_count,
                     retrieved_validator_state.total_block_claim_count)

    # Delete the key and then verify length and does not contain key
    del store['key']
    self.assertEqual(len(store), 0)
    self.assertEqual(len(my_dict), 0)
    self.assertTrue('key' not in store)
    self.assertTrue('key' not in my_dict)

    with self.assertRaises(KeyError):
        _ = store['key']
def test_signup_info_not_committed_within_allowed_delay(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store, mock_poet_key_state_store,
        mock_signup_info, mock_poet_settings_view, mock_block_wrapper):
    """ Test verifies that PoET Block Publisher fails if
    a validator's signup info was not committed to
    the block chain within the allowed configured delay.

    All mock_* parameters are injected by patch decorators on the
    test class/method (not visible in this block).
    """
    # create a mock_validator_registry_view with
    # get_validator_info that does nothing
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff',
                nonce='nonce'))

    # create a mock_wait_certificate that does nothing in check_valid
    mock_wait_certificate = mock.Mock()
    mock_wait_certificate.check_valid.return_value = None

    mock_utils.deserialize_wait_certificate.return_value = \
        mock_wait_certificate

    # create a mock_consensus_state that returns a mock with
    # the following settings:
    # committed_too_late=True drives the late-commit failure path that
    # this test is asserting on.
    mock_state = MockConsensusState.create_mock_consensus_state(
        committed_too_late=True)

    mock_consensus_state.consensus_state_for_block_id.return_value = \
        mock_state

    # NOTE(review): the store's __getitem__ yields the patched
    # consensus_state module mock itself, not mock_state — presumably
    # only attribute access is exercised through it; confirm.
    mock_consensus_state_store.return_value.__getitem__.return_value = \
        mock_consensus_state

    # Create mock key state
    mock_poet_key_state_store.return_value.__getitem__.return_value = \
        mock.Mock(
            sealed_signup_data='sealed signup data',
            has_been_refreshed=False)

    # create mock_signup_info
    mock_signup_info.create_signup_info.return_value = \
        mock.Mock(
            poet_public_key='poet public key',
            proof_data='proof data',
            anti_sybil_id='anti-sybil ID',
            sealed_signup_data='sealed signup data')
    mock_signup_info.block_id_to_nonce.return_value = 'nonce'
    mock_signup_info.unseal_signup_data.return_value = \
        '00112233445566778899aabbccddeeff'

    # create mock_batch_publisher with a real secp256k1 signer so any
    # transaction built during the test can be signed
    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    signer = crypto_factory.new_signer(private_key)

    mock_batch_publisher = mock.Mock(identity_signer=signer)

    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()

    # create mock_block_header with the following fields
    mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
    mock_block.header.signer_public_key = \
        '90834587139405781349807435098745'
    mock_block.header.previous_block_id = '2'
    mock_block.header.block_num = 1
    mock_block.header.state_root_hash = '6'
    mock_block.header.batch_ids = '4'

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_block_publisher.'
                    'LOGGER') as mock_logger:
        block_publisher = \
            poet_block_publisher.PoetBlockPublisher(
                block_cache=mock_block_cache,
                state_view_factory=mock_state_view_factory,
                batch_publisher=mock_batch_publisher,
                data_dir=self._temp_dir,
                config_dir=self._temp_dir,
                validator_id='validator_deadbeef')

        # initialize_block must report failure for the late signup info
        self.assertFalse(
            block_publisher.initialize_block(
                block_header=mock_block.header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else.  I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        self.assertTrue(
            any(
                'Validator signup information not committed in a timely '
                'manner.' in call[0][0] for call in
                mock_logger.info.call_args_list))

        # check that create.signup_info() was called to create
        # the validator registry payload with new set of keys
        self.assertTrue(mock_signup_info.create_signup_info.called)
def test_block_publisher_finalize_block(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store, mock_poet_key_state_store,
        mock_signup_info, mock_wait_certificate,
        mock_poet_settings_view, mock_block_wrapper):
    """ Test verifies that PoET Block Publisher finalizes the block,
    meaning that the candidate block is good and should be generated.

    All mock_* parameters are injected by patch decorators on the
    test class/method (not visible in this block).
    """
    # create a mock_validator_registry_view with
    # get_validator_info that does nothing
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # create a mock_wait_certificate that does nothing in check_valid
    my_wait_certificate = mock.Mock()
    my_wait_certificate.check_valid.return_value = None
    mock_wait_certificate.create_wait_certificate.return_value = \
        my_wait_certificate

    # create a mock_consensus_state that returns a mock with
    # the following settings:
    mock_state = MockConsensusState().create_mock_consensus_state()

    mock_consensus_state.consensus_state_for_block_id.return_value = \
        mock_state

    # create mock_batch_publisher
    # NOTE(review): this test uses the older identity_signing_key /
    # signing API while sibling tests build a CryptoFactory signer —
    # presumably both are accepted by the publisher; confirm.
    mock_batch_publisher = mock.Mock(
        identity_signing_key=signing.generate_private_key())

    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()

    # create mock_block_header with the following fields
    mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
    mock_block.header.signer_public_key = \
        '90834587139405781349807435098745'
    mock_block.header.previous_block_id = '2'
    mock_block.header.block_num = 1
    mock_block.header.state_root_hash = '6'
    mock_block.header.batch_ids = '4'

    # check test
    block_publisher = \
        poet_block_publisher.PoetBlockPublisher(
            block_cache=mock_block_cache,
            state_view_factory=mock_state_view_factory,
            batch_publisher=mock_batch_publisher,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            validator_id='validator_deadbeef')

    # Patch out the json module used by the publisher so serialization
    # of the consensus payload is a no-op during finalize
    with mock.patch('sawtooth_poet.poet_consensus.'
                    'poet_block_publisher.json') as _:
        self.assertTrue(
            block_publisher.finalize_block(block_header=mock_block.header))
def test_block_publisher_doesnt_claim_readiness(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store, mock_poet_key_state_store,
        mock_signup_info, mock_wait_time,
        mock_poet_settings_view, mock_block_wrapper):
    """ Test verifies that PoET Block Publisher doesn't claim readiness
    if the wait timer hasn't expired.

    All mock_* parameters are injected by patch decorators on the
    test class/method (not visible in this block).
    """
    # create a mock_validator_registry_view with
    # get_validator_info that does nothing
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # create a mock_consensus_state that returns a mock with
    # the following settings:
    mock_state = MockConsensusState.create_mock_consensus_state()

    mock_consensus_state.consensus_state_for_block_id.return_value = \
        mock_state

    # NOTE(review): the store's __getitem__ yields the patched
    # consensus_state module mock itself, not mock_state — presumably
    # only attribute access is exercised through it; confirm.
    mock_consensus_state_store.return_value.__getitem__.return_value = \
        mock_consensus_state

    # Create mock key state
    mock_poet_key_state_store.return_value.__getitem__.return_value = \
        mock.Mock(
            sealed_signup_data='sealed signup data',
            has_been_refreshed=False)

    # create mock_signup_info
    mock_signup_info.unseal_signup_data.return_value = \
        '00112233445566778899aabbccddeeff'

    # create mock_batch_publisher with a real secp256k1 signer
    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    signer = crypto_factory.new_signer(private_key)
    mock_batch_publisher = mock.Mock(identity_signer=signer)

    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()

    # create mock_block_header with the following fields
    mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
    mock_block.header.signer_public_key = \
        '90834587139405781349807435098745'
    mock_block.header.previous_block_id = '2'
    mock_block.header.block_num = 1
    mock_block.header.state_root_hash = '6'
    mock_block.header.batch_ids = '4'

    # create a mock_wait_timer that hasn't expired yet
    my_wait_time = mock.Mock()
    my_wait_time.has_expired.return_value = False
    mock_wait_time.create_wait_timer.return_value = my_wait_time

    # create mock_poet_enclave_module
    mock_poet_enclave_module = mock.Mock()
    mock_poet_enclave_module.return_value = \
        mock_poet_enclave_factory.get_poet_enclave_module.return_value

    # check test
    block_publisher = \
        poet_block_publisher.PoetBlockPublisher(
            block_cache=mock_block_cache,
            state_view_factory=mock_state_view_factory,
            batch_publisher=mock_batch_publisher,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            validator_id='validator_deadbeef')

    # check initialize_block() first to set wait_timer
    self.assertTrue(
        block_publisher.initialize_block(
            block_header=mock_block.header))

    # check that block_publisher only claims readiness
    # when the wait_timer has expired
    self.assertFalse(
        block_publisher.check_publish_block(
            block_header=mock_block.header))
def test_validator_did_claim_block(self):
    """Verify that trying to update consensus and validator state with
    validators that previous don't and do exist appropriately update
    the consensus and validator statistics.
    """
    consensus = consensus_state.ConsensusState()

    mock_certificate = mock.Mock()
    mock_certificate.duration = 3.1415
    mock_certificate.local_mean = 5.0

    mock_settings = mock.Mock()
    mock_settings.population_estimate_sample_size = 50

    info = \
        ValidatorInfo(
            id='validator_001',
            signup_info=SignUpInfo(
                poet_public_key='key_001'))

    def _claim_block():
        # Record one block claim for the validator and hand back its
        # refreshed per-validator state for inspection.
        consensus.validator_did_claim_block(
            validator_info=info,
            wait_certificate=mock_certificate,
            poet_settings_view=mock_settings)
        return consensus.get_validator_state(validator_info=info)

    # A previously unknown validator claiming a block should cause the
    # consensus state to add it and set statistics appropriately.
    claimed = _claim_block()
    self.assertEqual(
        consensus.aggregate_local_mean, mock_certificate.local_mean)
    self.assertEqual(consensus.total_block_claim_count, 1)
    self.assertEqual(claimed.key_block_claim_count, 1)
    self.assertEqual(claimed.poet_public_key, 'key_001')
    self.assertEqual(claimed.total_block_claim_count, 1)

    # A second claim by the now-known validator must advance both the
    # aggregate and the per-validator statistics.
    claimed = _claim_block()
    self.assertEqual(
        consensus.aggregate_local_mean, 2 * mock_certificate.local_mean)
    self.assertEqual(consensus.total_block_claim_count, 2)
    self.assertEqual(claimed.key_block_claim_count, 2)
    self.assertEqual(claimed.poet_public_key, 'key_001')
    self.assertEqual(claimed.total_block_claim_count, 2)

    # Claiming under a fresh signup key resets the per-key claim count
    # while the validator's and network's totals keep growing.
    info.signup_info.poet_public_key = 'key_002'
    claimed = _claim_block()
    self.assertEqual(
        consensus.aggregate_local_mean, 3 * mock_certificate.local_mean)
    self.assertEqual(consensus.total_block_claim_count, 3)
    self.assertEqual(claimed.key_block_claim_count, 1)
    self.assertEqual(claimed.poet_public_key, 'key_002')
    self.assertEqual(claimed.total_block_claim_count, 3)