def test_get_validators(self):
    """Given a state view with multiple validators, and the
    'validator_map' entry, verify that get_validators returns the list
    of just ValidatorInfo instances.
    """
    # Two real validator entries...
    first = ValidatorInfo(
        name='my_validator',
        id='my_id',
        signup_info=SignUpInfo(
            poet_public_key='my_pubkey',
            proof_data='beleive me',
            anti_sybil_id='no sybil'),
        transaction_id="signature")
    second = ValidatorInfo(
        name='your_validator',
        id='another_id',
        signup_info=SignUpInfo(
            poet_public_key='your_pubkey',
            proof_data='you betcha',
            anti_sybil_id='poor sybil'),
        transaction_id="signature")

    # ...plus the bookkeeping 'validator_map' entry, which the view
    # must skip over.
    state_view = MockStateView({
        to_address('validator_map'): b'this should be ignored',
        to_address('my_id'): first.SerializeToString(),
        to_address('another_id'): second.SerializeToString()
    })

    infos = ValidatorRegistryView(state_view).get_validators()

    self.assertEqual(2, len(infos))
    self.assertEqual('my_validator', infos['my_id'].name)
    self.assertEqual('your_validator', infos['another_id'].name)
def apply(self, transaction, state):
    """Validate and apply a validator-registry transaction.

    Parses the transaction header and payload, rejects registrations
    with an over-long name or whose claimed validator id does not match
    the transaction signer, verifies the enclave signup information,
    and finally writes the resulting ValidatorInfo into state.

    Raises:
        InvalidTransaction: if the name is illegal, the signer does not
            match the registering validator, or the signup info fails
            verification.
    """
    txn_header = TransactionHeader()
    txn_header.ParseFromString(transaction.header)
    # The signer's public key is the identity the registration must match.
    pubkey = txn_header.signer_pubkey

    val_reg_payload = ValidatorRegistryPayload()
    val_reg_payload.ParseFromString(transaction.payload)

    # Check name
    validator_name = val_reg_payload.name
    if len(validator_name) > 64:
        raise InvalidTransaction(
            'Illegal validator name {}'.format(validator_name))

    # Check registering validator matches transaction signer.
    validator_id = val_reg_payload.id
    if validator_id != pubkey:
        raise InvalidTransaction(
            'Signature mismatch on validator registration with validator'
            ' {} signed by {}'.format(validator_id, pubkey))

    # Hash of the signer's key is what the signup info was bound to.
    public_key_hash = hashlib.sha256(pubkey.encode()).hexdigest()
    signup_info = val_reg_payload.signup_info

    try:
        # Verification raises ValueError on any signup-info problem;
        # translate that into a transaction-level rejection.
        self._verify_signup_info(
            signup_info=signup_info,
            originator_public_key_hash=public_key_hash,
            val_reg_payload=val_reg_payload,
            state=state)
    except ValueError as error:
        raise InvalidTransaction(
            'Invalid Signup Info: {0}, Reason: {1}'.format(
                signup_info,
                error))

    validator_info = ValidatorInfo(
        registered="registered",
        name=validator_name,
        id=validator_id,
        signup_info=val_reg_payload.signup_info,
        transaction_id=transaction.signature
    )

    # Persist the registration (also updates the anti-Sybil map entry).
    _update_validator_state(state,
                            validator_id,
                            signup_info.anti_sybil_id,
                            validator_info.SerializeToString())
def test_block_claim_limit(self):
    """Verify that consensus state properly indicates whether or not a
    validator has reached the block claim limit
    """
    wait_cert = mock.Mock()
    wait_cert.duration = 3.14
    wait_cert.local_mean = 5.0

    settings = mock.Mock()
    settings.key_block_claim_limit = 10
    settings.population_estimate_sample_size = 50

    info = ValidatorInfo(
        id='validator_001',
        signup_info=SignUpInfo(
            poet_public_key='key_001'))

    state = consensus_state.ConsensusState()

    # Claim blocks right up to the limit; the limit check must not
    # trip before the limit is actually reached.
    for _ in range(settings.key_block_claim_limit):
        self.assertFalse(
            state.validator_has_claimed_block_limit(
                validator_info=info,
                poet_settings_view=settings))
        state.validator_did_claim_block(
            validator_info=info,
            wait_certificate=wait_cert,
            poet_settings_view=settings)

    # At the limit for this key, the check must now trip.
    self.assertTrue(
        state.validator_has_claimed_block_limit(
            validator_info=info,
            poet_settings_view=settings))

    # A fresh PoET key restarts the per-key count.
    info = ValidatorInfo(
        id='validator_001',
        signup_info=SignUpInfo(
            poet_public_key='key_002'))
    self.assertFalse(
        state.validator_has_claimed_block_limit(
            validator_info=info,
            poet_settings_view=settings))
def test_get_validator_info(self):
    """Given a state view that contains a state entry for a given
    validator info, verify that the validator registry returns a
    ValidatorInfo when get_validator_info is called with the
    validator's id."""
    stored = ValidatorInfo(
        registered='sure',
        name='my_validator',
        id='my_id',
        signup_info=SignUpInfo(
            poet_public_key='my_pubkey',
            proof_data='beleive me',
            anti_sybil_id='no sybil'),
        transaction_id="signature")

    registry = ValidatorRegistryView(
        MockStateView({to_address('my_id'): stored.SerializeToString()}))

    info = registry.get_validator_info('my_id')

    # Every field must round-trip through serialization unchanged.
    self.assertEqual('my_id', info.id)
    self.assertEqual('my_validator', info.name)
    self.assertEqual('sure', info.registered)
    self.assertEqual("signature", info.transaction_id)
    self.assertEqual('my_pubkey', info.signup_info.poet_public_key)
    self.assertEqual('beleive me', info.signup_info.proof_data)
    self.assertEqual('no sybil', info.signup_info.anti_sybil_id)
def test_non_poet_block(self, mock_utils, mock_validator_registry_view,
                        mock_consensus_state, mock_poet_enclave_factory,
                        mock_poet_config_view, mock_block_wrapper,
                        mock_consensus_state_store):
    """Verify that the PoET block verifier indicates failure if the
    block is not a PoET block (i.e., the consensus field in the block
    header is not a serialized wait certificate).
    """
    # Ensure that the consensus state does not generate failures that
    # would allow this test to pass
    mock_state = mock.Mock()
    mock_state.validator_signup_was_committed_too_late.return_value = False
    mock_state.validator_has_claimed_block_limit.return_value = False
    mock_state.validator_is_claiming_too_early.return_value = False
    mock_state.validator_is_claiming_too_frequently.return_value = False
    mock_consensus_state.consensus_state_for_block_id.return_value = \
        mock_state

    # Make utils pretend it cannot deserialize the wait certificate
    mock_utils.deserialize_wait_certificate.return_value = None

    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()
    mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')

    mock_validator_registry_view.return_value.get_validator_info.\
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    with mock.patch('sawtooth_poet.poet_consensus.poet_block_verifier.'
                    'LOGGER') as mock_logger:
        block_verifier = \
            PoetBlockVerifier(
                block_cache=mock_block_cache,
                state_view_factory=mock_state_view_factory,
                data_dir=self._temp_dir,
                # FIX: pass config_dir like the other verifier tests in
                # this suite; it was missing here.
                config_dir=self._temp_dir,
                validator_id='validator_deadbeef')
        self.assertFalse(
            block_verifier.verify_block(
                block_wrapper=mock_block))

        # Could be a hack, but verify that the appropriate log message
        # is generated - so we at least have some faith that the
        # failure was because of what we are testing and not something
        # else. I know that this is fragile if the log message is
        # changed, so would accept any suggestions on a better way to
        # verify that the function fails for the reason we expect.
        (message, *_), _ = mock_logger.error.call_args
        self.assertTrue(
            'was not created by PoET consensus module' in message)
def test_new_fork_head_not_poet_block(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store):
    """ Test verifies that if the new fork head is not a valid
    block, raises appropriate exception
    """
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # Pretend the new fork head's wait certificate cannot be
    # deserialized.
    mock_utils.deserialize_wait_certificate.return_value = None

    block_cache = mock.MagicMock()
    state_view_factory = mock.Mock()

    def header(identifier, previous_block_id):
        # Both fork heads share signer and signature; only id/previous
        # differ.
        return mock.Mock(
            identifier=identifier,
            signer_public_key='90834587139405781349807435098745',
            previous_block_id=previous_block_id,
            header_signature='00112233445566778899aabbccddeeff')

    cur_fork_header = header('0123456789abcdefedcba9876543210', '2')
    new_fork_header = header('0123456789abcdefedcba9876543211', '2')

    fork_resolver = \
        poet_fork_resolver.PoetForkResolver(
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            validator_id='validator_deadbeef')

    with self.assertRaises(TypeError) as cm:
        fork_resolver.compare_forks(
            cur_fork_head=cur_fork_header,
            new_fork_head=new_fork_header)
    self.assertEqual(
        'New fork head {} is not a PoET block',
        str(cm.exception))
def test_z_policy(self, mock_utils, mock_validator_registry_view,
                  mock_consensus_state, mock_poet_enclave_factory,
                  mock_poet_settings_view, mock_block_wrapper,
                  mock_consensus_state_store):
    """ Test verifies the Z Policy: that PoET Block Verifier fails
    if a validator attempts to claim more blocks frequently than is allowed
    """

    # create a mock_validator_registry_view that does nothing
    # in get_validator_info
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # create a mock_wait_certificate that does nothing in check_valid
    mock_wait_certificate = mock.Mock()
    mock_wait_certificate.check_valid.return_value = None

    mock_utils.deserialize_wait_certificate.return_value = \
        mock_wait_certificate

    # create a mock_consensus_state that returns a mock with
    # the following settings: only the Z-policy check
    # (claiming_too_frequently) reports a failure.
    mock_state = MockConsensusState.create_mock_consensus_state(
        claiming_too_frequently=True)

    mock_consensus_state.consensus_state_for_block_id.return_value = \
        mock_state

    # check test
    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()
    mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')

    with mock.patch('sawtooth_poet.poet_consensus.poet_block_verifier.'
                    'LOGGER') as mock_logger:
        block_verifier = \
            PoetBlockVerifier(
                block_cache=mock_block_cache,
                state_view_factory=mock_state_view_factory,
                data_dir=self._temp_dir,
                config_dir=self._temp_dir,
                validator_id='validator_deadbeef')
        self.assertFalse(
            block_verifier.verify_block(block_wrapper=mock_block))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else. I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.error.call_args
        self.assertTrue('Validator is claiming blocks too '
                        'frequently' in message)
def test_block_verifier_valid_block_claim(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_poet_config_view, mock_block_wrapper,
        mock_consensus_state_store):
    """ Test verifies that PoET Block Verifier succeeds
    if a validator successfully passes all criteria necessary
    to claim a block
    """

    # create a mock_validator_registry_view with
    # get_validator_info that does nothing
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # create a mock_wait_certificate that does nothing in check_valid
    mock_wait_certificate = mock.Mock()
    mock_wait_certificate.check_valid.return_value = None

    mock_utils.deserialize_wait_certificate.return_value = \
        mock_wait_certificate

    # Use the shared helper, as the sibling verifier tests do, to build
    # a consensus state where every claim check passes (previously this
    # test hand-set each check on a bare mock, which drifts out of sync
    # with the helper when new checks are added).
    mock_state = MockConsensusState.create_mock_consensus_state()

    mock_consensus_state.consensus_state_for_block_id.return_value = \
        mock_state

    # check test
    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()
    mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')

    block_verifier = \
        PoetBlockVerifier(
            block_cache=mock_block_cache,
            state_view_factory=mock_state_view_factory,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            validator_id='validator_deadbeef')
    self.assertTrue(
        block_verifier.verify_block(
            block_wrapper=mock_block))
def test_invalid_wait_certificate(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_poet_settings_view, mock_block_wrapper,
        mock_consensus_state_store):
    """Block verification must fail when the wait certificate's
    check_valid raises.
    """
    # Consensus-state checks all pass, so the only failure path left
    # is the certificate itself.
    mock_consensus_state.consensus_state_for_block_id.return_value = \
        MockConsensusState.create_mock_consensus_state()

    # This certificate blows up on validity checking.
    bad_certificate = mock.Mock()
    bad_certificate.check_valid.side_effect = \
        ValueError('Unit test fake failure')
    mock_utils.deserialize_wait_certificate.return_value = \
        bad_certificate

    block_cache = mock.MagicMock()
    state_view_factory = mock.Mock()
    block = mock.Mock(identifier='0123456789abcdefedcba9876543210')

    mock_validator_registry_view.return_value.get_validator_info.\
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    with mock.patch('sawtooth_poet.poet_consensus.poet_block_verifier.'
                    'LOGGER') as mock_logger:
        verifier = PoetBlockVerifier(
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            validator_id='validator_deadbeef')
        self.assertFalse(verifier.verify_block(block_wrapper=block))

        # Fragile but effective: confirm via the logged message that
        # the failure was for the reason under test and nothing else.
        (message, *_), _ = mock_logger.error.call_args
        self.assertTrue('Wait certificate check failed' in message)
def create_set_request_validator_info(self, validator_name, reg):
    """Build a state-set request holding a serialized ValidatorInfo
    registered under this factory's public key."""
    txn_id = ('7a79305e9734fd511386ae877da8770d66c22e4c7b18db8eb2'
              'ff6ec16f5a3452749ee49a04ea8a805ec5ec8b5d1fdfbcc6f3'
              'bf6374c99c9a906bc2837d0ad25a')
    payload = ValidatorInfo(
        registered=reg,
        name=validator_name,
        id=self.public_key,
        signup_info=self.create_signup_info(self.pubkey_hash, "000"),
        transaction_id=txn_id).SerializeToString()
    return self._factory.create_set_request(
        {self._key_to_address(self.public_key): payload})
def create_set_request_validator_info(self, validator_name, reg):
    """Return a set request whose payload is a serialized ValidatorInfo
    for this factory's public key."""
    signup = self.create_signup_info(self.pubkey_hash, "000")
    info = ValidatorInfo(
        registered=reg,
        name=validator_name,
        id=self.public_key,
        signup_info=signup,
        transaction_id=(
            'a48b383fe5c577471640f49e1e5341a9ed40a992125207e7c5'
            'dbecb21d6f5cc1002726c7ab6ab6e5bb1d13c4b2b65004156f'
            '6afaa573ab7aa3a0c41ed5c74b8f'))
    address = self._key_to_address(self.public_key)
    return self._factory.create_set_request(
        {address: info.SerializeToString()})
def create_set_request_validator_info(self, validator_name, reg):
    """Serialize a ValidatorInfo for this factory's key and wrap it in
    a set request."""
    serialized_info = ValidatorInfo(
        registered=reg,
        name=validator_name,
        id=self.public_key,
        signup_info=self.create_signup_info(self.pubkey_hash, "000"),
        transaction_id='0103c991863cae73630fe0a9b8988ad35840a3994ad010cd4c'
                       '60f17ca70b2054115bd5bdb0233f745826a61db0d83a32365f'
                       'e4026b39a731b0e457a5f09be194').SerializeToString()
    return self._factory.create_set_request(
        {self._key_to_address(self.public_key): serialized_info})
def create_set_request_validator_info(
        self, validator_name, transaction_id, signup_info=None):
    """Build a set request for a ValidatorInfo with the given
    transaction id; signup info defaults to a freshly created one."""
    if signup_info is None:
        signup_info = self.create_signup_info(self.public_key_hash, "000")

    serialized = ValidatorInfo(
        name=validator_name,
        id=self.public_key,
        signup_info=signup_info,
        transaction_id=transaction_id).SerializeToString()

    return self._factory.create_set_request(
        {self._key_to_address(self.public_key): serialized})
def do_get_missing_validator_state():
    """Verify that retrieving missing validator state returns
    appropriate default values.
    """
    state = consensus_state.ConsensusState()

    # Try to get a non-existent validator ID and verify it returns default
    # value
    validator_info = \
        ValidatorInfo(
            id='validator_001',
            signup_info=SignUpInfo(
                poet_public_key='key_001'))
    validator_state = \
        state.get_validator_state(validator_info=validator_info)

    # FIX: the fetched state was never checked, so this helper verified
    # nothing.  Assert the documented defaults (mirrors the assertions
    # used by the other get-missing-validator-state helpers/tests).
    assert validator_state.key_block_claim_count == 0
    assert validator_state.poet_public_key == 'key_001'
    assert validator_state.total_block_claim_count == 0
def do_get_missing_validator_state():
    """Getting state for a validator the consensus state has never seen
    must yield the documented default values."""
    unknown_validator = \
        ValidatorInfo(
            id='validator_001',
            signup_info=SignUpInfo(
                poet_public_key='key_001'))

    vstate = consensus_state.ConsensusState().get_validator_state(
        validator_info=unknown_validator)

    assert vstate.key_block_claim_count == 0
    assert vstate.poet_public_key == 'key_001'
    assert vstate.total_block_claim_count == 0
def test_has_validator_info(self):
    """Given a state view that contains a state entry for a given
    validator info, verify that the validator registry returns a true
    when has_validator_info is called with the validator's id."""
    serialized = ValidatorInfo(
        name='my_validator',
        id='my_id',
        signup_info=SignUpInfo(
            poet_public_key='my_pubkey',
            proof_data='beleive me',
            anti_sybil_id='no sybil'),
        transaction_id="signature").SerializeToString()

    registry = ValidatorRegistryView(
        MockStateView({to_address('my_id'): serialized}))

    self.assertTrue(registry.has_validator_info('my_id'))
def _get_validator_state(state, validator_id=None):
    """Fetch validator-registry state from the context.

    Args:
        state: the transaction-processor state (context) to read from.
        validator_id: if None, fetch the 'validator_map' entry as a
            ValidatorMap; otherwise fetch that validator's entry as a
            ValidatorInfo.

    Returns:
        A ValidatorMap or ValidatorInfo; a default (empty) instance when
        no entry exists at the address yet.

    Raises:
        InternalError: if the state read times out.
    """
    if validator_id is None:
        address = _get_address('validator_map')
        validator_state = ValidatorMap()
    else:
        validator_state = ValidatorInfo()
        address = _get_address(validator_id)
    try:
        entries_list = state.get([address], timeout=STATE_TIMEOUT_SEC)
    except FutureTimeoutError:
        # FIX: corrected typo in log message ("occured" -> "occurred").
        LOGGER.warning('Timeout occurred on state.get([%s])', address)
        raise InternalError('Unable to get {}'.format(address))
    # An absent entry leaves the default (empty) protobuf message.
    if entries_list:
        validator_state.ParseFromString(entries_list[0].data)
    return validator_state
def do_validator_did_claim_block():
    """Exercise ConsensusState.validator_did_claim_block and verify the
    per-validator claim statistics after each claim.
    """
    state = consensus_state.ConsensusState()

    wait_certificate = mock.Mock()
    wait_certificate.duration = 3.1415
    wait_certificate.local_mean = 5.0

    poet_settings_view = mock.Mock()
    poet_settings_view.population_estimate_sample_size = 50

    validator_info = \
        ValidatorInfo(
            id='validator_001',
            signup_info=SignUpInfo(
                poet_public_key='key_001'))

    # Have a non-existent validator claim a block, which should cause the
    # consensus state to add and set statistics appropriately.
    state.validator_did_claim_block(validator_info=validator_info,
                                    wait_certificate=wait_certificate,
                                    poet_settings_view=poet_settings_view)
    validator_state = \
        state.get_validator_state(validator_info=validator_info)
    # FIX: the fetched state was never checked before, so this helper
    # verified nothing.  Assert the expected statistics.
    assert validator_state.key_block_claim_count == 1
    assert validator_state.poet_public_key == 'key_001'
    assert validator_state.total_block_claim_count == 1

    # Have the existing validator claim another block and verify that
    # the consensus and validator statistics are updated properly
    state.validator_did_claim_block(validator_info=validator_info,
                                    wait_certificate=wait_certificate,
                                    poet_settings_view=poet_settings_view)
    validator_state = \
        state.get_validator_state(validator_info=validator_info)
    assert validator_state.key_block_claim_count == 2
    assert validator_state.poet_public_key == 'key_001'
    assert validator_state.total_block_claim_count == 2

    # Have the existing validator claim another block, but with a new key,
    # and verify that the consensus and validator statistics are updated
    # properly: the per-key count resets, the total keeps growing.
    validator_info.signup_info.poet_public_key = 'key_002'
    state.validator_did_claim_block(validator_info=validator_info,
                                    wait_certificate=wait_certificate,
                                    poet_settings_view=poet_settings_view)
    validator_state = \
        state.get_validator_state(validator_info=validator_info)
    assert validator_state.key_block_claim_count == 1
    assert validator_state.poet_public_key == 'key_002'
    assert validator_state.total_block_claim_count == 3
def test_get_missing_validator_state(self):
    """Verify that retrieving missing validator state returns
    appropriate default values.
    """
    state = consensus_state.ConsensusState()

    # A validator ID the consensus state has never seen
    unknown = \
        ValidatorInfo(
            id='validator_001',
            signup_info=SignUpInfo(
                poet_public_key='key_001'))

    vstate = state.get_validator_state(validator_info=unknown)

    self.assertEqual(vstate.key_block_claim_count, 0)
    self.assertEqual(vstate.poet_public_key, 'key_001')
    self.assertEqual(vstate.total_block_claim_count, 0)
def test_signup_info_not_committed_within_allowed_delay(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store, mock_poet_key_state_store,
        mock_signup_info, mock_poet_settings_view, mock_block_wrapper):
    """ Test verifies that PoET Block Publisher fails if
    a validator's signup info was not committed to
    the block chain within the allowed configured delay
    """

    # create a mock_validator_registry_view with
    # get_validator_info that does nothing
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff',
                nonce='nonce'))

    # create a mock_wait_certificate that does nothing in check_valid
    mock_wait_certificate = mock.Mock()
    mock_wait_certificate.check_valid.return_value = None

    mock_utils.deserialize_wait_certificate.return_value = \
        mock_wait_certificate

    # create a mock_consensus_state that returns a mock with
    # the following settings: only the committed-too-late check fails.
    mock_state = MockConsensusState.create_mock_consensus_state(
        committed_too_late=True)

    mock_consensus_state.consensus_state_for_block_id.return_value = \
        mock_state

    mock_consensus_state_store.return_value.__getitem__.return_value = \
        mock_consensus_state

    # Create mock key state
    mock_poet_key_state_store.return_value.__getitem__.return_value = \
        mock.Mock(
            sealed_signup_data='sealed signup data',
            has_been_refreshed=False)

    # create mock_signup_info
    mock_signup_info.create_signup_info.return_value = \
        mock.Mock(
            poet_public_key='poet public key',
            proof_data='proof data',
            anti_sybil_id='anti-sybil ID',
            sealed_signup_data='sealed signup data')
    mock_signup_info.block_id_to_nonce.return_value = 'nonce'
    mock_signup_info.unseal_signup_data.return_value = \
        '00112233445566778899aabbccddeeff'

    # create mock_batch_publisher with a real signer so the publisher
    # can sign the re-registration batch it is expected to produce
    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    signer = crypto_factory.new_signer(private_key)

    mock_batch_publisher = mock.Mock(identity_signer=signer)

    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()

    # create mock_block_header with the following fields
    mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
    mock_block.header.signer_public_key = \
        '90834587139405781349807435098745'
    mock_block.header.previous_block_id = '2'
    mock_block.header.block_num = 1
    mock_block.header.state_root_hash = '6'
    mock_block.header.batch_ids = '4'

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_block_publisher.'
                    'LOGGER') as mock_logger:
        block_publisher = \
            poet_block_publisher.PoetBlockPublisher(
                block_cache=mock_block_cache,
                state_view_factory=mock_state_view_factory,
                batch_publisher=mock_batch_publisher,
                data_dir=self._temp_dir,
                config_dir=self._temp_dir,
                validator_id='validator_deadbeef')

        self.assertFalse(
            block_publisher.initialize_block(
                block_header=mock_block.header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else. I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        self.assertTrue(
            any(
                'Validator signup information not committed in a timely '
                'manner.' in call[0][0]
                for call in mock_logger.info.call_args_list))

        # check that create.signup_info() was called to create
        # the validator registry payload with new set of keys
        self.assertTrue(mock_signup_info.create_signup_info.called)
def test_different_previous_block_id(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store):
    """ When both current and new fork heads are valid PoET blocks
    with different previous block ids,
    the test verifies that the one with
    the higher aggregate local mean wins
    """

    # create a mock_validator_registry_view
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # create a mock_wait_certificate that does nothing in check_valid
    mock_wait_certificate = mock.Mock()
    mock_wait_certificate.check_valid.return_value = None

    # set new_mock_wait_certificate local_mean and duration
    mock_wait_certificate.local_mean = 0.0

    # set mock_utils.deserialize_wait_certificate
    # to return a specific value for each fork_head
    # with cur_fork_head being deserialized first
    # (ten entries: two deserializations per compare_forks call,
    # five subtests below)
    mock_utils.deserialize_wait_certificate.side_effect = \
        [mock_wait_certificate,
         mock_wait_certificate,
         mock_wait_certificate,
         mock_wait_certificate,
         mock_wait_certificate,
         mock_wait_certificate,
         mock_wait_certificate,
         mock_wait_certificate,
         mock_wait_certificate,
         mock_wait_certificate
         ]

    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()

    # create mock_cur_fork_head
    mock_cur_fork_header = \
        mock.Mock(
            identifier='0123456789abcdefedcba9876543210',
            signer_public_key='90834587139405781349807435098745',
            previous_block_id='2',
            header_signature='00112233445566778899aabbccddeeff')

    # create mock_new_fork_head (note the different previous_block_id)
    mock_new_fork_header = \
        mock.Mock(
            identifier='0123456789abcdefedcba9876543211',
            signer_public_key='90834587139405781349807435098745',
            previous_block_id='3',
            header_signature='00112233445566778899aabbccddeeff')

    fork_resolver = \
        poet_fork_resolver.PoetForkResolver(
            block_cache=mock_block_cache,
            state_view_factory=mock_state_view_factory,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            validator_id='validator_deadbeef')

    # Subtest 1: when the current fork head has
    # the higher aggregate local mean

    # create a mock_cur_fork_consensus_state
    mock_cur_fork_consensus_state = mock.Mock()
    mock_cur_fork_consensus_state.aggregate_local_mean = 1.0

    # create a mock_new_fork_consensus_state
    mock_new_fork_consensus_state = mock.Mock()
    mock_new_fork_consensus_state.aggregate_local_mean = 0.0

    # set mock_consensus_state.consensus_state_for_block_id return
    # the current & new fork consensus states
    mock_consensus_state.consensus_state_for_block_id.side_effect = \
        [mock_cur_fork_consensus_state,
         mock_new_fork_consensus_state]

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertFalse(fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_new_fork_header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else. I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('Current fork aggregate local mean (%f) '
                        'greater than new fork aggregate local mean'
                        in message)

    # Subtest 2: when the new fork head has
    # the higher aggregate local mean

    # set mock_consensus_state.consensus_state_for_block_id return
    # the current & new fork consensus states
    mock_consensus_state.consensus_state_for_block_id.side_effect = \
        [mock_cur_fork_consensus_state,
         mock_new_fork_consensus_state,
         mock_new_fork_consensus_state]

    # change the aggregate_local_mean values
    mock_cur_fork_consensus_state.aggregate_local_mean = 0.0
    mock_new_fork_consensus_state.aggregate_local_mean = 1.0

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertTrue(fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_new_fork_header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else. I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('New fork aggregate local mean (%f) '
                        'greater than current fork aggregate local mean '
                        in message)

    # Subtest 3: when both the new & current fork heads have
    # the same aggregate local mean

    # set mock_consensus_state.consensus_state_for_block_id return
    # the current & new fork consensus states
    mock_consensus_state.consensus_state_for_block_id.side_effect = \
        [mock_cur_fork_consensus_state,
         mock_new_fork_consensus_state,
         mock_new_fork_consensus_state]

    # set the aggregate_local_mean values equal
    mock_cur_fork_consensus_state.aggregate_local_mean = 1.0
    mock_new_fork_consensus_state.aggregate_local_mean = 1.0

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertTrue(fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_new_fork_header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else. I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('New fork header signature (%s) '
                        'greater than current fork header signature (%s)'
                        in message)

    # Subset 4: If we have gotten to this point and we have not chosen
    # a fork head yet, we are going to fall back
    # on using the block identifiers (header signatures).
    # The lexicographically larger one will be the chosen one.

    # create mock_new_fork_head with a smaller header_signature
    mock_smaller_header_signature = \
        mock.Mock(
            identifier='0123456789abcdefedcba9876543211',
            signer_public_key='90834587139405781349807435098745',
            previous_block_id='4',
            header_signature='00112233445566778899aabbccddee')

    # create a mock_smaller_header_signature_consensus_state
    mock_smaller_header_signature_consensus_state = mock.Mock()
    mock_smaller_header_signature_consensus_state.\
        aggregate_local_mean = 0.0

    mock_cur_fork_consensus_state.aggregate_local_mean = 0.0

    mock_consensus_state.consensus_state_for_block_id.side_effect = \
        [mock_cur_fork_consensus_state,
         mock_smaller_header_signature_consensus_state]

    # check test when Current fork header signature is greater than
    # the new fork header signature
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertFalse(fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_smaller_header_signature))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else. I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('Current fork header signature'
                        '(%s) greater than new fork header signature (%s)'
                        in message)

    # Subtest 5: Check when new header signature is greater than
    # the current fork header signature
    mock_consensus_state.consensus_state_for_block_id.side_effect = \
        [mock_smaller_header_signature_consensus_state,
         mock_new_fork_consensus_state,
         mock_new_fork_consensus_state]

    mock_smaller_header_signature_consensus_state.\
        aggregate_local_mean = 0.0
    mock_new_fork_consensus_state.aggregate_local_mean = 0.0

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertTrue(fork_resolver.compare_forks(
            cur_fork_head=mock_smaller_header_signature,
            new_fork_head=mock_new_fork_header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else. I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('New fork header signature (%s) '
                        'greater than current fork header signature (%s)'
                        in message)
def test_both_valid_poet_blocks(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store):
    """Verify fork resolution when both fork heads are valid PoET blocks.

    When the current and new fork heads share the same immediate
    previous block, the head whose wait certificate has the smaller
    wait duration is chosen; equal durations fall through to the
    header-signature tie-break.
    """
    # create a mock_validator_registry_view so validator lookups resolve
    # to a fixed identity
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # create a mock_wait_certificate that does nothing in check_valid
    mock_wait_certificate = mock.Mock()
    mock_wait_certificate.check_valid.return_value = None

    # set mock_wait_certificate local_mean and duration
    mock_wait_certificate.local_mean = 0.0
    mock_wait_certificate.duration = 1.0

    # create a new_fork_mock_wait_certificate with a higher duration time
    new_fork_mock_wait_certificate = mock.Mock()
    new_fork_mock_wait_certificate.check_valid.return_value = None
    new_fork_mock_wait_certificate.local_mean = 0.0
    new_fork_mock_wait_certificate.duration = 2.0

    # set mock_utils.deserialize_wait_certificate
    # to return a specific value for each fork_head that is used in
    # poet_fork_resolver.compare()
    # with cur_fork_head being deserialized first (order matters:
    # side_effect is consumed once per call)
    mock_utils.deserialize_wait_certificate.side_effect = \
        [mock_wait_certificate,
         new_fork_mock_wait_certificate
         ]

    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()

    # create mock_cur_fork_head
    mock_cur_fork_header = \
        mock.Mock(
            identifier='0123456789abcdefedcba9876543210',
            signer_public_key='90834587139405781349807435098745',
            previous_block_id='2',
            header_signature='00112233445566778899aabbccddeeff')

    # create mock_new_fork_head with same previous block id
    mock_new_fork_header = \
        mock.Mock(
            identifier='0123456789abcdefedcba9876543211',
            signer_public_key='90834587139405781349807435098745',
            previous_block_id='2',
            header_signature='00112233445566778899aabbccddeeff')

    fork_resolver = \
        poet_fork_resolver.PoetForkResolver(
            block_cache=mock_block_cache,
            state_view_factory=mock_state_view_factory,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            validator_id='validator_deadbeef')

    # Subtest 1: when current fork head has the smaller wait duration
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertFalse(fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_new_fork_header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else. I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('Current fork wait duration (%f) '
                        'less than new fork wait duration (%f)'
                        in message)

    # Subtest 2: when new fork head has the smaller wait duration

    # change new_fork_mock_wait_certificate duration to a smaller value
    new_fork_mock_wait_certificate.duration = 0.0

    # set mock_utils.deserialize_wait_certificate
    # to return a specific value for each fork_head
    # with cur_fork_head being deserialized first
    mock_utils.deserialize_wait_certificate.side_effect = \
        [mock_wait_certificate,
         new_fork_mock_wait_certificate]

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertTrue(fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_new_fork_header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else. I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('New fork wait duration (%f) '
                        'less than current fork wait duration '
                        in message)

    # Subtest 3: when new & current fork heads have
    # the same wait duration

    # change new_fork_mock_wait_certificate duration back to the same
    # value as mock_wait_certificate so the durations tie
    new_fork_mock_wait_certificate.duration = 1.0

    # set mock_utils.deserialize_wait_certificate
    # to return a specific value for each fork_head
    # with cur_fork_head being deserialized first
    mock_utils.deserialize_wait_certificate.side_effect = \
        [mock_wait_certificate,
         new_fork_mock_wait_certificate]

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertTrue(fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_new_fork_header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else. I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        # NOTE(review): both mock headers above carry identical
        # header_signature values, yet the tie-break is expected to log
        # "New fork header signature ... greater" and return True —
        # TODO confirm against the resolver's tie-break semantics.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('New fork header signature (%s) '
                        'greater than current fork header signature (%s)'
                        in message)
def test_cur_fork_head_not_poet_block(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store):
    """Verify fork resolution when the current fork head is not a valid
    PoET block.

    If new_fork_head.previous_block_id == cur_fork_head.identifier
    then the new fork head switches consensus; otherwise the resolver
    raises TypeError - trying to compare a PoET block to a non-PoET
    block that is not the direct predecessor.
    """
    # create a mock_validator_registry_view so validator lookups resolve
    # to a fixed identity
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # create a mock_wait_certificate that does nothing in check_valid
    mock_wait_certificate = mock.Mock()
    mock_wait_certificate.check_valid.return_value = None

    # set mock_utils.deserialize_wait_certificate
    # to return a specific value for each fork_head that is used in
    # poet_fork_resolver.compare()
    # with cur_fork_head being deserialized first.
    # None for cur_fork_head marks it as a non-PoET block; the list
    # holds two pairs because compare_forks is invoked twice below.
    mock_utils.deserialize_wait_certificate.side_effect = \
        [None, mock_wait_certificate,
         None, mock_wait_certificate]

    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()

    # create mock_cur_fork_head
    mock_cur_fork_header = \
        mock.Mock(
            identifier='0123456789abcdefedcba9876543210',
            signer_public_key='90834587139405781349807435098745',
            previous_block_id='2',
            header_signature='00112233445566778899aabbccddeeff')

    # create mock_new_fork_head
    mock_new_fork_header = \
        mock.Mock(
            identifier='0123456789abcdefedcba9876543211',
            signer_public_key='90834587139405781349807435098745',
            previous_block_id='2',
            header_signature='00112233445566778899aabbccddeeff')

    fork_resolver = \
        poet_fork_resolver.PoetForkResolver(
            block_cache=mock_block_cache,
            state_view_factory=mock_state_view_factory,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            validator_id='validator_deadbeef')

    # Subtest 1: check that the test fails when the current
    # fork head is not a valid PoET block
    with self.assertRaises(TypeError) as cm:
        fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_new_fork_header)
    self.assertEqual(
        'Trying to compare a PoET block to a non-PoET '
        'block that is not the direct predecessor',
        str(cm.exception))

    # Subtest 2: check that if new_fork_head.previous_block_id
    # == cur_fork_head.identifier
    # then the new fork head switches consensus

    # modify mock_cur_fork_header.identifier so the non-PoET current
    # head becomes the direct predecessor of the new head
    mock_cur_fork_header.identifier = \
        mock_new_fork_header.previous_block_id

    # check test
    with mock.patch('sawtooth_poet.poet_consensus.poet_fork_resolver.'
                    'LOGGER') as mock_logger:
        self.assertTrue(fork_resolver.compare_forks(
            cur_fork_head=mock_cur_fork_header,
            new_fork_head=mock_new_fork_header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else. I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('New fork head switches consensus to PoET'
                        in message)
def test_block_publisher_doesnt_finalize_block(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store, mock_poet_key_state_store,
        mock_signup_info, mock_wait_certificate,
        mock_poet_settings_view, mock_block_wrapper):
    """ Test verifies that PoET Block Publisher doesn't finalize
        a candidate block that doesn't have a valid wait certificate.
    """
    # create a mock_validator_registry_view with
    # get_validator_info that does nothing
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # create a mock_wait_certificate that pretends to fail
    # (finalize_block is expected to catch this and return False)
    mock_wait_certificate.create_wait_certificate.side_effect = \
        ValueError('Unit test fake failure')

    # create a mock_consensus_state that returns a mock with
    # the following settings:
    mock_state = MockConsensusState().create_mock_consensus_state()

    mock_consensus_state.consensus_state_for_block_id.return_value = \
        mock_state

    # create mock_batch_publisher
    mock_batch_publisher = mock.Mock(
        identity_signing_key=signing.generate_private_key())

    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()

    # create mock_block_header with the following fields
    mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
    mock_block.header.signer_public_key = \
        '90834587139405781349807435098745'
    mock_block.header.previous_block_id = '2'
    mock_block.header.block_num = 1
    mock_block.header.state_root_hash = '6'
    mock_block.header.batch_ids = '4'

    # check test — patch the module LOGGER so the error log emitted on
    # wait-certificate failure can be inspected below
    with mock.patch('sawtooth_poet.poet_consensus.poet_block_publisher.'
                    'LOGGER') as mock_logger:
        block_publisher = \
            poet_block_publisher.PoetBlockPublisher(
                block_cache=mock_block_cache,
                state_view_factory=mock_state_view_factory,
                batch_publisher=mock_batch_publisher,
                data_dir=self._temp_dir,
                config_dir=self._temp_dir,
                validator_id='validator_deadbeef')

        with mock.patch('sawtooth_poet.poet_consensus.'
                        'poet_block_publisher.json') as _:
            self.assertFalse(
                block_publisher.finalize_block(
                    block_header=mock_block.header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else. I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.error.call_args
        self.assertTrue('Failed to create wait certificate: ' in message)
def test_block_publisher_finalize_block(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store, mock_poet_key_state_store,
        mock_signup_info, mock_wait_certificate,
        mock_poet_settings_view, mock_block_wrapper):
    """Verify that the PoET block publisher finalizes a good candidate
    block: finalize_block returns True when a wait certificate can be
    created and passes validation.
    """
    # Registry lookups resolve to a fixed validator identity.
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # Wait-certificate creation succeeds and the resulting certificate
    # validates cleanly (check_valid raises nothing).
    valid_certificate = mock.Mock()
    valid_certificate.check_valid.return_value = None
    mock_wait_certificate.create_wait_certificate.return_value = \
        valid_certificate

    # Consensus-state lookups hand back a canned mock state.
    canned_state = MockConsensusState().create_mock_consensus_state()
    mock_consensus_state.consensus_state_for_block_id.return_value = \
        canned_state

    # Batch publisher carries a signing key for registry transactions.
    batch_publisher = mock.Mock(
        identity_signing_key=signing.generate_private_key())

    block_cache = mock.MagicMock()
    state_view_factory = mock.Mock()

    # Candidate block header under test.
    block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
    header = block.header
    header.signer_public_key = '90834587139405781349807435098745'
    header.previous_block_id = '2'
    header.block_num = 1
    header.state_root_hash = '6'
    header.batch_ids = '4'

    publisher = poet_block_publisher.PoetBlockPublisher(
        block_cache=block_cache,
        state_view_factory=state_view_factory,
        batch_publisher=batch_publisher,
        data_dir=self._temp_dir,
        config_dir=self._temp_dir,
        validator_id='validator_deadbeef')

    # With a valid certificate available, the candidate block is good
    # and should be finalized.
    with mock.patch('sawtooth_poet.poet_consensus.'
                    'poet_block_publisher.json') as _:
        self.assertTrue(
            publisher.finalize_block(block_header=header))
def test_consensus_store_set_get(self, mock_lmdb):
    """Verify that externally visible state (len, etc.) of the consensus
    state store after set is expected. Verify that retrieving a
    previously set consensus state object results in the same values
    set.
    """
    # Make LMDB return empty dict so the store is backed by plain
    # in-memory storage we can also inspect directly
    my_dict = {}
    mock_lmdb.return_value = my_dict

    mock_poet_settings_view = mock.Mock()
    mock_poet_settings_view.target_wait_time = 30.0
    mock_poet_settings_view.initial_wait_time = 3000.0
    mock_poet_settings_view.minimum_wait_time = 1.0
    mock_poet_settings_view.population_estimate_sample_size = 50

    store = \
        consensus_state_store.ConsensusStateStore(
            data_dir=tempfile.gettempdir(),
            validator_id='0123456789abcdef')

    # Verify the length is zero and doesn't contain key
    self.assertEqual(len(store), 0)
    self.assertTrue('key' not in store)

    # Store consensus state
    state = consensus_state.ConsensusState()
    store['key'] = state

    # Verify the length and contains key (both through the store API
    # and in the underlying dict)
    self.assertEqual(len(store), 1)
    self.assertEqual(len(my_dict), 1)
    self.assertTrue('key' in store)
    self.assertTrue('key' in my_dict)

    # Retrieve the state and verify equality
    retrieved_state = store['key']
    self.assertEqual(state.aggregate_local_mean,
                     retrieved_state.aggregate_local_mean)
    self.assertEqual(state.total_block_claim_count,
                     retrieved_state.total_block_claim_count)

    # Have a validator claim a block and update the store
    wait_certificate = mock.Mock()
    wait_certificate.duration = 3.1415
    wait_certificate.local_mean = 5.0
    validator_info = \
        ValidatorInfo(
            id='validator_001',
            signup_info=SignUpInfo(
                poet_public_key='key_001'))
    state.validator_did_claim_block(
        validator_info=validator_info,
        wait_certificate=wait_certificate,
        poet_settings_view=mock_poet_settings_view)
    store['key'] = state

    # Verify the length and contains key
    self.assertEqual(len(store), 1)
    self.assertEqual(len(my_dict), 1)
    self.assertTrue('key' in store)
    self.assertTrue('key' in my_dict)

    # Retrieve the state and verify equality
    retrieved_state = store['key']
    self.assertEqual(state.aggregate_local_mean,
                     retrieved_state.aggregate_local_mean)
    self.assertEqual(state.total_block_claim_count,
                     retrieved_state.total_block_claim_count)

    # Compare the per-validator state of the in-memory object against
    # the per-validator state of the round-tripped object.
    # BUGFIX: previously both sides were fetched from retrieved_state,
    # which compared a value against itself and made the assertions
    # below vacuous; take one side from the original state instead,
    # matching the aggregate comparisons above.
    validator_state = \
        state.get_validator_state(
            validator_info=validator_info)
    retrieved_validator_state = \
        retrieved_state.get_validator_state(
            validator_info=validator_info)
    self.assertEqual(validator_state.key_block_claim_count,
                     retrieved_validator_state.key_block_claim_count)
    self.assertEqual(validator_state.poet_public_key,
                     retrieved_validator_state.poet_public_key)
    self.assertEqual(validator_state.total_block_claim_count,
                     retrieved_validator_state.total_block_claim_count)

    # Delete the key and then verify length and does not contain key
    del store['key']
    self.assertEqual(len(store), 0)
    self.assertEqual(len(my_dict), 0)
    self.assertTrue('key' not in store)
    self.assertTrue('key' not in my_dict)
    with self.assertRaises(KeyError):
        _ = store['key']
def test_create(self):
    """Verify that a wait certificate created from a wait timer
    correlates with that timer (previous certificate id, local mean,
    request time, duration, validator address), is valid when checked,
    and that a follow-on certificate chained to the first is also valid.
    """
    # Need to create signup information and wait timer first
    signup_info = \
        SignupInfo.create_signup_info(
            poet_enclave_module=self.poet_enclave_module,
            originator_public_key_hash=self._originator_public_key_hash,
            nonce=NULL_BLOCK_IDENTIFIER)

    # create mock_poet_enclave_wait_timer
    mock_poet_enclave_wait_timer = \
        mock.Mock(validator_address='1060 W Addison Street',
                  duration=1.0,
                  previous_certificate_id=NULL_BLOCK_IDENTIFIER,
                  local_mean=5.0,
                  signature='00112233445566778899aabbccddeeff',
                  serialized_timer=None,
                  request_time=time.time())

    # create mock_poet_enclave_wait_certificate
    mock_poet_enclave_wait_certificate = \
        mock.Mock(duration=1.0,
                  previous_certificate_id=NULL_BLOCK_IDENTIFIER,
                  local_mean=5.0,
                  request_time=time.time(),
                  validator_address='1060 W Addison Street',
                  nonce=NULL_BLOCK_IDENTIFIER,
                  block_hash="Reader's Digest",
                  signature='00112233445566778899aabbccddeeff',
                  serialized_certificate='001122334455667'
                                         '78899aabbccddeeff')

    # create mock_poet_enclave_module
    mock_poet_enclave_module = mock.Mock()
    mock_poet_enclave_module.create_wait_timer.return_value = \
        mock_poet_enclave_wait_timer

    # set the mock enclave wait certificate and wait timer to
    # have the same request_time (so the certificate correlates
    # with the timer below)
    mock_poet_enclave_wait_certificate.request_time = \
        mock_poet_enclave_wait_timer.request_time

    # set the mock enclave wait certificate and wait timer to
    # have the same previous_certificate_id
    mock_poet_enclave_wait_certificate.previous_certificate_id = \
        mock_poet_enclave_wait_timer.previous_certificate_id

    # set the identifier for mock_poet_enclave_wait_certificate
    mock_poet_enclave_wait_certificate.identifier.return_value = \
        mock_poet_enclave_wait_certificate.previous_certificate_id[:16]

    mock_poet_enclave_module.create_wait_certificate.return_value = \
        mock_poet_enclave_wait_certificate
    mock_poet_enclave_module.deserialize_wait_certificate.return_value = \
        mock_poet_enclave_wait_certificate

    # create wait timer
    wt = \
        WaitTimer.create_wait_timer(
            poet_enclave_module=mock_poet_enclave_module,
            validator_address='1660 Pennsylvania Avenue NW',
            previous_certificate_id=NULL_BLOCK_IDENTIFIER,
            consensus_state=self.mock_consensus_state,
            poet_settings_view=self.mock_poet_settings_view)

    # Now we can create a wait certificate and verify that it correlates
    # to the wait timer we just created
    wc = \
        WaitCertificate.create_wait_certificate(
            poet_enclave_module=mock_poet_enclave_module,
            wait_timer=wt,
            block_hash="Reader's Digest")
    self.assertIsNotNone(wc)

    self.assertEqual(wc.previous_certificate_id,
                     wt.previous_certificate_id)
    self.assertAlmostEqual(wc.local_mean, wt.local_mean)
    self.assertAlmostEqual(wc.request_time, wt.request_time)
    self.assertAlmostEqual(wc.duration, wt.duration)
    self.assertEqual(wc.validator_address, wt.validator_address)
    self.assertEqual(wc.block_hash, "Reader's Digest")
    self.assertIsNotNone(wc.signature)
    self.assertIsNotNone(wc.identifier)

    # A newly-created wait certificate should be valid
    wc.check_valid(poet_enclave_module=mock_poet_enclave_module,
                   previous_certificate_id=NULL_BLOCK_IDENTIFIER,
                   poet_public_key=signup_info.poet_public_key,
                   consensus_state=self.mock_consensus_state,
                   poet_settings_view=self.mock_poet_settings_view)

    # Record a block claim so the consensus state reflects the first
    # certificate before chaining a second one
    validator_info = \
        ValidatorInfo(
            id='validator_001',
            signup_info=SignUpInfo(
                poet_public_key='key_001'))
    self.mock_consensus_state.validator_did_claim_block(
        validator_info=validator_info,
        wait_certificate=wc,
        poet_settings_view=self.mock_poet_settings_view)

    # Create another wait certificate and verify it is valid also
    wt = \
        WaitTimer.create_wait_timer(
            poet_enclave_module=mock_poet_enclave_module,
            validator_address='1660 Pennsylvania Avenue NW',
            previous_certificate_id=wc.identifier,
            consensus_state=self.mock_consensus_state,
            poet_settings_view=self.mock_poet_settings_view)

    # Now we can create a wait certificate and verify that it correlates
    # to the wait timer we just created
    another_wc = \
        WaitCertificate.create_wait_certificate(
            poet_enclave_module=mock_poet_enclave_module,
            wait_timer=wt,
            block_hash="Pepto Bismol")

    another_wc.check_valid(poet_enclave_module=mock_poet_enclave_module,
                           previous_certificate_id=wc.identifier,
                           poet_public_key=signup_info.poet_public_key,
                           consensus_state=self.mock_consensus_state,
                           poet_settings_view=self.mock_poet_settings_view)
def test_z_policy(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store, mock_poet_key_state_store,
        mock_signup_info, mock_poet_settings_view, mock_block_wrapper):
    """ Z Policy: Test verifies that PoET Block Publisher fails if
    a validator attempts to claim blocks more frequently than is allowed
    """
    # create a mock_validator_registry_view with
    # get_validator_info that does nothing
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # create a mock_wait_certificate that does nothing in check_valid
    mock_wait_certificate = mock.Mock()
    mock_wait_certificate.check_valid.return_value = None

    mock_utils.deserialize_wait_certificate.return_value = \
        mock_wait_certificate

    # create a mock_consensus_state that returns a mock with
    # the following settings: the validator is flagged as claiming
    # blocks too frequently, which must make initialize_block fail
    mock_state = MockConsensusState.create_mock_consensus_state(
        claiming_too_frequently=True)

    mock_consensus_state.consensus_state_for_block_id.return_value = \
        mock_state
    mock_consensus_state_store.return_value.__getitem__.return_value = \
        mock_consensus_state

    # Create mock key state
    mock_poet_key_state_store.return_value.__getitem__.return_value = \
        mock.Mock(
            sealed_signup_data='sealed signup data',
            has_been_refreshed=False)

    # create mock_signup_info
    mock_signup_info.unseal_signup_data.return_value = \
        '00112233445566778899aabbccddeeff'

    # create mock_batch_publisher
    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    signer = crypto_factory.new_signer(private_key)

    mock_batch_publisher = mock.Mock(identity_signer=signer)

    mock_block_cache = mock.MagicMock()
    mock_state_view_factory = mock.Mock()

    # create mock_block_header with the following fields
    mock_block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
    mock_block.header.signer_public_key = \
        '90834587139405781349807435098745'
    mock_block.header.previous_block_id = '2'
    mock_block.header.block_num = 1
    mock_block.header.state_root_hash = '6'
    mock_block.header.batch_ids = '4'

    # check test — patch the module LOGGER so the refusal log can be
    # inspected below
    with mock.patch('sawtooth_poet.poet_consensus.poet_block_publisher.'
                    'LOGGER') as mock_logger:
        block_publisher = \
            poet_block_publisher.PoetBlockPublisher(
                block_cache=mock_block_cache,
                state_view_factory=mock_state_view_factory,
                batch_publisher=mock_batch_publisher,
                data_dir=self._temp_dir,
                config_dir=self._temp_dir,
                validator_id='validator_deadbeef')

        self.assertFalse(
            block_publisher.initialize_block(
                block_header=mock_block.header))

        # Could be a hack, but verify that the appropriate log message is
        # generated - so we at least have some faith that the failure was
        # because of what we are testing and not something else. I know
        # that this is fragile if the log message is changed, so would
        # accept any suggestions on a better way to verify that the
        # function fails for the reason we expect.
        (message, *_), _ = mock_logger.info.call_args
        self.assertTrue('is claiming blocks too '
                        'frequently' in message)
def test_block_publisher_doesnt_claim_readiness(
        self, mock_utils, mock_validator_registry_view,
        mock_consensus_state, mock_poet_enclave_factory,
        mock_consensus_state_store, mock_poet_key_state_store,
        mock_signup_info, mock_wait_time,
        mock_poet_settings_view, mock_block_wrapper):
    """Verify that the PoET block publisher does not claim readiness
    to publish while its wait timer has not yet expired.
    """
    # Registry lookups resolve to a fixed validator identity.
    mock_validator_registry_view.return_value.get_validator_info. \
        return_value = \
        ValidatorInfo(
            name='validator_001',
            id='validator_deadbeef',
            signup_info=SignUpInfo(
                poet_public_key='00112233445566778899aabbccddeeff'))

    # Consensus-state lookups hand back a canned mock state.
    canned_state = MockConsensusState.create_mock_consensus_state()
    mock_consensus_state.consensus_state_for_block_id.return_value = \
        canned_state
    mock_consensus_state_store.return_value.__getitem__.return_value = \
        mock_consensus_state

    # Key-state store returns sealed signup data, and unsealing it
    # yields the validator's PoET public key.
    mock_poet_key_state_store.return_value.__getitem__.return_value = \
        mock.Mock(
            sealed_signup_data='sealed signup data',
            has_been_refreshed=False)
    mock_signup_info.unseal_signup_data.return_value = \
        '00112233445566778899aabbccddeeff'

    # Batch publisher carries a real secp256k1 signer.
    signing_context = create_context('secp256k1')
    signing_key = signing_context.new_random_private_key()
    batch_publisher = mock.Mock(
        identity_signer=CryptoFactory(signing_context).new_signer(
            signing_key))

    block_cache = mock.MagicMock()
    state_view_factory = mock.Mock()

    # Candidate block header under test.
    block = mock.Mock(identifier='0123456789abcdefedcba9876543210')
    header = block.header
    header.signer_public_key = '90834587139405781349807435098745'
    header.previous_block_id = '2'
    header.block_num = 1
    header.state_root_hash = '6'
    header.batch_ids = '4'

    # Wait timer that reports it has not expired yet.
    unexpired_timer = mock.Mock()
    unexpired_timer.has_expired.return_value = False
    mock_wait_time.create_wait_timer.return_value = unexpired_timer

    # Enclave module mirror of what the factory hands out.
    enclave_module = mock.Mock()
    enclave_module.return_value = \
        mock_poet_enclave_factory.get_poet_enclave_module.return_value

    publisher = poet_block_publisher.PoetBlockPublisher(
        block_cache=block_cache,
        state_view_factory=state_view_factory,
        batch_publisher=batch_publisher,
        data_dir=self._temp_dir,
        config_dir=self._temp_dir,
        validator_id='validator_deadbeef')

    # Initialization must succeed first, installing the wait timer.
    self.assertTrue(
        publisher.initialize_block(
            block_header=header))

    # With the timer unexpired, the publisher must not claim readiness.
    self.assertFalse(
        publisher.check_publish_block(
            block_header=header))
def _parse_validator_info(state_data):
    """Deserialize and return a ValidatorInfo message from raw state bytes.

    Args:
        state_data (bytes): serialized ValidatorInfo protobuf payload.

    Returns:
        ValidatorInfo: the parsed message.
    """
    info = ValidatorInfo()
    info.ParseFromString(state_data)
    return info