    def __init__(self, bind_network, bind_component, bind_consensus,
                 endpoint, peering, seeds_list, peer_list, data_dir,
                 config_dir, identity_signer, scheduler_type, permissions,
                 minimum_peer_connectivity, maximum_peer_connectivity,
                 state_pruning_block_depth,
                 network_public_key=None, network_private_key=None,
                 roles=None):
        """Constructs a validator instance.

        Args:
            bind_network (str): the network endpoint
            bind_component (str): the component endpoint
            endpoint (str): the zmq-style URI of this validator's
                publicly reachable endpoint
            peering (str): The type of peering approach. Either 'static'
                or 'dynamic'. In 'static' mode, no attempted topology
                buildout occurs -- the validator only attempts to initiate
                peering connections with endpoints specified in the
                peer_list. In 'dynamic' mode, the validator will first
                attempt to initiate peering connections with endpoints
                specified in the peer_list and then attempt to do a
                topology buildout starting with peer lists obtained from
                endpoints in the seeds_list. In either mode, the validator
                will accept incoming peer requests up to max_peers.
            seeds_list (list of str): a list of addresses to connect
                to in order to perform the initial topology buildout
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            config_dir (str): path to the config directory
            identity_signer (str): cryptographic signer the validator uses
                for signing
        """
        # -- Setup Global State Database and Factory -- #
        global_state_db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug(
            'global state database file is %s', global_state_db_filename)
        global_state_db = NativeLmdbDatabase(
            global_state_db_filename,
            indexes=MerkleDatabase.create_index_configuration())
        state_view_factory = StateViewFactory(global_state_db)

        # -- Setup Receipt Store -- #
        receipt_db_filename = os.path.join(
            data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('txn receipt store file is %s', receipt_db_filename)
        receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c')
        receipt_store = TransactionReceiptStore(receipt_db)

        # -- Setup Block Store -- #
        block_db_filename = os.path.join(
            data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)
        block_db = IndexedDatabase(
            block_db_filename,
            BlockStore.serialize_block,
            BlockStore.deserialize_block,
            flag='c',
            indexes=BlockStore.create_index_configuration())
        block_store = BlockStore(block_db)

        # The cache keep time for the journal's block cache must be greater
        # than the cache keep time used by the completer.
        base_keep_time = 1200
        block_cache = BlockCache(
            block_store,
            keep_time=int(base_keep_time * 9 / 8),
            purge_frequency=30)

        # -- Setup Thread Pools -- #
        component_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=10,
            name='Component')
        network_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=10,
            name='Network')
        client_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=5,
            name='Client')
        sig_pool = InstrumentedThreadPoolExecutor(
            max_workers=3,
            name='Signature')

        # -- Setup Dispatchers -- #
        component_dispatcher = Dispatcher()
        network_dispatcher = Dispatcher()

        # -- Setup Services -- #
        component_service = Interconnect(
            bind_component,
            component_dispatcher,
            secured=False,
            heartbeat=False,
            max_incoming_connections=20,
            monitor=True,
            max_future_callback_workers=10)

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        secure = False
        if network_public_key is not None and network_private_key is not None:
            secure = True

        network_service = Interconnect(
            bind_network,
            dispatcher=network_dispatcher,
            zmq_identity=zmq_identity,
            secured=secure,
            server_public_key=network_public_key,
            server_private_key=network_private_key,
            heartbeat=True,
            public_endpoint=endpoint,
            connection_timeout=120,
            max_incoming_connections=100,
            max_future_callback_workers=10,
            authorize=True,
            signer=identity_signer,
            roles=roles)

        # -- Setup Transaction Execution Platform -- #
        context_manager = ContextManager(global_state_db)

        batch_tracker = BatchTracker(block_store)

        settings_cache = SettingsCache(
            SettingsViewFactory(state_view_factory),
        )

        transaction_executor = TransactionExecutor(
            service=component_service,
            context_manager=context_manager,
            settings_view_factory=SettingsViewFactory(state_view_factory),
            scheduler_type=scheduler_type,
            invalid_observers=[batch_tracker])

        component_service.set_check_connections(
            transaction_executor.check_connections)

        event_broadcaster = EventBroadcaster(
            component_service,
            block_store,
            receipt_store)

        # -- Setup P2P Networking -- #
        gossip = Gossip(
            network_service,
            settings_cache,
            lambda: block_store.chain_head,
            block_store.chain_head_state_root,
            endpoint=endpoint,
            peering_mode=peering,
            initial_seed_endpoints=seeds_list,
            initial_peer_endpoints=peer_list,
            minimum_peer_connectivity=minimum_peer_connectivity,
            maximum_peer_connectivity=maximum_peer_connectivity,
            topology_check_frequency=1)

        completer = Completer(
            block_store,
            gossip,
            cache_keep_time=base_keep_time,
            cache_purge_frequency=30,
            requested_keep_time=300)

        block_sender = BroadcastBlockSender(completer, gossip)
        batch_sender = BroadcastBatchSender(completer, gossip)
        chain_id_manager = ChainIdManager(data_dir)

        identity_view_factory = IdentityViewFactory(
            StateViewFactory(global_state_db))
        id_cache = IdentityCache(identity_view_factory)

        # -- Setup Permissioning -- #
        permission_verifier = PermissionVerifier(
            permissions,
            block_store.chain_head_state_root,
            id_cache)

        identity_observer = IdentityObserver(
            to_update=id_cache.invalidate,
            forked=id_cache.forked)

        settings_observer = SettingsObserver(
            to_update=settings_cache.invalidate,
            forked=settings_cache.forked)

        # -- Consensus Engine -- #
        consensus_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=3,
            name='Consensus')
        consensus_dispatcher = Dispatcher()

        consensus_service = Interconnect(
            bind_consensus,
            consensus_dispatcher,
            secured=False,
            heartbeat=False,
            max_incoming_connections=20,
            monitor=True,
            max_future_callback_workers=10)

        consensus_notifier = ConsensusNotifier(consensus_service)

        # -- Setup Journal -- #
        batch_injector_factory = DefaultBatchInjectorFactory(
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            signer=identity_signer)

        block_publisher = BlockPublisher(
            transaction_executor=transaction_executor,
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            settings_cache=settings_cache,
            block_sender=block_sender,
            batch_sender=batch_sender,
            chain_head=block_store.chain_head,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            permission_verifier=permission_verifier,
            check_publish_block_frequency=0.1,
            batch_observers=[batch_tracker],
            batch_injector_factory=batch_injector_factory)

        block_publisher_batch_sender = block_publisher.batch_sender()

        block_validator = BlockValidator(
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            transaction_executor=transaction_executor,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            permission_verifier=permission_verifier)

        chain_controller = ChainController(
            block_store=block_store,
            block_cache=block_cache,
            block_validator=block_validator,
            state_database=global_state_db,
            chain_head_lock=block_publisher.chain_head_lock,
            state_pruning_block_depth=state_pruning_block_depth,
            data_dir=data_dir,
            observers=[
                event_broadcaster,
                receipt_store,
                batch_tracker,
                identity_observer,
                settings_observer
            ])

        genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=transaction_executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender)

        responder = Responder(completer)

        completer.set_on_batch_received(block_publisher_batch_sender.send)
        completer.set_on_block_received(chain_controller.queue_block)
        completer.set_chain_has_block(chain_controller.has_block)

        # -- Register Message Handlers -- #
        network_handlers.add(
            network_dispatcher, network_service, gossip, completer,
            responder, network_thread_pool, sig_pool,
            chain_controller.has_block, block_publisher.has_batch,
            permission_verifier, block_publisher, consensus_notifier)

        component_handlers.add(
            component_dispatcher, gossip, context_manager,
            transaction_executor, completer, block_store, batch_tracker,
            global_state_db, self.get_chain_head_state_root_hash,
            receipt_store, event_broadcaster, permission_verifier,
            component_thread_pool, client_thread_pool, sig_pool,
            block_publisher)

        # -- Store Object References -- #
        self._component_dispatcher = component_dispatcher
        self._component_service = component_service
        self._component_thread_pool = component_thread_pool

        self._network_dispatcher = network_dispatcher
        self._network_service = network_service
        self._network_thread_pool = network_thread_pool

        consensus_proxy = ConsensusProxy(
            block_cache=block_cache,
            chain_controller=chain_controller,
            block_publisher=block_publisher,
            gossip=gossip,
            identity_signer=identity_signer,
            settings_view_factory=SettingsViewFactory(state_view_factory),
            state_view_factory=state_view_factory)

        consensus_handlers.add(
            consensus_dispatcher, consensus_thread_pool, consensus_proxy)

        self._consensus_dispatcher = consensus_dispatcher
        self._consensus_service = consensus_service
        self._consensus_thread_pool = consensus_thread_pool

        self._client_thread_pool = client_thread_pool
        self._sig_pool = sig_pool

        self._context_manager = context_manager
        self._transaction_executor = transaction_executor
        self._genesis_controller = genesis_controller

        self._gossip = gossip
        self._block_publisher = block_publisher
        self._chain_controller = chain_controller
        self._block_validator = block_validator
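    # A minimal usage sketch of this constructor, kept as a comment so it does
    # not execute as part of the module. It assumes this __init__ belongs to a
    # Validator class (as its docstring indicates) and that the
    # sawtooth_signing package is available; the endpoints, directories, and
    # parameter values below are illustrative placeholders, not values taken
    # from this file.
    #
    #     from sawtooth_signing import create_context, CryptoFactory
    #
    #     context = create_context('secp256k1')
    #     signer = CryptoFactory(context).new_signer(
    #         context.new_random_private_key())
    #
    #     validator = Validator(
    #         bind_network='tcp://127.0.0.1:8800',
    #         bind_component='tcp://127.0.0.1:4004',
    #         bind_consensus='tcp://127.0.0.1:5050',
    #         endpoint='tcp://127.0.0.1:8800',
    #         peering='static',
    #         seeds_list=[],
    #         peer_list=[],
    #         data_dir='/tmp/sawtooth/data',
    #         config_dir='/tmp/sawtooth/config',
    #         identity_signer=signer,
    #         scheduler_type='serial',
    #         permissions={},
    #         minimum_peer_connectivity=3,
    #         maximum_peer_connectivity=10,
    #         state_pruning_block_depth=100)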
class TestSettingsObserver(unittest.TestCase):
    def setUp(self):
        self._settings_view_factory = MockSettingsViewFactory()
        self._settings_cache = SettingsCache(
            self._settings_view_factory,
        )
        self._settings_observer = SettingsObserver(
            to_update=self._settings_cache.invalidate,
            forked=self._settings_cache.forked)

        # Make sure SettingsCache has populated settings
        self._settings_view_factory.add_setting("setting1", "test")

    def create_block(self, previous_block_id="0000000000000000"):
        block_header = BlockHeader(
            block_num=85,
            state_root_hash="0987654321fedcba",
            previous_block_id=previous_block_id)
        block = BlockWrapper(
            Block(
                header_signature="abcdef1234567890",
                header=block_header.SerializeToString()))
        return block

    def test_chain_update(self):
        """
        Test that if there is no fork and only one value is updated, only
        that value is invalidated in the cache.
        """
        # Set up cache so it does not fork
        block1 = self.create_block()
        self._settings_observer.chain_update(block1, [])
        self._settings_cache.get_setting("setting1", "state_root")
        self.assertNotEqual(self._settings_cache["setting1"], None)

        # Add next block and an event that says setting1 was updated.
        block2 = self.create_block("abcdef1234567890")
        event = Event(
            event_type="settings/update",
            attributes=[Event.Attribute(key="updated", value="setting1")])
        receipts = TransactionReceipt(events=[event])
        self._settings_observer.chain_update(block2, [receipts])

        # Check that only "setting1" was invalidated
        self.assertEqual(self._settings_cache["setting1"], None)

        # Check that the correct values can be fetched from state.
        settings_view = \
            self._settings_view_factory.create_settings_view("state_root")
        self.assertEqual(
            self._settings_cache.get_setting("setting1", "state_root"),
            settings_view.get_setting("setting1"))

    def test_fork(self):
        """
        Test that if there is a fork, all values in the cache will be
        invalidated and fetched from state.
        """
        block = self.create_block()
        self._settings_observer.chain_update(block, [])

        # Check that all items are invalid
        for key in self._settings_cache:
            self.assertEqual(self._settings_cache[key], None)

        # Check that the items can be fetched from state.
        settings_view = \
            self._settings_view_factory.create_settings_view("state_root")
        self.assertEqual(
            self._settings_cache.get_setting("setting1", "state_root"),
            settings_view.get_setting("setting1"))
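# The tests above depend on a MockSettingsViewFactory that is not shown in
# this excerpt. What follows is a minimal sketch of what such a mock might
# look like, assuming SettingsCache only needs create_settings_view() and a
# get_setting() method on the returned view; the class and method bodies
# here are illustrative, not the project's actual test double.
class MockSettingsView:
    def __init__(self, settings):
        self._settings = settings

    def get_setting(self, key):
        # Return the stored value, or None if the setting is unknown.
        return self._settings.get(key)


class MockSettingsViewFactory:
    def __init__(self):
        self._settings = {}

    def add_setting(self, key, value):
        # Register a setting that every created view will report.
        self._settings[key] = value

    def create_settings_view(self, state_root_hash):
        # The state root hash is accepted for interface compatibility but
        # ignored by this in-memory mock.
        return MockSettingsView(self._settings)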