def get_databases(bind_network, data_dir, database=None):
    """Open the global state database and the block store.

    Args:
        bind_network (str): the network endpoint; its last two characters
            distinguish the LMDB file names.
        data_dir (str): directory holding the database files.
        database (str, optional): an OrientDB URI; when provided, blocks
            are kept there instead of in a local LMDB file.

    Returns:
        tuple: (global_state_db, blockstore)
    """
    # The state database file name is derived from the network endpoint.
    state_db_path = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('verifying state in %s', state_db_path)
    global_state_db = NativeLmdbDatabase(
        state_db_path,
        indexes=MerkleDatabase.create_index_configuration())

    if not database:
        # Default: keep the block store in a local indexed LMDB file.
        block_db_path = os.path.join(
            data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('block store file is %s', block_db_path)
        block_db = IndexedDatabase(
            block_db_path,
            BlockStore.serialize_block,
            BlockStore.deserialize_block,
            flag='c',
            indexes=BlockStore.create_index_configuration())
    else:
        LOGGER.debug('get_databases: OPEN ORIENTDB uri=%s', database)
        block_db = OrientDatabase(
            database,
            BlockStore.serialize_block,
            BlockStore.deserialize_block,
            indexes=BlockStore.create_index_configuration(),
            flag='c')
        LOGGER.debug('get_databases:OPEN ORIENT DB DONE %s', block_db)

    return global_state_db, BlockStore(block_db)
def test_state_verifier(self):
    """verify_state should recompute state for the populated blockstore."""
    blockstore = BlockStore(DictDatabase(
        indexes=BlockStore.create_index_configuration()))
    global_state_db = NativeLmdbDatabase(
        os.path.join(self._temp_dir, 'test_state_verifier.lmdb'),
        indexes=MerkleDatabase.create_index_configuration())

    # Roots that the verifier is expected to rebuild, in block order.
    expected_roots = [
        "e35490eac6f77453675c3399da7efe451e791272bbc8cf1b032c75030fb455c3",
        "3a369eb951171895c00ba2ffd04bfa1ef98d6ee651f96a65ae3280cf8d67d5e7",
        "797e70e29915c9129f950b2084ed0e3c09246bd1e6c232571456f51ca85df340",
    ]
    populate_blockstore(blockstore, get_signer(), expected_roots)

    verify_state(global_state_db, blockstore, "tcp://eth0:4004", "serial")

    # There is a bug in the shutdown code for some component this depends
    # on, which causes it to occasionally hang during shutdown. Just kill
    # the process for now.
    # pylint: disable=protected-access
    os._exit(0)
def setUp(self):
    """Create a throwaway LMDB-backed state database for each test."""
    self._temp_dir = tempfile.mkdtemp()
    db_path = os.path.join(self._temp_dir, 'test_state_view.lmdb')
    self.database = NativeLmdbDatabase(
        db_path,
        indexes=MerkleDatabase.create_index_configuration(),
        _size=10 * 1024 * 1024)
def test_state_verifier(self):
    """verify_state should recompute state for the populated blockstore."""
    block_store_db = NativeLmdbDatabase(
        os.path.join(
            self._temp_dir, 'test_state_verifier_block_store_db.lmdb'),
        indexes=BlockStore.create_index_configuration())
    blockstore = BlockStore(block_store_db)

    global_state_db = NativeLmdbDatabase(
        os.path.join(
            self._temp_dir, 'test_state_verifier_global_state.lmdb'),
        indexes=MerkleDatabase.create_index_configuration())

    # Roots that the verifier is expected to rebuild, in block order.
    expected_roots = [
        "e35490eac6f77453675c3399da7efe451e791272bbc8cf1b032c75030fb455c3",
        "3a369eb951171895c00ba2ffd04bfa1ef98d6ee651f96a65ae3280cf8d67d5e7",
        "797e70e29915c9129f950b2084ed0e3c09246bd1e6c232571456f51ca85df340",
    ]
    populate_blockstore(blockstore, get_signer(), expected_roots)

    verify_state(global_state_db, blockstore, "tcp://eth0:4004", "serial")

    # There is a bug in the shutdown code for some component this depends
    # on, which causes it to occasionally hang during shutdown. Just kill
    # the process for now.
    # pylint: disable=protected-access
    os._exit(0)
def setUp(self):
    """Build a fresh merkle trie over a temporary LMDB file."""
    self.dir = tempfile.mkdtemp()
    self.file = os.path.join(self.dir, 'merkle.lmdb')
    db_size = 120 * 1024 * 1024
    self.lmdb = NativeLmdbDatabase(
        self.file,
        indexes=MerkleDatabase.create_index_configuration(),
        _size=db_size)
    self.trie = MerkleDatabase(self.lmdb)
def test_empty_batch_file_should_produce_block(
        self, mock_scheduler_complete
):
    """
    In this case, the genesis batch, even with an empty list of batches,
    should produce a genesis block.
    Also:
     - the genesis.batch file should be deleted
     - the block_chain_id file should be created and populated
    """
    genesis_file = self._with_empty_batch_file()
    block_store = self.make_block_store()
    block_manager = BlockManager()
    block_manager.add_commit_store(block_store)

    # Real merkle database so the controller has a genuine first root.
    state_database = NativeLmdbDatabase(
        os.path.join(self._temp_dir, 'test_genesis.lmdb'),
        indexes=MerkleDatabase.create_index_configuration(),
        _size=10 * 1024 * 1024)
    merkle_db = MerkleDatabase(state_database)

    # Mock out the context manager; only the squash handler and first
    # state root are consulted by GenesisController here.
    ctx_mgr = Mock(name='ContextManager')
    ctx_mgr.get_squash_handler.return_value = Mock()
    ctx_mgr.get_first_root.return_value = merkle_db.get_merkle_root()

    txn_executor = Mock(name='txn_executor')
    completer = Mock('completer')
    completer.add_block = Mock('add_block')

    genesis_ctrl = GenesisController(
        context_manager=ctx_mgr,
        transaction_executor=txn_executor,
        completer=completer,
        block_store=block_store,
        state_view_factory=StateViewFactory(state_database),
        identity_signer=self._signer,
        block_manager=block_manager,
        data_dir=self._temp_dir,
        config_dir=self._temp_dir,
        chain_id_manager=ChainIdManager(self._temp_dir),
        batch_sender=Mock('batch_sender'),
        receipt_store=MagicMock())

    on_done_fn = Mock(return_value='')
    genesis_ctrl.start(on_done_fn)

    # The batch file is consumed, a chain head exists, the completion
    # callback and completer each fired exactly once, and the persisted
    # chain id matches the new genesis block.
    self.assertEqual(False, os.path.exists(genesis_file))
    self.assertEqual(True, block_store.chain_head is not None)
    self.assertEqual(1, on_done_fn.call_count)
    self.assertEqual(1, completer.add_block.call_count)
    self.assertEqual(block_store.chain_head.identifier,
                     self._read_block_chain_id())
def test_empty_batch_file_should_produce_block(
        self, mock_scheduler_complete
):
    """
    In this case, the genesis batch, even with an empty list of batches,
    should produce a genesis block.
    Also:
     - the genesis.batch file should be deleted
     - the block_chain_id file should be created and populated
    """
    genesis_file = self._with_empty_batch_file()
    block_store = self.make_block_store()
    block_manager = BlockManager()
    block_manager.add_commit_store(block_store)

    # Real merkle database so the controller has a genuine first root.
    state_database = NativeLmdbDatabase(
        os.path.join(self._temp_dir, 'test_genesis.lmdb'),
        indexes=MerkleDatabase.create_index_configuration(),
        _size=10 * 1024 * 1024)
    merkle_db = MerkleDatabase(state_database)

    # Mock out the context manager; only the squash handler and first
    # state root are consulted by GenesisController here.
    ctx_mgr = Mock(name='ContextManager')
    ctx_mgr.get_squash_handler.return_value = Mock()
    ctx_mgr.get_first_root.return_value = merkle_db.get_merkle_root()

    txn_executor = Mock(name='txn_executor')
    completer = Mock('completer')
    completer.add_block = Mock('add_block')

    genesis_ctrl = GenesisController(
        context_manager=ctx_mgr,
        transaction_executor=txn_executor,
        completer=completer,
        block_store=block_store,
        state_view_factory=StateViewFactory(state_database),
        identity_signer=self._signer,
        block_manager=block_manager,
        data_dir=self._temp_dir,
        config_dir=self._temp_dir,
        chain_id_manager=ChainIdManager(self._temp_dir),
        batch_sender=Mock('batch_sender'))

    on_done_fn = Mock(return_value='')
    genesis_ctrl.start(on_done_fn)

    # The batch file is consumed, a chain head exists, the completion
    # callback and completer each fired exactly once, and the persisted
    # chain id matches the new genesis block.
    self.assertEqual(False, os.path.exists(genesis_file))
    self.assertEqual(True, block_store.chain_head is not None)
    self.assertEqual(1, on_done_fn.call_count)
    self.assertEqual(1, completer.add_block.call_count)
    self.assertEqual(block_store.chain_head.identifier,
                     self._read_block_chain_id())
def __init__(self, with_genesis=True):
    """Assemble an in-memory journal test fixture.

    Args:
        with_genesis (bool): when True, generate and commit a genesis
            block so the publisher starts from an existing chain head.
    """
    self.block_sender = MockBlockSender()
    self.batch_sender = MockBatchSender()
    self.dir = tempfile.mkdtemp()
    self.block_db = NativeLmdbDatabase(
        os.path.join(self.dir, 'block.lmdb'),
        BlockStore.create_index_configuration())
    self.block_store = BlockStore(self.block_db)
    self.block_cache = BlockCache(self.block_store)
    self.state_db = NativeLmdbDatabase(
        os.path.join(self.dir, "merkle.lmdb"),
        MerkleDatabase.create_index_configuration())

    self.state_view_factory = NativeStateViewFactory(self.state_db)

    self.block_manager = BlockManager()
    self.block_manager.add_commit_store(self.block_store)

    # Two independent keys: one for signing test transactions, one as
    # the validator's identity key.
    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    self.signer = crypto_factory.new_signer(private_key)

    identity_private_key = context.new_random_private_key()
    self.identity_signer = crypto_factory.new_signer(identity_private_key)
    chain_head = None
    if with_genesis:
        self.genesis_block = self.generate_genesis_block()
        chain_head = self.genesis_block
        self.block_manager.put([chain_head.block])
        self.block_manager.persist(
            chain_head.block.header_signature,
            "commit_store")

    # NOTE(review): batch_sender is wired to self.block_sender (not
    # self.batch_sender) — possibly intentional for this mock, verify.
    # NOTE(review): with with_genesis=False, chain_head is None and
    # chain_head.block would raise AttributeError — confirm callers
    # never pass False, or guard this.
    self.block_publisher = BlockPublisher(
        block_manager=self.block_manager,
        transaction_executor=MockTransactionExecutor(),
        transaction_committed=self.block_store.has_transaction,
        batch_committed=self.block_store.has_batch,
        state_view_factory=self.state_view_factory,
        block_sender=self.block_sender,
        batch_sender=self.block_sender,
        chain_head=chain_head.block,
        identity_signer=self.identity_signer,
        data_dir=None,
        config_dir=None,
        permission_verifier=MockPermissionVerifier(),
        batch_observers=[])
def __init__(self, with_genesis=True):
    """Assemble an in-memory journal test fixture.

    Args:
        with_genesis (bool): when True, generate and commit a genesis
            block so the publisher starts from an existing chain head.
    """
    self.block_sender = MockBlockSender()
    self.batch_sender = MockBatchSender()
    self.dir = tempfile.mkdtemp()
    self.block_db = NativeLmdbDatabase(
        os.path.join(self.dir, 'block.lmdb'),
        BlockStore.create_index_configuration())
    self.block_store = BlockStore(self.block_db)
    self.block_cache = BlockCache(self.block_store)
    self.state_db = NativeLmdbDatabase(
        os.path.join(self.dir, "merkle.lmdb"),
        MerkleDatabase.create_index_configuration())

    self.state_view_factory = NativeStateViewFactory(self.state_db)

    self.block_manager = BlockManager()
    self.block_manager.add_commit_store(self.block_store)

    # Two independent keys: one for signing test transactions, one as
    # the validator's identity key.
    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    self.signer = crypto_factory.new_signer(private_key)

    identity_private_key = context.new_random_private_key()
    self.identity_signer = crypto_factory.new_signer(identity_private_key)
    chain_head = None
    if with_genesis:
        self.genesis_block = self.generate_genesis_block()
        chain_head = self.genesis_block
        self.block_manager.put([chain_head.block])
        self.block_manager.persist(chain_head.block.header_signature,
                                   "commit_store")

    # NOTE(review): batch_sender is wired to self.block_sender (not
    # self.batch_sender) — possibly intentional for this mock, verify.
    # NOTE(review): with with_genesis=False, chain_head is None and
    # chain_head.block would raise AttributeError — confirm callers
    # never pass False, or guard this.
    self.block_publisher = BlockPublisher(
        block_manager=self.block_manager,
        transaction_executor=MockTransactionExecutor(),
        transaction_committed=self.block_store.has_transaction,
        batch_committed=self.block_store.has_batch,
        state_view_factory=self.state_view_factory,
        block_sender=self.block_sender,
        batch_sender=self.block_sender,
        chain_head=chain_head.block,
        identity_signer=self.identity_signer,
        data_dir=None,
        config_dir=None,
        permission_verifier=MockPermissionVerifier(),
        batch_observers=[])
def get_databases(bind_network, data_dir):
    """Open the global state database and the block store.

    Args:
        bind_network (str): the network endpoint; its last two characters
            distinguish the LMDB file names.
        data_dir (str): directory holding the database files.

    Returns:
        tuple: (global_state_db, blockstore)
    """
    state_path = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('verifying state in %s', state_path)
    state_db = NativeLmdbDatabase(
        state_path,
        indexes=MerkleDatabase.create_index_configuration())

    block_path = os.path.join(
        data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('block store file is %s', block_path)
    block_db = NativeLmdbDatabase(
        block_path,
        indexes=BlockStore.create_index_configuration())

    return state_db, BlockStore(block_db)
def setUp(self):
    """Build a settings view over a merkle database with preset entries."""
    self._temp_dir = tempfile.mkdtemp()
    database = NativeLmdbDatabase(
        os.path.join(self._temp_dir, 'test_config_view.lmdb'),
        indexes=MerkleDatabase.create_index_configuration(),
        _size=10 * 1024 * 1024)
    state_view_factory = StateViewFactory(database)
    self._settings_view_factory = SettingsViewFactory(state_view_factory)

    entries = {
        TestSettingsView._address('my.setting'):
            TestSettingsView._setting_entry('my.setting', '10'),
        TestSettingsView._address('my.setting.list'):
            TestSettingsView._setting_entry('my.setting.list', '10,11,12'),
        TestSettingsView._address('my.other.list'):
            TestSettingsView._setting_entry('my.other.list', '13;14;15'),
    }
    merkle_db = MerkleDatabase(database)
    self._current_root_hash = merkle_db.update(entries, virtual=False)
def compute_state_hashes_wo_scheduler(self, base_dir):
    """Creates a state hash from the state updates from each txn in a
    valid batch.

    Args:
        base_dir (str): directory in which the scratch LMDB file is
            created.

    Returns:
        state_hashes (list of str): The merkle roots from state
            changes in 1 or more blocks in the yaml file.
    """
    database = NativeLmdbDatabase(
        os.path.join(base_dir, 'compute_state_hashes_wo_scheduler.lmdb'),
        indexes=MerkleDatabase.create_index_configuration(),
        _size=10 * 1024 * 1024)
    tree = MerkleDatabase(database=database)
    state_hashes = []
    updates = {}
    for batch in self._batches:
        b_id = batch.header_signature
        result = self._batch_results[b_id]
        if result.is_valid:
            for txn in batch.transactions:
                txn_id = txn.header_signature
                _, address_values, deletes = self._txn_execution[txn_id]
                # Since this is entirely serial, any overwrite of an
                # address by a later pair is expected and desirable.
                for pair in address_values:
                    updates.update(pair)
                # Deletes for a txn are applied after its writes.
                for address in deletes:
                    updates.pop(address, None)
            # This handles yaml files that have state roots in them
            if result.state_hash is not None:
                s_h = tree.update(set_items=updates, virtual=False)
                tree.set_merkle_root(merkle_root=s_h)
                state_hashes.append(s_h)
    if not state_hashes:
        state_hashes.append(tree.update(set_items=updates))
    return state_hashes
def setUp(self):
    """Build a settings view over a merkle database with preset entries."""
    self._temp_dir = tempfile.mkdtemp()
    database = NativeLmdbDatabase(
        os.path.join(self._temp_dir, 'test_config_view.lmdb'),
        indexes=MerkleDatabase.create_index_configuration(),
        _size=10 * 1024 * 1024)
    state_view_factory = StateViewFactory(database)
    self._settings_view_factory = SettingsViewFactory(state_view_factory)

    preset_settings = {
        TestSettingsView._address('my.setting'):
            TestSettingsView._setting_entry('my.setting', '10'),
        TestSettingsView._address('my.setting.list'):
            TestSettingsView._setting_entry('my.setting.list', '10,11,12'),
        TestSettingsView._address('my.other.list'):
            TestSettingsView._setting_entry('my.other.list', '13;14;15'),
    }
    self._current_root_hash = MerkleDatabase(database).update(
        preset_settings, virtual=False)
def get_databases(bind_network, data_dir):
    """Open the global state database and the block store.

    Args:
        bind_network (str): the network endpoint; its last two characters
            distinguish the LMDB file names.
        data_dir (str): directory holding the database files.

    Returns:
        tuple: (global_state_db, blockstore)
    """
    suffix = bind_network[-2:]

    merkle_path = os.path.join(data_dir, 'merkle-{}.lmdb'.format(suffix))
    LOGGER.debug('verifying state in %s', merkle_path)
    global_state_db = NativeLmdbDatabase(
        merkle_path,
        indexes=MerkleDatabase.create_index_configuration())

    blocks_path = os.path.join(data_dir, 'block-{}.lmdb'.format(suffix))
    LOGGER.debug('block store file is %s', blocks_path)
    blockstore = BlockStore(NativeLmdbDatabase(
        blocks_path,
        indexes=BlockStore.create_index_configuration()))

    return global_state_db, blockstore
def make_db_and_store(base_dir, size=3):
    """
    Creates and returns three related objects for testing:
        * database - LMDB database with evolving state
        * store - blocks with root hashes corresponding to that state
        * roots - list of root hashes used in order

    With defaults (size=3), the values at the three roots look like this:
        * root 1 - {'000...1': b'1'}
        * root 2 - {'000...1': b'2', '000...2': b'4'}
        * root 3 - {'000...1': b'3', '000...2': b'5', '000...3': b'7'}

    In general, the state at block i holds the first i keys, and the
    value at 0-based key index k is str(i + 2 * k).encode().
    """
    database = NativeLmdbDatabase(
        os.path.join(base_dir, 'client_handlers_mock_db.lmdb'),
        indexes=MerkleDatabase.create_index_configuration(),
        _size=10 * 1024 * 1024)
    store = MockBlockStore(size=0)
    roots = []

    merkle = MerkleDatabase(database)

    # Create all the keys that will be used. Keys are zero-padded hex
    # strings starting with '1'.
    keys = [format(i, 'x').zfill(70) for i in range(1, size + 1)]

    for i in range(1, size + 1):
        # Construct the state for this root: the first i keys, with
        # values derived from the key index and the block number.
        data = {
            keys[key_idx]: str(i + 2 * key_idx).encode()
            for key_idx in range(i)
        }

        root = merkle.update(data, virtual=False)
        roots.append(root)
        store.add_block(str(i), root)

    return database, store, roots
def __init__(self, bind_network, bind_component, bind_consensus, endpoint,
             peering, seeds_list, peer_list, data_dir, config_dir,
             identity_signer, scheduler_type, permissions,
             minimum_peer_connectivity, maximum_peer_connectivity,
             state_pruning_block_depth, network_public_key=None,
             network_private_key=None, roles=None):
    """Constructs a validator instance.

    Args:
        bind_network (str): the network endpoint
        bind_component (str): the component endpoint
        endpoint (str): the zmq-style URI of this validator's
            publically reachable endpoint
        peering (str): The type of peering approach. Either 'static'
            or 'dynamic'. In 'static' mode, no attempted topology
            buildout occurs -- the validator only attempts to initiate
            peering connections with endpoints specified in the
            peer_list. In 'dynamic' mode, the validator will first
            attempt to initiate peering connections with endpoints
            specified in the peer_list and then attempt to do a
            topology buildout starting with peer lists obtained from
            endpoints in the seeds_list. In either mode, the validator
            will accept incoming peer requests up to max_peers.
        seeds_list (list of str): a list of addresses to connect
            to in order to perform the initial topology buildout
        peer_list (list of str): a list of peer addresses
        data_dir (str): path to the data directory
        config_dir (str): path to the config directory
        identity_signer (str): cryptographic signer the validator uses
            for signing
    """
    # -- Setup Global State Database and Factory -- #
    # All LMDB file names are suffixed with the last two characters of
    # the network endpoint.
    global_state_db_filename = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('global state database file is %s',
                 global_state_db_filename)
    global_state_db = NativeLmdbDatabase(
        global_state_db_filename,
        indexes=MerkleDatabase.create_index_configuration())
    state_view_factory = StateViewFactory(global_state_db)

    # -- Setup Receipt Store -- #
    receipt_db_filename = os.path.join(
        data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('txn receipt store file is %s', receipt_db_filename)
    receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c')
    receipt_store = TransactionReceiptStore(receipt_db)

    # -- Setup Block Store -- #
    block_db_filename = os.path.join(
        data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = IndexedDatabase(
        block_db_filename,
        BlockStore.serialize_block,
        BlockStore.deserialize_block,
        flag='c',
        indexes=BlockStore.create_index_configuration())
    block_store = BlockStore(block_db)

    # The cache keep time for the journal's block cache must be greater
    # than the cache keep time used by the completer.
    base_keep_time = 1200
    block_cache = BlockCache(block_store,
                             keep_time=int(base_keep_time * 9 / 8),
                             purge_frequency=30)

    # -- Setup Thread Pools -- #
    component_thread_pool = InstrumentedThreadPoolExecutor(
        max_workers=10, name='Component')
    network_thread_pool = InstrumentedThreadPoolExecutor(
        max_workers=10, name='Network')
    client_thread_pool = InstrumentedThreadPoolExecutor(
        max_workers=5, name='Client')
    sig_pool = InstrumentedThreadPoolExecutor(
        max_workers=3, name='Signature')

    # -- Setup Dispatchers -- #
    component_dispatcher = Dispatcher()
    network_dispatcher = Dispatcher()

    # -- Setup Services -- #
    component_service = Interconnect(
        bind_component,
        component_dispatcher,
        secured=False,
        heartbeat=False,
        max_incoming_connections=20,
        monitor=True,
        max_future_callback_workers=10)

    # ZMQ identity derived from the current time, truncated to ZMQ's
    # identity length limit.
    zmq_identity = hashlib.sha512(
        time.time().hex().encode()).hexdigest()[:23]

    # Network traffic is only secured when both keys are configured.
    secure = False
    if network_public_key is not None and network_private_key is not None:
        secure = True

    network_service = Interconnect(
        bind_network,
        dispatcher=network_dispatcher,
        zmq_identity=zmq_identity,
        secured=secure,
        server_public_key=network_public_key,
        server_private_key=network_private_key,
        heartbeat=True,
        public_endpoint=endpoint,
        connection_timeout=120,
        max_incoming_connections=100,
        max_future_callback_workers=10,
        authorize=True,
        signer=identity_signer,
        roles=roles)

    # -- Setup Transaction Execution Platform -- #
    context_manager = ContextManager(global_state_db)

    batch_tracker = BatchTracker(block_store)

    settings_cache = SettingsCache(
        SettingsViewFactory(state_view_factory),
    )

    transaction_executor = TransactionExecutor(
        service=component_service,
        context_manager=context_manager,
        settings_view_factory=SettingsViewFactory(state_view_factory),
        scheduler_type=scheduler_type,
        invalid_observers=[batch_tracker])

    component_service.set_check_connections(
        transaction_executor.check_connections)

    event_broadcaster = EventBroadcaster(
        component_service, block_store, receipt_store)

    # -- Setup P2P Networking -- #
    gossip = Gossip(
        network_service,
        settings_cache,
        lambda: block_store.chain_head,
        block_store.chain_head_state_root,
        endpoint=endpoint,
        peering_mode=peering,
        initial_seed_endpoints=seeds_list,
        initial_peer_endpoints=peer_list,
        minimum_peer_connectivity=minimum_peer_connectivity,
        maximum_peer_connectivity=maximum_peer_connectivity,
        topology_check_frequency=1)

    completer = Completer(
        block_store,
        gossip,
        cache_keep_time=base_keep_time,
        cache_purge_frequency=30,
        requested_keep_time=300)

    block_sender = BroadcastBlockSender(completer, gossip)
    batch_sender = BroadcastBatchSender(completer, gossip)
    chain_id_manager = ChainIdManager(data_dir)

    identity_view_factory = IdentityViewFactory(
        StateViewFactory(global_state_db))

    id_cache = IdentityCache(identity_view_factory)

    # -- Setup Permissioning -- #
    permission_verifier = PermissionVerifier(
        permissions,
        block_store.chain_head_state_root,
        id_cache)

    # Observers keep the identity/settings caches coherent across forks.
    identity_observer = IdentityObserver(
        to_update=id_cache.invalidate,
        forked=id_cache.forked)

    settings_observer = SettingsObserver(
        to_update=settings_cache.invalidate,
        forked=settings_cache.forked)

    # -- Consensus Engine -- #
    consensus_thread_pool = InstrumentedThreadPoolExecutor(
        max_workers=3, name='Consensus')
    consensus_dispatcher = Dispatcher()
    consensus_service = Interconnect(
        bind_consensus,
        consensus_dispatcher,
        secured=False,
        heartbeat=False,
        max_incoming_connections=20,
        monitor=True,
        max_future_callback_workers=10)

    consensus_notifier = ConsensusNotifier(consensus_service)

    # -- Setup Journal -- #
    batch_injector_factory = DefaultBatchInjectorFactory(
        block_cache=block_cache,
        state_view_factory=state_view_factory,
        signer=identity_signer)

    block_publisher = BlockPublisher(
        transaction_executor=transaction_executor,
        block_cache=block_cache,
        state_view_factory=state_view_factory,
        settings_cache=settings_cache,
        block_sender=block_sender,
        batch_sender=batch_sender,
        chain_head=block_store.chain_head,
        identity_signer=identity_signer,
        data_dir=data_dir,
        config_dir=config_dir,
        permission_verifier=permission_verifier,
        check_publish_block_frequency=0.1,
        batch_observers=[batch_tracker],
        batch_injector_factory=batch_injector_factory)

    block_publisher_batch_sender = block_publisher.batch_sender()

    block_validator = BlockValidator(
        block_cache=block_cache,
        state_view_factory=state_view_factory,
        transaction_executor=transaction_executor,
        identity_signer=identity_signer,
        data_dir=data_dir,
        config_dir=config_dir,
        permission_verifier=permission_verifier)

    chain_controller = ChainController(
        block_store=block_store,
        block_cache=block_cache,
        block_validator=block_validator,
        state_database=global_state_db,
        chain_head_lock=block_publisher.chain_head_lock,
        state_pruning_block_depth=state_pruning_block_depth,
        data_dir=data_dir,
        observers=[
            event_broadcaster,
            receipt_store,
            batch_tracker,
            identity_observer,
            settings_observer
        ])

    genesis_controller = GenesisController(
        context_manager=context_manager,
        transaction_executor=transaction_executor,
        completer=completer,
        block_store=block_store,
        state_view_factory=state_view_factory,
        identity_signer=identity_signer,
        data_dir=data_dir,
        config_dir=config_dir,
        chain_id_manager=chain_id_manager,
        batch_sender=batch_sender)

    responder = Responder(completer)

    # Wire completed batches/blocks from the completer into the journal.
    completer.set_on_batch_received(block_publisher_batch_sender.send)
    completer.set_on_block_received(chain_controller.queue_block)
    completer.set_chain_has_block(chain_controller.has_block)

    # -- Register Message Handler -- #
    network_handlers.add(
        network_dispatcher, network_service, gossip, completer,
        responder, network_thread_pool, sig_pool,
        chain_controller.has_block, block_publisher.has_batch,
        permission_verifier, block_publisher, consensus_notifier)

    component_handlers.add(
        component_dispatcher, gossip, context_manager,
        transaction_executor, completer, block_store, batch_tracker,
        global_state_db, self.get_chain_head_state_root_hash,
        receipt_store, event_broadcaster, permission_verifier,
        component_thread_pool, client_thread_pool, sig_pool,
        block_publisher)

    # -- Store Object References -- #
    self._component_dispatcher = component_dispatcher
    self._component_service = component_service
    self._component_thread_pool = component_thread_pool

    self._network_dispatcher = network_dispatcher
    self._network_service = network_service
    self._network_thread_pool = network_thread_pool

    consensus_proxy = ConsensusProxy(
        block_cache=block_cache,
        chain_controller=chain_controller,
        block_publisher=block_publisher,
        gossip=gossip,
        identity_signer=identity_signer,
        settings_view_factory=SettingsViewFactory(state_view_factory),
        state_view_factory=state_view_factory)

    consensus_handlers.add(
        consensus_dispatcher, consensus_thread_pool, consensus_proxy)

    self._consensus_dispatcher = consensus_dispatcher
    self._consensus_service = consensus_service
    self._consensus_thread_pool = consensus_thread_pool

    self._client_thread_pool = client_thread_pool
    self._sig_pool = sig_pool

    self._context_manager = context_manager
    self._transaction_executor = transaction_executor
    self._genesis_controller = genesis_controller
    self._gossip = gossip

    self._block_publisher = block_publisher
    self._chain_controller = chain_controller
    self._block_validator = block_validator
def __init__(self, bind_network, bind_component, bind_consensus,
             endpoint, peering, seeds_list, peer_list, data_dir,
             config_dir, identity_signer, scheduler_type, permissions,
             minimum_peer_connectivity, maximum_peer_connectivity,
             state_pruning_block_depth, fork_cache_keep_time,
             network_public_key=None, network_private_key=None,
             roles=None, component_thread_pool_workers=10,
             network_thread_pool_workers=10,
             signature_thread_pool_workers=3):
    """Constructs a validator instance.

    Args:
        bind_network (str): the network endpoint
        bind_component (str): the component endpoint
        bind_consensus (str): the consensus endpoint
        endpoint (str): the zmq-style URI of this validator's
            publically reachable endpoint
        peering (str): The type of peering approach. Either 'static'
            or 'dynamic'. In 'static' mode, no attempted topology
            buildout occurs -- the validator only attempts to initiate
            peering connections with endpoints specified in the
            peer_list. In 'dynamic' mode, the validator will first
            attempt to initiate peering connections with endpoints
            specified in the peer_list and then attempt to do a
            topology buildout starting with peer lists obtained from
            endpoints in the seeds_list. In either mode, the validator
            will accept incoming peer requests up to max_peers.
        seeds_list (list of str): a list of addresses to connect
            to in order to perform the initial topology buildout
        peer_list (list of str): a list of peer addresses
        data_dir (str): path to the data directory
        config_dir (str): path to the config directory
        identity_signer (str): cryptographic signer the validator uses
            for signing
        scheduler_type (str): the scheduler type passed through to the
            transaction executor
        permissions: on-chain transactor permissioning configuration
        minimum_peer_connectivity (int): lower bound on peer count
            maintained by dynamic peering
        maximum_peer_connectivity (int): upper bound on peer count
        state_pruning_block_depth (int): depth at which old state
            roots become candidates for pruning
        fork_cache_keep_time (int): how long the chain controller
            retains abandoned forks
        network_public_key (bytes): server public key enabling a
            secured network when supplied with the private key;
            defaults to None (unsecured)
        network_private_key (bytes): server private key; defaults to
            None (unsecured)
        roles: network authorization roles; defaults to None
        component_thread_pool_workers (int): number of workers in the
            component thread pool; defaults to 10.
        network_thread_pool_workers (int): number of workers in the
            network thread pool; defaults to 10.
        signature_thread_pool_workers (int): number of workers in the
            signature thread pool; defaults to 3.
    """
    # -- Setup Global State Database and Factory -- #
    # The last two characters of the bind address disambiguate database
    # files when several validators share a data directory.
    global_state_db_filename = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug(
        'global state database file is %s', global_state_db_filename)
    global_state_db = NativeLmdbDatabase(
        global_state_db_filename,
        indexes=MerkleDatabase.create_index_configuration())
    state_view_factory = StateViewFactory(global_state_db)
    native_state_view_factory = NativeStateViewFactory(global_state_db)

    # -- Setup Receipt Store -- #
    receipt_db_filename = os.path.join(
        data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('txn receipt store file is %s', receipt_db_filename)
    receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c')
    receipt_store = TransactionReceiptStore(receipt_db)

    # -- Setup Block Store -- #
    block_db_filename = os.path.join(
        data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = NativeLmdbDatabase(
        block_db_filename,
        indexes=BlockStore.create_index_configuration())
    block_store = BlockStore(block_db)
    # The cache keep time for the journal's block cache must be greater
    # than the cache keep time used by the completer.
    base_keep_time = 1200

    block_manager = BlockManager()
    block_manager.add_commit_store(block_store)

    block_status_store = BlockValidationResultStore()

    # -- Setup Thread Pools -- #
    component_thread_pool = InstrumentedThreadPoolExecutor(
        max_workers=component_thread_pool_workers,
        name='Component')
    network_thread_pool = InstrumentedThreadPoolExecutor(
        max_workers=network_thread_pool_workers,
        name='Network')
    client_thread_pool = InstrumentedThreadPoolExecutor(
        max_workers=5,
        name='Client')
    sig_pool = InstrumentedThreadPoolExecutor(
        max_workers=signature_thread_pool_workers,
        name='Signature')

    # -- Setup Dispatchers -- #
    component_dispatcher = Dispatcher()
    network_dispatcher = Dispatcher()

    # -- Setup Services -- #
    component_service = Interconnect(
        bind_component,
        component_dispatcher,
        secured=False,
        heartbeat=False,
        max_incoming_connections=20,
        monitor=True,
        max_future_callback_workers=10)

    # Derive a unique zmq identity from the current time; truncated to
    # zmq's identity length limit.
    zmq_identity = hashlib.sha512(
        time.time().hex().encode()).hexdigest()[:23]

    # The network is secured only when both halves of the key pair are
    # provided.
    secure = False
    if network_public_key is not None and network_private_key is not None:
        secure = True

    network_service = Interconnect(
        bind_network,
        dispatcher=network_dispatcher,
        zmq_identity=zmq_identity,
        secured=secure,
        server_public_key=network_public_key,
        server_private_key=network_private_key,
        heartbeat=True,
        public_endpoint=endpoint,
        connection_timeout=120,
        max_incoming_connections=100,
        max_future_callback_workers=10,
        authorize=True,
        signer=identity_signer,
        roles=roles)

    # -- Setup Transaction Execution Platform -- #
    context_manager = ContextManager(global_state_db)

    batch_tracker = BatchTracker(block_store.has_batch)

    settings_cache = SettingsCache(
        SettingsViewFactory(state_view_factory),
    )

    transaction_executor = TransactionExecutor(
        service=component_service,
        context_manager=context_manager,
        settings_view_factory=SettingsViewFactory(state_view_factory),
        scheduler_type=scheduler_type,
        invalid_observers=[batch_tracker])

    component_service.set_check_connections(
        transaction_executor.check_connections)

    event_broadcaster = EventBroadcaster(
        component_service,
        block_store,
        receipt_store)

    # -- Consensus Engine -- #
    consensus_thread_pool = InstrumentedThreadPoolExecutor(
        max_workers=3,
        name='Consensus')
    consensus_dispatcher = Dispatcher()
    consensus_service = Interconnect(
        bind_consensus,
        consensus_dispatcher,
        secured=False,
        heartbeat=False,
        max_incoming_connections=20,
        max_future_callback_workers=10)

    consensus_registry = ConsensusRegistry()

    consensus_notifier = ConsensusNotifier(
        consensus_service,
        consensus_registry,
        identity_signer.get_public_key().as_hex())

    # -- Setup P2P Networking -- #
    gossip = Gossip(
        network_service,
        settings_cache,
        lambda: block_store.chain_head,
        block_store.chain_head_state_root,
        consensus_notifier,
        endpoint=endpoint,
        peering_mode=peering,
        initial_seed_endpoints=seeds_list,
        initial_peer_endpoints=peer_list,
        minimum_peer_connectivity=minimum_peer_connectivity,
        maximum_peer_connectivity=maximum_peer_connectivity,
        topology_check_frequency=1
    )

    consensus_notifier.set_gossip(gossip)

    completer = Completer(
        block_manager=block_manager,
        transaction_committed=block_store.has_transaction,
        get_committed_batch_by_id=block_store.get_batch,
        get_committed_batch_by_txn_id=(
            block_store.get_batch_by_transaction
        ),
        get_chain_head=lambda: unwrap_if_not_none(block_store.chain_head),
        gossip=gossip,
        cache_keep_time=base_keep_time,
        cache_purge_frequency=30,
        requested_keep_time=300)
    self._completer = completer

    block_sender = BroadcastBlockSender(completer, gossip)
    batch_sender = BroadcastBatchSender(completer, gossip)
    chain_id_manager = ChainIdManager(data_dir)

    identity_view_factory = IdentityViewFactory(
        StateViewFactory(global_state_db))
    id_cache = IdentityCache(identity_view_factory)

    # -- Setup Permissioning -- #
    permission_verifier = PermissionVerifier(
        permissions,
        block_store.chain_head_state_root,
        id_cache)

    # Observers keep the identity and settings caches coherent across
    # chain updates and forks.
    identity_observer = IdentityObserver(
        to_update=id_cache.invalidate,
        forked=id_cache.forked)

    settings_observer = SettingsObserver(
        to_update=settings_cache.invalidate,
        forked=settings_cache.forked)

    # -- Setup Journal -- #
    batch_injector_factory = DefaultBatchInjectorFactory(
        state_view_factory=state_view_factory,
        signer=identity_signer)

    block_publisher = BlockPublisher(
        block_manager=block_manager,
        transaction_executor=transaction_executor,
        transaction_committed=block_store.has_transaction,
        batch_committed=block_store.has_batch,
        state_view_factory=native_state_view_factory,
        block_sender=block_sender,
        batch_sender=batch_sender,
        chain_head=block_store.chain_head,
        identity_signer=identity_signer,
        data_dir=data_dir,
        config_dir=config_dir,
        permission_verifier=permission_verifier,
        batch_observers=[batch_tracker],
        batch_injector_factory=batch_injector_factory)

    block_validator = BlockValidator(
        block_manager=block_manager,
        view_factory=native_state_view_factory,
        transaction_executor=transaction_executor,
        block_status_store=block_status_store,
        permission_verifier=permission_verifier)

    chain_controller = ChainController(
        block_store=block_store,
        block_manager=block_manager,
        block_validator=block_validator,
        state_database=global_state_db,
        chain_head_lock=block_publisher.chain_head_lock,
        block_status_store=block_status_store,
        consensus_notifier=consensus_notifier,
        consensus_registry=consensus_registry,
        state_pruning_block_depth=state_pruning_block_depth,
        fork_cache_keep_time=fork_cache_keep_time,
        data_dir=data_dir,
        observers=[
            event_broadcaster,
            receipt_store,
            batch_tracker,
            identity_observer,
            settings_observer
        ])

    genesis_controller = GenesisController(
        context_manager=context_manager,
        transaction_executor=transaction_executor,
        block_manager=block_manager,
        block_store=block_store,
        state_view_factory=state_view_factory,
        identity_signer=identity_signer,
        data_dir=data_dir,
        config_dir=config_dir,
        chain_id_manager=chain_id_manager,
        batch_sender=batch_sender,
        receipt_store=receipt_store)

    responder = Responder(completer)

    completer.set_on_block_received(chain_controller.queue_block)

    # Set lazily once the publisher is started; handlers consult
    # self.has_batch in the meantime.
    self._incoming_batch_sender = None

    # -- Register Message Handler -- #
    network_handlers.add(
        network_dispatcher, network_service, gossip, completer,
        responder, network_thread_pool, sig_pool,
        lambda block_id: block_id in block_manager,
        self.has_batch,
        permission_verifier, block_publisher, consensus_notifier)

    component_handlers.add(
        component_dispatcher, gossip, context_manager,
        transaction_executor, completer, block_store, batch_tracker,
        global_state_db, self.get_chain_head_state_root_hash,
        receipt_store, event_broadcaster, permission_verifier,
        component_thread_pool, client_thread_pool,
        sig_pool, block_publisher,
        identity_signer.get_public_key().as_hex())

    # -- Store Object References -- #
    self._component_dispatcher = component_dispatcher
    self._component_service = component_service
    self._component_thread_pool = component_thread_pool

    self._network_dispatcher = network_dispatcher
    self._network_service = network_service
    self._network_thread_pool = network_thread_pool

    consensus_proxy = ConsensusProxy(
        block_manager=block_manager,
        chain_controller=chain_controller,
        block_publisher=block_publisher,
        gossip=gossip,
        identity_signer=identity_signer,
        settings_view_factory=SettingsViewFactory(state_view_factory),
        state_view_factory=state_view_factory,
        consensus_registry=consensus_registry,
        consensus_notifier=consensus_notifier)

    consensus_handlers.add(
        consensus_dispatcher,
        consensus_thread_pool,
        consensus_proxy,
        consensus_notifier)

    self._block_status_store = block_status_store
    self._consensus_notifier = consensus_notifier
    self._consensus_dispatcher = consensus_dispatcher
    self._consensus_service = consensus_service
    self._consensus_thread_pool = consensus_thread_pool
    self._consensus_registry = consensus_registry

    self._client_thread_pool = client_thread_pool
    self._sig_pool = sig_pool

    self._context_manager = context_manager
    self._transaction_executor = transaction_executor
    self._genesis_controller = genesis_controller
    self._gossip = gossip

    self._block_publisher = block_publisher
    self._chain_controller = chain_controller
    # NOTE(fix): the original assigned self._block_validator twice
    # (once before _chain_controller and again after); a single
    # assignment is sufficient.
    self._block_validator = block_validator