def verify_state(global_state_db, blockstore, bind_component, scheduler_type):
    """
    Verify that the state root hash of every block is present in state and,
    if not, reconstruct the missing state. Assumes that there are no "holes"
    in state; i.e., starting from genesis, state is present for all blocks
    up to some point and then not at all.

    Raises:
        InvalidChainError: The chain in the blockstore is not valid.
        ExecutionError: An unrecoverable error was encountered during batch
            execution.
    """
    state_view_factory = StateViewFactory(global_state_db)

    # Check if we should do state verification
    start_block, prev_state_root = search_for_present_state_root(
        blockstore, state_view_factory)

    if start_block is None:
        LOGGER.info(
            "Skipping state verification: chain head's state root is present")
        return

    LOGGER.info("Recomputing missing state from block %s with %s scheduler",
                start_block, scheduler_type)

    component_thread_pool = InstrumentedThreadPoolExecutor(
        max_workers=10, name='Component')

    component_dispatcher = Dispatcher()
    component_service = Interconnect(
        bind_component,
        component_dispatcher,
        secured=False,
        reap=False,
        max_incoming_connections=20,
        monitor=True,
        max_future_callback_workers=10)

    context_manager = ContextManager(global_state_db)

    transaction_executor = TransactionExecutor(
        service=component_service,
        context_manager=context_manager,
        settings_view_factory=SettingsViewFactory(state_view_factory),
        scheduler_type=scheduler_type,
        invalid_observers=[])

    component_service.set_check_connections(
        transaction_executor.check_connections)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_RECEIPT_ADD_DATA_REQUEST,
        tp_state_handlers.TpReceiptAddDataHandler(context_manager),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_EVENT_ADD_REQUEST,
        tp_state_handlers.TpEventAddHandler(context_manager),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_DELETE_REQUEST,
        tp_state_handlers.TpStateDeleteHandler(context_manager),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_GET_REQUEST,
        tp_state_handlers.TpStateGetHandler(context_manager),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_SET_REQUEST,
        tp_state_handlers.TpStateSetHandler(context_manager),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_REGISTER_REQUEST,
        processor_handlers.ProcessorRegisterValidationHandler(),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_REGISTER_REQUEST,
        processor_handlers.ProcessorRegisterHandler(
            transaction_executor.processor_manager),
        component_thread_pool)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_UNREGISTER_REQUEST,
        processor_handlers.ProcessorUnRegisterHandler(
            transaction_executor.processor_manager),
        component_thread_pool)

    component_dispatcher.start()
    component_service.start()

    process_blocks(
        initial_state_root=prev_state_root,
        blocks=blockstore.get_block_iter(
            start_block=start_block, reverse=False),
        transaction_executor=transaction_executor,
        context_manager=context_manager,
        state_view_factory=state_view_factory)

    component_dispatcher.stop()
    component_service.stop()
    component_thread_pool.shutdown(wait=True)
    transaction_executor.stop()
    context_manager.stop()
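The "no holes" assumption is what keeps `search_for_present_state_root` simple: walking back from the chain head, the first block whose state root is already in the global state database marks the resume point, and everything after it is missing. The sketch below illustrates that walk under stated assumptions; the helper body, the `KeyError` behavior of `create_view`, and the `INIT_ROOT_KEY` constant are assumptions for illustration, not the actual implementation.

```python
# Hypothetical sketch of search_for_present_state_root (not the real
# implementation): walk the chain from the head back toward genesis and
# stop at the first block whose state root is already present.
def search_for_present_state_root_sketch(blockstore, state_view_factory):
    missing_block = None
    prev_state_root = INIT_ROOT_KEY  # assumed constant for the empty root
    for block in blockstore.get_block_iter(reverse=True):
        try:
            # Assumed to raise if the root is not in the state database
            state_view_factory.create_view(block.state_root_hash)
            prev_state_root = block.state_root_hash
            break
        except KeyError:
            # State for this block is missing; because state has no
            # holes, every later block's state is missing too.
            missing_block = block
    # missing_block is None when the chain head's state root is present
    return missing_block, prev_state_root
```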
def setUp(self):
    self._context_manager = ContextManager(
        dict_database.DictDatabase(), state_delta_store=Mock())
def __init__(self, bind_network, bind_component, endpoint, peering,
             seeds_list, peer_list, data_dir, config_dir,
             identity_signing_key, scheduler_type, permissions,
             network_public_key=None, network_private_key=None, roles=None,
             metrics_registry=None):
    """Constructs a validator instance.

    Args:
        bind_network (str): the network endpoint
        bind_component (str): the component endpoint
        endpoint (str): the zmq-style URI of this validator's publicly
            reachable endpoint
        peering (str): The type of peering approach. Either 'static' or
            'dynamic'. In 'static' mode, no attempted topology buildout
            occurs -- the validator only attempts to initiate peering
            connections with endpoints specified in the peer_list. In
            'dynamic' mode, the validator will first attempt to initiate
            peering connections with endpoints specified in the peer_list
            and then attempt to do a topology buildout starting with peer
            lists obtained from endpoints in the seeds_list. In either
            mode, the validator will accept incoming peer requests up to
            max_peers.
        seeds_list (list of str): a list of addresses to connect to in
            order to perform the initial topology buildout
        peer_list (list of str): a list of peer addresses
        data_dir (str): path to the data directory
        config_dir (str): path to the config directory
        identity_signing_key (str): key validator uses for signing
    """
    # -- Setup Global State Database and Factory -- #
    global_state_db_filename = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('global state database file is %s',
                 global_state_db_filename)
    global_state_db = LMDBNoLockDatabase(global_state_db_filename, 'c')
    state_view_factory = StateViewFactory(global_state_db)

    # -- Setup State Delta Store -- #
    delta_db_filename = os.path.join(
        data_dir, 'state-deltas-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('state delta store file is %s', delta_db_filename)
    state_delta_db = LMDBNoLockDatabase(delta_db_filename, 'c')
    state_delta_store = StateDeltaStore(state_delta_db)

    # -- Setup Receipt Store -- #
    receipt_db_filename = os.path.join(
        data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('txn receipt store file is %s', receipt_db_filename)
    receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c')
    receipt_store = TransactionReceiptStore(receipt_db)

    # -- Setup Block Store -- #
    block_db_filename = os.path.join(
        data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = LMDBNoLockDatabase(block_db_filename, 'c')
    block_store = BlockStore(block_db)
    block_cache = BlockCache(
        block_store, keep_time=300, purge_frequency=30)

    # -- Setup Thread Pools -- #
    component_thread_pool = InstrumentedThreadPoolExecutor(
        max_workers=10, name='Component')
    network_thread_pool = InstrumentedThreadPoolExecutor(
        max_workers=10, name='Network')
    sig_pool = InstrumentedThreadPoolExecutor(
        max_workers=3, name='Signature')

    # -- Setup Dispatchers -- #
    component_dispatcher = Dispatcher(metrics_registry=metrics_registry)
    network_dispatcher = Dispatcher(metrics_registry=metrics_registry)

    # -- Setup Services -- #
    component_service = Interconnect(
        bind_component,
        component_dispatcher,
        secured=False,
        heartbeat=False,
        max_incoming_connections=20,
        monitor=True,
        max_future_callback_workers=10,
        metrics_registry=metrics_registry)

    zmq_identity = hashlib.sha512(
        time.time().hex().encode()).hexdigest()[:23]

    secure = False
    if network_public_key is not None and network_private_key is not None:
        secure = True

    network_service = Interconnect(
        bind_network,
        dispatcher=network_dispatcher,
        zmq_identity=zmq_identity,
        secured=secure,
        server_public_key=network_public_key,
        server_private_key=network_private_key,
        heartbeat=True,
        public_endpoint=endpoint,
        connection_timeout=30,
        max_incoming_connections=100,
        max_future_callback_workers=10,
        authorize=True,
        public_key=signing.generate_public_key(identity_signing_key),
        priv_key=identity_signing_key,
        roles=roles,
        metrics_registry=metrics_registry)

    # -- Setup Transaction Execution Platform -- #
    context_manager = ContextManager(global_state_db, state_delta_store)

    batch_tracker = BatchTracker(block_store)

    executor = TransactionExecutor(
        service=component_service,
        context_manager=context_manager,
        settings_view_factory=SettingsViewFactory(state_view_factory),
        scheduler_type=scheduler_type,
        invalid_observers=[batch_tracker],
        metrics_registry=metrics_registry)

    component_service.set_check_connections(executor.check_connections)

    state_delta_processor = StateDeltaProcessor(
        component_service, state_delta_store, block_store)
    event_broadcaster = EventBroadcaster(
        component_service, block_store, receipt_store)

    # -- Setup P2P Networking -- #
    gossip = Gossip(
        network_service,
        endpoint=endpoint,
        peering_mode=peering,
        initial_seed_endpoints=seeds_list,
        initial_peer_endpoints=peer_list,
        minimum_peer_connectivity=3,
        maximum_peer_connectivity=10,
        topology_check_frequency=1)

    completer = Completer(block_store, gossip)

    block_sender = BroadcastBlockSender(completer, gossip)
    batch_sender = BroadcastBatchSender(completer, gossip)
    chain_id_manager = ChainIdManager(data_dir)

    identity_view_factory = IdentityViewFactory(
        StateViewFactory(global_state_db))
    id_cache = IdentityCache(
        identity_view_factory, block_store.chain_head_state_root)

    # -- Setup Permissioning -- #
    permission_verifier = PermissionVerifier(
        permissions, block_store.chain_head_state_root, id_cache)
    identity_observer = IdentityObserver(
        to_update=id_cache.invalidate, forked=id_cache.forked)

    # -- Setup Journal -- #
    batch_injector_factory = DefaultBatchInjectorFactory(
        block_store=block_store,
        state_view_factory=state_view_factory,
        signing_key=identity_signing_key)

    block_publisher = BlockPublisher(
        transaction_executor=executor,
        block_cache=block_cache,
        state_view_factory=state_view_factory,
        block_sender=block_sender,
        batch_sender=batch_sender,
        squash_handler=context_manager.get_squash_handler(),
        chain_head=block_store.chain_head,
        identity_signing_key=identity_signing_key,
        data_dir=data_dir,
        config_dir=config_dir,
        permission_verifier=permission_verifier,
        check_publish_block_frequency=0.1,
        batch_observers=[batch_tracker],
        batch_injector_factory=batch_injector_factory,
        metrics_registry=metrics_registry)

    chain_controller = ChainController(
        block_sender=block_sender,
        block_cache=block_cache,
        state_view_factory=state_view_factory,
        transaction_executor=executor,
        chain_head_lock=block_publisher.chain_head_lock,
        on_chain_updated=block_publisher.on_chain_updated,
        squash_handler=context_manager.get_squash_handler(),
        chain_id_manager=chain_id_manager,
        identity_signing_key=identity_signing_key,
        data_dir=data_dir,
        config_dir=config_dir,
        permission_verifier=permission_verifier,
        chain_observers=[
            state_delta_processor,
            event_broadcaster,
            receipt_store,
            batch_tracker,
            identity_observer
        ],
        metrics_registry=metrics_registry)

    genesis_controller = GenesisController(
        context_manager=context_manager,
        transaction_executor=executor,
        completer=completer,
        block_store=block_store,
        state_view_factory=state_view_factory,
        identity_key=identity_signing_key,
        data_dir=data_dir,
        config_dir=config_dir,
        chain_id_manager=chain_id_manager,
        batch_sender=batch_sender)

    responder = Responder(completer)

    completer.set_on_batch_received(block_publisher.queue_batch)
    completer.set_on_block_received(chain_controller.queue_block)

    # -- Register Message Handlers -- #
    network_handlers.add(
        network_dispatcher, network_service, gossip, completer,
        responder, network_thread_pool, sig_pool, permission_verifier)

    component_handlers.add(
        component_dispatcher, gossip, context_manager, executor,
        completer, block_store, batch_tracker, global_state_db,
        self.get_chain_head_state_root_hash, receipt_store,
        state_delta_processor, state_delta_store, event_broadcaster,
        permission_verifier, component_thread_pool, sig_pool)

    # -- Store Object References -- #
    self._component_dispatcher = component_dispatcher
    self._component_service = component_service
    self._component_thread_pool = component_thread_pool

    self._network_dispatcher = network_dispatcher
    self._network_service = network_service
    self._network_thread_pool = network_thread_pool

    self._sig_pool = sig_pool

    self._context_manager = context_manager
    self._executor = executor
    self._genesis_controller = genesis_controller
    self._gossip = gossip
    self._block_publisher = block_publisher
    self._chain_controller = chain_controller
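For orientation, a hypothetical instantiation of the constructor above might look like the following. `Validator` is assumed to be the enclosing class name, and every concrete value (ports, paths, key) is illustrative only.

```python
# Hypothetical usage of the constructor above; all values are illustrative.
validator = Validator(
    bind_network='tcp://127.0.0.1:8800',
    bind_component='tcp://127.0.0.1:4004',
    endpoint='tcp://203.0.113.5:8800',    # publicly reachable URI
    peering='dynamic',
    seeds_list=['tcp://203.0.113.6:8800'],
    peer_list=[],
    data_dir='/var/lib/sawtooth',
    config_dir='/etc/sawtooth',
    identity_signing_key='deadbeef' * 8,  # illustrative hex private key
    scheduler_type='serial',
    permissions=None)
```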
def __init__(self, network_endpoint, component_endpoint, public_uri,
             peering, join_list, peer_list, data_dir,
             identity_signing_key):
    """Constructs a validator instance.

    Args:
        network_endpoint (str): the network endpoint
        component_endpoint (str): the component endpoint
        public_uri (str): the zmq-style URI of this validator's publicly
            reachable endpoint
        peering (str): The type of peering approach. Either 'static' or
            'dynamic'. In 'static' mode, no attempted topology buildout
            occurs -- the validator only attempts to initiate peering
            connections with endpoints specified in the peer_list. In
            'dynamic' mode, the validator will first attempt to initiate
            peering connections with endpoints specified in the peer_list
            and then attempt to do a topology buildout starting with peer
            lists obtained from endpoints in the join_list. In either
            mode, the validator will accept incoming peer requests up to
            max_peers.
        join_list (list of str): a list of addresses to connect to in
            order to perform the initial topology buildout
        peer_list (list of str): a list of peer addresses
        data_dir (str): path to the data directory
        identity_signing_key (str): key validator uses for signing
    """
    db_filename = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(network_endpoint[-2:]))
    LOGGER.debug('database file is %s', db_filename)
    merkle_db = LMDBNoLockDatabase(db_filename, 'c')

    delta_db_filename = os.path.join(
        data_dir, 'state-deltas-{}.lmdb'.format(network_endpoint[-2:]))
    LOGGER.debug('state delta store file is %s', delta_db_filename)
    state_delta_db = LMDBNoLockDatabase(delta_db_filename, 'c')
    state_delta_store = StateDeltaStore(state_delta_db)

    context_manager = ContextManager(merkle_db, state_delta_store)
    self._context_manager = context_manager

    state_view_factory = StateViewFactory(merkle_db)

    block_db_filename = os.path.join(
        data_dir, 'block-{}.lmdb'.format(network_endpoint[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = LMDBNoLockDatabase(block_db_filename, 'c')
    block_store = BlockStore(block_db)

    # setup network
    self._dispatcher = Dispatcher()

    thread_pool = ThreadPoolExecutor(max_workers=10)
    process_pool = ProcessPoolExecutor(max_workers=3)
    self._thread_pool = thread_pool
    self._process_pool = process_pool

    self._service = Interconnect(
        component_endpoint,
        self._dispatcher,
        secured=False,
        heartbeat=False,
        max_incoming_connections=20)

    executor = TransactionExecutor(
        service=self._service,
        context_manager=context_manager,
        config_view_factory=ConfigViewFactory(
            StateViewFactory(merkle_db)))
    self._executor = executor

    state_delta_processor = StateDeltaProcessor(
        self._service, state_delta_store, block_store)

    zmq_identity = hashlib.sha512(
        time.time().hex().encode()).hexdigest()[:23]

    network_thread_pool = ThreadPoolExecutor(max_workers=10)
    self._network_thread_pool = network_thread_pool

    self._network_dispatcher = Dispatcher()

    # Server public and private keys are hardcoded here due to
    # the decision to avoid having separate identities for each
    # validator's server socket. This is appropriate for a public
    # network. For a permissioned network with requirements for
    # server endpoint authentication at the network level, this can
    # be augmented with a local lookup service for side-band provided
    # endpoint, public_key pairs and a local configuration option
    # for 'server' side private keys.
    self._network = Interconnect(
        network_endpoint,
        dispatcher=self._network_dispatcher,
        zmq_identity=zmq_identity,
        secured=True,
        server_public_key=b'wFMwoOt>yFqI/ek.G[tfMMILHWw#vXB[Sv}>l>i)',
        server_private_key=b'r&oJ5aQDj4+V]p2:Lz70Eu0x#m%IwzBdP(}&hWM*',
        heartbeat=True,
        public_uri=public_uri,
        connection_timeout=30,
        max_incoming_connections=100)

    self._gossip = Gossip(
        self._network,
        public_uri=public_uri,
        peering_mode=peering,
        initial_join_endpoints=join_list,
        initial_peer_endpoints=peer_list,
        minimum_peer_connectivity=3,
        maximum_peer_connectivity=10,
        topology_check_frequency=1)

    completer = Completer(block_store, self._gossip)

    block_sender = BroadcastBlockSender(completer, self._gossip)
    batch_sender = BroadcastBatchSender(completer, self._gossip)
    chain_id_manager = ChainIdManager(data_dir)

    # Create and configure journal
    self._journal = Journal(
        block_store=block_store,
        state_view_factory=StateViewFactory(merkle_db),
        block_sender=block_sender,
        batch_sender=batch_sender,
        transaction_executor=executor,
        squash_handler=context_manager.get_squash_handler(),
        identity_signing_key=identity_signing_key,
        chain_id_manager=chain_id_manager,
        state_delta_processor=state_delta_processor,
        data_dir=data_dir,
        check_publish_block_frequency=0.1,
        block_cache_purge_frequency=30,
        block_cache_keep_time=300)

    self._genesis_controller = GenesisController(
        context_manager=context_manager,
        transaction_executor=executor,
        completer=completer,
        block_store=block_store,
        state_view_factory=state_view_factory,
        identity_key=identity_signing_key,
        data_dir=data_dir,
        chain_id_manager=chain_id_manager,
        batch_sender=batch_sender)

    responder = Responder(completer)

    completer.set_on_batch_received(self._journal.on_batch_received)
    completer.set_on_block_received(self._journal.on_block_received)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_GET_REQUEST,
        tp_state_handlers.TpStateGetHandler(context_manager),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_SET_REQUEST,
        tp_state_handlers.TpStateSetHandler(context_manager),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_REGISTER_REQUEST,
        processor_handlers.ProcessorRegisterHandler(executor.processors),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_UNREGISTER_REQUEST,
        processor_handlers.ProcessorUnRegisterHandler(executor.processors),
        thread_pool)

    # Set up base network handlers
    self._network_dispatcher.add_handler(
        validator_pb2.Message.NETWORK_PING,
        PingHandler(), network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.NETWORK_CONNECT,
        ConnectHandler(network=self._network), network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.NETWORK_DISCONNECT,
        DisconnectHandler(network=self._network), network_thread_pool)

    # Set up gossip handlers
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
        GetPeersRequestHandler(gossip=self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
        GetPeersResponseHandler(gossip=self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_REGISTER,
        PeerRegisterHandler(gossip=self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_UNREGISTER,
        PeerUnregisterHandler(gossip=self._gossip),
        network_thread_pool)

    # GOSSIP_MESSAGE 1) Sends acknowledgement to the sender
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        GossipMessageHandler(),
        network_thread_pool)

    # GOSSIP_MESSAGE 2) Verifies signature
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        signature_verifier.GossipMessageSignatureVerifier(),
        process_pool)

    # GOSSIP_MESSAGE 3) Determines if we should broadcast the
    # message to our peers. It is important that this occur prior
    # to the sending of the message to the completer, as this step
    # relies on whether the gossip message has previously been
    # seen by the validator to determine whether or not forwarding
    # should occur
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        GossipBroadcastHandler(
            gossip=self._gossip,
            completer=completer),
        network_thread_pool)

    # GOSSIP_MESSAGE 4) Send message to completer
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        CompleterGossipHandler(completer),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
        BlockResponderHandler(responder, self._gossip),
        network_thread_pool)

    # GOSSIP_BLOCK_RESPONSE 1) Sends ack to the sender
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
        GossipBlockResponseHandler(),
        network_thread_pool)

    # GOSSIP_BLOCK_RESPONSE 2) Verifies signature
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
        signature_verifier.GossipBlockResponseSignatureVerifier(),
        process_pool)

    # GOSSIP_BLOCK_RESPONSE 3) Send message to completer
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
        CompleterGossipBlockResponseHandler(completer),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
        ResponderBlockResponseHandler(responder, self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
        BatchByBatchIdResponderHandler(responder, self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
        BatchByTransactionIdResponderHandler(responder, self._gossip),
        network_thread_pool)

    # GOSSIP_BATCH_RESPONSE 1) Sends ack to the sender
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
        GossipBatchResponseHandler(),
        network_thread_pool)

    # GOSSIP_BATCH_RESPONSE 2) Verifies signature
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
        signature_verifier.GossipBatchResponseSignatureVerifier(),
        process_pool)

    # GOSSIP_BATCH_RESPONSE 3) Send message to completer
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
        CompleterGossipBatchResponseHandler(completer),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
        ResponderBatchResponseHandler(responder, self._gossip),
        network_thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
        signature_verifier.BatchListSignatureVerifier(),
        process_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
        CompleterBatchListBroadcastHandler(completer, self._gossip),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
        client_handlers.BatchSubmitFinisher(
            self._journal.get_block_store(),
            completer.batch_cache),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST,
        client_handlers.BatchStatusRequest(
            self._journal.get_block_store(),
            completer.batch_cache),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
        client_handlers.StateListRequest(
            merkle_db, self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
        client_handlers.StateGetRequest(
            merkle_db, self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
        client_handlers.BlockListRequest(self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
        client_handlers.BlockGetRequest(self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST,
        client_handlers.BatchListRequest(self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_GET_REQUEST,
        client_handlers.BatchGetRequest(self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_TRANSACTION_LIST_REQUEST,
        client_handlers.TransactionListRequest(
            self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_TRANSACTION_GET_REQUEST,
        client_handlers.TransactionGetRequest(
            self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
        client_handlers.StateCurrentRequest(
            self._journal.get_current_root),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
        StateDeltaSubscriberHandler(state_delta_processor),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.STATE_DELTA_UNSUBSCRIBE_REQUEST,
        StateDeltaUnsubscriberHandler(state_delta_processor),
        thread_pool)
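A pattern worth calling out in the constructor above: several handlers are registered for the same message type (e.g., GOSSIP_MESSAGE gets ack, signature verification, broadcast, then completion), and the registration order is load-bearing, as the inline comment on the broadcast step notes. A minimal illustrative sketch of such an ordered, short-circuiting handler chain is below; it is not the actual Dispatcher implementation, just the shape of the idea.

```python
# Minimal illustrative sketch (not the actual Dispatcher): handlers for a
# message type run in registration order, and any handler can stop the
# chain, e.g. after a failed signature check.
class SketchDispatcher:
    def __init__(self):
        self._handlers = {}  # message_type -> list of handler callables

    def add_handler(self, message_type, handler):
        self._handlers.setdefault(message_type, []).append(handler)

    def dispatch(self, message_type, message):
        for handler in self._handlers.get(message_type, []):
            # Each handler returns True to continue the chain or False
            # to drop the message.
            if not handler(message):
                break
```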
def setUp(self):
    self._context_manager = ContextManager(dict_database.DictDatabase())
def __init__(self, bind_network, bind_component, bind_consensus, endpoint,
             peering, seeds_list, peer_list, data_dir, config_dir,
             identity_signer, scheduler_type, permissions,
             minimum_peer_connectivity, maximum_peer_connectivity,
             state_pruning_block_depth, fork_cache_keep_time,
             network_public_key=None, network_private_key=None, roles=None,
             component_thread_pool_workers=10,
             network_thread_pool_workers=10,
             signature_thread_pool_workers=3):
    """Constructs a validator instance.

    Args:
        bind_network (str): the network endpoint
        bind_component (str): the component endpoint
        endpoint (str): the zmq-style URI of this validator's publicly
            reachable endpoint
        peering (str): The type of peering approach. Either 'static' or
            'dynamic'. In 'static' mode, no attempted topology buildout
            occurs -- the validator only attempts to initiate peering
            connections with endpoints specified in the peer_list. In
            'dynamic' mode, the validator will first attempt to initiate
            peering connections with endpoints specified in the peer_list
            and then attempt to do a topology buildout starting with peer
            lists obtained from endpoints in the seeds_list. In either
            mode, the validator will accept incoming peer requests up to
            max_peers.
        seeds_list (list of str): a list of addresses to connect to in
            order to perform the initial topology buildout
        peer_list (list of str): a list of peer addresses
        data_dir (str): path to the data directory
        config_dir (str): path to the config directory
        identity_signer (Signer): cryptographic signer the validator uses
            for signing
        component_thread_pool_workers (int): number of workers in the
            component thread pool; defaults to 10.
        network_thread_pool_workers (int): number of workers in the
            network thread pool; defaults to 10.
        signature_thread_pool_workers (int): number of workers in the
            signature thread pool; defaults to 3.
    """
    # -- Setup Global State Database and Factory -- #
    global_state_db_filename = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('global state database file is %s',
                 global_state_db_filename)
    global_state_db = NativeLmdbDatabase(
        global_state_db_filename,
        indexes=MerkleDatabase.create_index_configuration())
    state_view_factory = StateViewFactory(global_state_db)
    native_state_view_factory = NativeStateViewFactory(global_state_db)

    # -- Setup Receipt Store -- #
    receipt_db_filename = os.path.join(
        data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('txn receipt store file is %s', receipt_db_filename)
    receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c')
    receipt_store = TransactionReceiptStore(receipt_db)

    # -- Setup Block Store -- #
    block_db_filename = os.path.join(
        data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = NativeLmdbDatabase(
        block_db_filename,
        indexes=BlockStore.create_index_configuration())
    block_store = BlockStore(block_db)

    # The cache keep time for the journal's block cache must be greater
    # than the cache keep time used by the completer.
    base_keep_time = 1200

    block_manager = BlockManager()
    block_manager.add_commit_store(block_store)

    block_status_store = BlockValidationResultStore()

    # -- Setup Thread Pools -- #
    component_thread_pool = InstrumentedThreadPoolExecutor(
        max_workers=component_thread_pool_workers, name='Component')
    network_thread_pool = InstrumentedThreadPoolExecutor(
        max_workers=network_thread_pool_workers, name='Network')
    client_thread_pool = InstrumentedThreadPoolExecutor(
        max_workers=5, name='Client')
    sig_pool = InstrumentedThreadPoolExecutor(
        max_workers=signature_thread_pool_workers, name='Signature')

    # -- Setup Dispatchers -- #
    component_dispatcher = Dispatcher()
    network_dispatcher = Dispatcher()

    # -- Setup Services -- #
    component_service = Interconnect(
        bind_component,
        component_dispatcher,
        secured=False,
        heartbeat=False,
        max_incoming_connections=20,
        monitor=True,
        max_future_callback_workers=10)

    zmq_identity = hashlib.sha512(
        time.time().hex().encode()).hexdigest()[:23]

    secure = False
    if network_public_key is not None and network_private_key is not None:
        secure = True

    network_service = Interconnect(
        bind_network,
        dispatcher=network_dispatcher,
        zmq_identity=zmq_identity,
        secured=secure,
        server_public_key=network_public_key,
        server_private_key=network_private_key,
        heartbeat=True,
        public_endpoint=endpoint,
        connection_timeout=120,
        max_incoming_connections=100,
        max_future_callback_workers=10,
        authorize=True,
        signer=identity_signer,
        roles=roles)

    # -- Setup Transaction Execution Platform -- #
    context_manager = ContextManager(global_state_db)

    batch_tracker = BatchTracker(block_store.has_batch)

    settings_cache = SettingsCache(
        SettingsViewFactory(state_view_factory))

    transaction_executor = TransactionExecutor(
        service=component_service,
        context_manager=context_manager,
        settings_view_factory=SettingsViewFactory(state_view_factory),
        scheduler_type=scheduler_type,
        invalid_observers=[batch_tracker])

    component_service.set_check_connections(
        transaction_executor.check_connections)

    event_broadcaster = EventBroadcaster(
        component_service, block_store, receipt_store)

    # -- Consensus Engine -- #
    consensus_thread_pool = InstrumentedThreadPoolExecutor(
        max_workers=3, name='Consensus')
    consensus_dispatcher = Dispatcher()
    consensus_service = Interconnect(
        bind_consensus,
        consensus_dispatcher,
        secured=False,
        heartbeat=False,
        max_incoming_connections=20,
        max_future_callback_workers=10)

    consensus_registry = ConsensusRegistry()

    consensus_notifier = ConsensusNotifier(
        consensus_service,
        consensus_registry,
        identity_signer.get_public_key().as_hex())

    # -- Setup P2P Networking -- #
    gossip = Gossip(
        network_service,
        settings_cache,
        lambda: block_store.chain_head,
        block_store.chain_head_state_root,
        consensus_notifier,
        endpoint=endpoint,
        peering_mode=peering,
        initial_seed_endpoints=seeds_list,
        initial_peer_endpoints=peer_list,
        minimum_peer_connectivity=minimum_peer_connectivity,
        maximum_peer_connectivity=maximum_peer_connectivity,
        topology_check_frequency=1)

    consensus_notifier.set_gossip(gossip)

    completer = Completer(
        block_manager=block_manager,
        transaction_committed=block_store.has_transaction,
        get_committed_batch_by_id=block_store.get_batch,
        get_committed_batch_by_txn_id=(
            block_store.get_batch_by_transaction),
        get_chain_head=lambda: unwrap_if_not_none(block_store.chain_head),
        gossip=gossip,
        cache_keep_time=base_keep_time,
        cache_purge_frequency=30,
        requested_keep_time=300)
    self._completer = completer

    block_sender = BroadcastBlockSender(completer, gossip)
    batch_sender = BroadcastBatchSender(completer, gossip)
    chain_id_manager = ChainIdManager(data_dir)

    identity_view_factory = IdentityViewFactory(
        StateViewFactory(global_state_db))
    id_cache = IdentityCache(identity_view_factory)

    # -- Setup Permissioning -- #
    permission_verifier = PermissionVerifier(
        permissions, block_store.chain_head_state_root, id_cache)
    identity_observer = IdentityObserver(
        to_update=id_cache.invalidate, forked=id_cache.forked)
    settings_observer = SettingsObserver(
        to_update=settings_cache.invalidate, forked=settings_cache.forked)

    # -- Setup Journal -- #
    batch_injector_factory = DefaultBatchInjectorFactory(
        state_view_factory=state_view_factory,
        signer=identity_signer)

    block_publisher = BlockPublisher(
        block_manager=block_manager,
        transaction_executor=transaction_executor,
        transaction_committed=block_store.has_transaction,
        batch_committed=block_store.has_batch,
        state_view_factory=native_state_view_factory,
        block_sender=block_sender,
        batch_sender=batch_sender,
        chain_head=block_store.chain_head,
        identity_signer=identity_signer,
        data_dir=data_dir,
        config_dir=config_dir,
        permission_verifier=permission_verifier,
        batch_observers=[batch_tracker],
        batch_injector_factory=batch_injector_factory)

    block_validator = BlockValidator(
        block_manager=block_manager,
        view_factory=native_state_view_factory,
        transaction_executor=transaction_executor,
        block_status_store=block_status_store,
        permission_verifier=permission_verifier)

    chain_controller = ChainController(
        block_store=block_store,
        block_manager=block_manager,
        block_validator=block_validator,
        state_database=global_state_db,
        chain_head_lock=block_publisher.chain_head_lock,
        block_status_store=block_status_store,
        consensus_notifier=consensus_notifier,
        consensus_registry=consensus_registry,
        state_pruning_block_depth=state_pruning_block_depth,
        fork_cache_keep_time=fork_cache_keep_time,
        data_dir=data_dir,
        observers=[
            event_broadcaster,
            receipt_store,
            batch_tracker,
            identity_observer,
            settings_observer
        ])

    genesis_controller = GenesisController(
        context_manager=context_manager,
        transaction_executor=transaction_executor,
        completer=completer,
        block_manager=block_manager,
        block_store=block_store,
        state_view_factory=state_view_factory,
        identity_signer=identity_signer,
        data_dir=data_dir,
        config_dir=config_dir,
        chain_id_manager=chain_id_manager,
        batch_sender=batch_sender,
        receipt_store=receipt_store)

    responder = Responder(completer)

    completer.set_on_block_received(chain_controller.queue_block)

    self._incoming_batch_sender = None

    # -- Register Message Handlers -- #
    network_handlers.add(
        network_dispatcher, network_service, gossip, completer,
        responder, network_thread_pool, sig_pool,
        lambda block_id: block_id in block_manager, self.has_batch,
        permission_verifier, block_publisher, consensus_notifier)

    component_handlers.add(
        component_dispatcher, gossip, context_manager,
        transaction_executor, completer, block_store, batch_tracker,
        global_state_db, self.get_chain_head_state_root_hash,
        receipt_store, event_broadcaster, permission_verifier,
        component_thread_pool, client_thread_pool, sig_pool,
        block_publisher, identity_signer.get_public_key().as_hex())

    # -- Store Object References -- #
    self._component_dispatcher = component_dispatcher
    self._component_service = component_service
    self._component_thread_pool = component_thread_pool

    self._network_dispatcher = network_dispatcher
    self._network_service = network_service
    self._network_thread_pool = network_thread_pool

    consensus_proxy = ConsensusProxy(
        block_manager=block_manager,
        chain_controller=chain_controller,
        block_publisher=block_publisher,
        gossip=gossip,
        identity_signer=identity_signer,
        settings_view_factory=SettingsViewFactory(state_view_factory),
        state_view_factory=state_view_factory,
        consensus_registry=consensus_registry,
        consensus_notifier=consensus_notifier)

    consensus_handlers.add(
        consensus_dispatcher, consensus_thread_pool,
        consensus_proxy, consensus_notifier)

    self._block_status_store = block_status_store

    self._consensus_notifier = consensus_notifier
    self._consensus_dispatcher = consensus_dispatcher
    self._consensus_service = consensus_service
    self._consensus_thread_pool = consensus_thread_pool
    self._consensus_registry = consensus_registry

    self._client_thread_pool = client_thread_pool
    self._sig_pool = sig_pool

    self._context_manager = context_manager
    self._transaction_executor = transaction_executor
    self._genesis_controller = genesis_controller
    self._gossip = gossip

    self._block_publisher = block_publisher
    self._block_validator = block_validator
    self._chain_controller = chain_controller
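The Completer wiring above passes `get_chain_head=lambda: unwrap_if_not_none(block_store.chain_head)`, but `unwrap_if_not_none` is not defined in this section. A plausible reading, inferred from the name and call site only, is a None-safe unwrap of the chain-head wrapper; the `.get_block()` accessor below is an assumption, not confirmed by the source.

```python
# Hypothetical definition of unwrap_if_not_none, inferred from its call
# site above; the .get_block() unwrap is an assumed accessor on the
# wrapped chain-head type.
def unwrap_if_not_none(wrapped_block):
    if wrapped_block is None:
        return None
    return wrapped_block.get_block()  # assumption: wrapper exposes the block
```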
def __init__(self, network_endpoint, component_endpoint, peer_list,
             data_dir, identity_signing_key):
    """Constructs a validator instance.

    Args:
        network_endpoint (str): the network endpoint
        component_endpoint (str): the component endpoint
        peer_list (list of str): a list of peer addresses
        data_dir (str): path to the data directory
        identity_signing_key (str): key validator uses for signing
    """
    db_filename = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(network_endpoint[-2:]))
    LOGGER.debug('database file is %s', db_filename)

    merkle_db = LMDBNoLockDatabase(db_filename, 'n')
    context_manager = ContextManager(merkle_db)
    state_view_factory = StateViewFactory(merkle_db)

    block_db_filename = os.path.join(
        data_dir, 'block-{}.lmdb'.format(network_endpoint[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)

    block_db = LMDBNoLockDatabase(block_db_filename, 'n')
    block_store = BlockStore(block_db)

    # setup network
    self._dispatcher = Dispatcher()

    thread_pool = ThreadPoolExecutor(max_workers=10)
    process_pool = ProcessPoolExecutor(max_workers=3)

    self._service = Interconnect(
        component_endpoint,
        self._dispatcher,
        secured=False,
        heartbeat=False,
        max_incoming_connections=20)

    executor = TransactionExecutor(
        service=self._service,
        context_manager=context_manager,
        config_view_factory=ConfigViewFactory(
            StateViewFactory(merkle_db)))

    zmq_identity = hashlib.sha512(
        time.time().hex().encode()).hexdigest()[:23]

    network_thread_pool = ThreadPoolExecutor(max_workers=10)
    self._network_dispatcher = Dispatcher()

    # Server public and private keys are hardcoded here due to
    # the decision to avoid having separate identities for each
    # validator's server socket. This is appropriate for a public
    # network. For a permissioned network with requirements for
    # server endpoint authentication at the network level, this can
    # be augmented with a local lookup service for side-band provided
    # endpoint, public_key pairs and a local configuration option
    # for 'server' side private keys.
    self._network = Interconnect(
        network_endpoint,
        dispatcher=self._network_dispatcher,
        zmq_identity=zmq_identity,
        secured=True,
        server_public_key=b'wFMwoOt>yFqI/ek.G[tfMMILHWw#vXB[Sv}>l>i)',
        server_private_key=b'r&oJ5aQDj4+V]p2:Lz70Eu0x#m%IwzBdP(}&hWM*',
        heartbeat=True,
        connection_timeout=30,
        max_incoming_connections=100)

    self._gossip = Gossip(self._network, initial_peer_endpoints=peer_list)

    completer = Completer(block_store, self._gossip)

    block_sender = BroadcastBlockSender(completer, self._gossip)
    batch_sender = BroadcastBatchSender(completer, self._gossip)
    chain_id_manager = ChainIdManager(data_dir)

    # Create and configure journal
    self._journal = Journal(
        block_store=block_store,
        state_view_factory=StateViewFactory(merkle_db),
        block_sender=block_sender,
        batch_sender=batch_sender,
        transaction_executor=executor,
        squash_handler=context_manager.get_squash_handler(),
        identity_signing_key=identity_signing_key,
        chain_id_manager=chain_id_manager,
        data_dir=data_dir,
        check_publish_block_frequency=0.1,
        block_cache_purge_frequency=30,
        block_cache_keep_time=300)

    self._genesis_controller = GenesisController(
        context_manager=context_manager,
        transaction_executor=executor,
        completer=completer,
        block_store=block_store,
        state_view_factory=state_view_factory,
        identity_key=identity_signing_key,
        data_dir=data_dir,
        chain_id_manager=chain_id_manager,
        batch_sender=batch_sender)

    responder = Responder(completer)

    completer.set_on_batch_received(self._journal.on_batch_received)
    completer.set_on_block_received(self._journal.on_block_received)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_GET_REQUEST,
        tp_state_handlers.TpStateGetHandler(context_manager),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_SET_REQUEST,
        tp_state_handlers.TpStateSetHandler(context_manager),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_REGISTER_REQUEST,
        processor_handlers.ProcessorRegisterHandler(executor.processors),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_UNREGISTER_REQUEST,
        processor_handlers.ProcessorUnRegisterHandler(executor.processors),
        thread_pool)

    # Set up base network handlers
    self._network_dispatcher.add_handler(
        validator_pb2.Message.NETWORK_PING,
        PingHandler(), network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.NETWORK_CONNECT,
        ConnectHandler(network=self._network), network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.NETWORK_DISCONNECT,
        DisconnectHandler(network=self._network), network_thread_pool)

    # Set up gossip handlers
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_REGISTER,
        PeerRegisterHandler(gossip=self._gossip), network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_UNREGISTER,
        PeerUnregisterHandler(gossip=self._gossip), network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        GossipMessageHandler(), network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        signature_verifier.GossipMessageSignatureVerifier(),
        process_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        GossipBroadcastHandler(gossip=self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        CompleterGossipHandler(completer), network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
        BlockResponderHandler(responder, self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
        BatchByBatchIdResponderHandler(responder, self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
        BatchByTransactionIdResponderHandler(responder, self._gossip),
        network_thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
        signature_verifier.BatchListSignatureVerifier(),
        process_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
        CompleterBatchListBroadcastHandler(completer, self._gossip),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
        client_handlers.BatchSubmitFinisher(
            self._journal.get_block_store(),
            completer.batch_cache),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST,
        client_handlers.BatchStatusRequest(
            self._journal.get_block_store(),
            completer.batch_cache),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
        client_handlers.StateListRequest(
            merkle_db, self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
        client_handlers.StateGetRequest(
            merkle_db, self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
        client_handlers.BlockListRequest(self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
        client_handlers.BlockGetRequest(self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST,
        client_handlers.BatchListRequest(self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_GET_REQUEST,
        client_handlers.BatchGetRequest(self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
        client_handlers.StateCurrentRequest(
            self._journal.get_current_root),
        thread_pool)
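The `network_endpoint[-2:]` slice in the database filenames above takes the last two characters of the endpoint string, typically the final digits of the port, presumably so that validators bound to different ports produce distinct filenames. A worked example:

```python
# Worked example of the filename convention used in the constructors above.
network_endpoint = 'tcp://127.0.0.1:8800'  # illustrative endpoint
suffix = network_endpoint[-2:]             # -> '00'
db_filename = 'merkle-{}.lmdb'.format(suffix)
assert db_filename == 'merkle-00.lmdb'
```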
def test_valid_batch_invalid_batch(self):
    """Tests the squash function: that the correct hash is used for each
    txn and that the batch's ending state hash is set.

    Basically:
        1. Adds two batches, one where all the txns are valid, and one
           where one of the txns is invalid.
        2. Run through the scheduler executor interaction as txns are
           processed.
        3. Verify that the valid state root is obtained through the
           squash function.
        4. Verify that correct batch statuses are set.
    """
    private_key = bitcoin.random_key()
    public_key = bitcoin.encode_pubkey(
        bitcoin.privkey_to_pubkey(private_key), "hex")

    context_manager = ContextManager(dict_database.DictDatabase())
    squash_handler = context_manager.get_squash_handler()
    first_state_root = context_manager.get_first_root()
    scheduler = SerialScheduler(squash_handler, first_state_root)

    # 1)
    batch_signatures = []
    for names in [['a', 'b'], ['invalid', 'c']]:
        batch_txns = []
        for name in names:
            txn = create_transaction(
                name=name,
                private_key=private_key,
                public_key=public_key)
            batch_txns.append(txn)
        batch = create_batch(
            transactions=batch_txns,
            private_key=private_key,
            public_key=public_key)
        batch_signatures.append(batch.header_signature)
        scheduler.add_batch(batch)
    scheduler.finalize()

    # 2)
    sched1 = iter(scheduler)
    invalid_payload = hashlib.sha512('invalid'.encode()).hexdigest()
    while not scheduler.complete(block=False):
        txn_info = next(sched1)
        txn_header = transaction_pb2.TransactionHeader()
        txn_header.ParseFromString(txn_info.txn.header)
        inputs_or_outputs = list(txn_header.inputs)
        c_id = context_manager.create_context(
            txn_info.state_hash, inputs_or_outputs, inputs_or_outputs)
        if txn_header.payload_sha512 == invalid_payload:
            scheduler.set_transaction_execution_result(
                txn_info.txn.header_signature, False, c_id)
        else:
            context_manager.set(c_id, [{inputs_or_outputs[0]: 1}])
            scheduler.set_transaction_execution_result(
                txn_info.txn.header_signature, True, c_id)

    sched2 = iter(scheduler)

    # 3)
    txn_info_a = next(sched2)
    self.assertEqual(first_state_root, txn_info_a.state_hash)

    txn_a_header = transaction_pb2.TransactionHeader()
    txn_a_header.ParseFromString(txn_info_a.txn.header)
    inputs_or_outputs = list(txn_a_header.inputs)
    address_a = inputs_or_outputs[0]
    c_id_a = context_manager.create_context(
        first_state_root, inputs_or_outputs, inputs_or_outputs)
    context_manager.set(c_id_a, [{address_a: 1}])
    state_root2 = context_manager.commit_context([c_id_a], virtual=False)

    txn_info_b = next(sched2)
    self.assertEqual(txn_info_b.state_hash, state_root2)

    txn_b_header = transaction_pb2.TransactionHeader()
    txn_b_header.ParseFromString(txn_info_b.txn.header)
    inputs_or_outputs = list(txn_b_header.inputs)
    address_b = inputs_or_outputs[0]
    c_id_b = context_manager.create_context(
        state_root2, inputs_or_outputs, inputs_or_outputs)
    context_manager.set(c_id_b, [{address_b: 1}])
    state_root3 = context_manager.commit_context([c_id_b], virtual=False)

    txn_info_invalid = next(sched2)
    self.assertEqual(txn_info_invalid.state_hash, state_root3)

    txn_info_c = next(sched2)
    self.assertEqual(txn_info_c.state_hash, state_root3)

    # 4)
    batch1_result = scheduler.get_batch_execution_result(
        batch_signatures[0])
    self.assertTrue(batch1_result.is_valid)
    self.assertEqual(batch1_result.state_hash, state_root3)

    batch2_result = scheduler.get_batch_execution_result(
        batch_signatures[1])
    self.assertFalse(batch2_result.is_valid)
    self.assertIsNone(batch2_result.state_hash)
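The loop in step 2 is the core of the scheduler/executor contract: pull the next transaction, create a context at the scheduler-supplied state hash, apply the transaction, and report validity back. Distilled to its shape below; `execute` stands in for a real transaction processor and is an assumption for illustration, while the rest mirrors the test above.

```python
# Distilled shape of the scheduler/executor interaction from step 2.
# `execute(header, context_id) -> bool` is a hypothetical stand-in for a
# real transaction processor.
def drive_scheduler(scheduler, context_manager, execute):
    txn_iter = iter(scheduler)
    while not scheduler.complete(block=False):
        txn_info = next(txn_iter)
        header = transaction_pb2.TransactionHeader()
        header.ParseFromString(txn_info.txn.header)
        # The scheduler dictates which state root each txn executes on.
        context_id = context_manager.create_context(
            txn_info.state_hash,
            list(header.inputs),
            list(header.outputs))
        # The processor applies the txn; the scheduler only learns
        # whether it was valid and which context holds its writes.
        is_valid = execute(header, context_id)
        scheduler.set_transaction_execution_result(
            txn_info.txn.header_signature, is_valid, context_id)
```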
def __init__(self, bind_network, bind_component, endpoint, peering, seeds_list, peer_list, data_dir, config_dir, identity_signing_key, scheduler_type, permissions, network_public_key=None, network_private_key=None, roles=None, metrics_registry=None): """Constructs a validator instance. Args: bind_network (str): the network endpoint bind_component (str): the component endpoint endpoint (str): the zmq-style URI of this validator's publically reachable endpoint peering (str): The type of peering approach. Either 'static' or 'dynamic'. In 'static' mode, no attempted topology buildout occurs -- the validator only attempts to initiate peering connections with endpoints specified in the peer_list. In 'dynamic' mode, the validator will first attempt to initiate peering connections with endpoints specified in the peer_list and then attempt to do a topology buildout starting with peer lists obtained from endpoints in the seeds_list. In either mode, the validator will accept incoming peer requests up to max_peers. seeds_list (list of str): a list of addresses to connect to in order to perform the initial topology buildout peer_list (list of str): a list of peer addresses data_dir (str): path to the data directory config_dir (str): path to the config directory identity_signing_key (str): key validator uses for signing """ db_filename = os.path.join(data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug('database file is %s', db_filename) merkle_db = LMDBNoLockDatabase(db_filename, 'c') delta_db_filename = os.path.join( data_dir, 'state-deltas-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug('state delta store file is %s', delta_db_filename) state_delta_db = LMDBNoLockDatabase(delta_db_filename, 'c') receipt_db_filename = os.path.join( data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug('txn receipt store file is %s', receipt_db_filename) receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c') state_delta_store = StateDeltaStore(state_delta_db) receipt_store = TransactionReceiptStore(receipt_db) context_manager = ContextManager(merkle_db, state_delta_store) self._context_manager = context_manager state_view_factory = StateViewFactory(merkle_db) block_db_filename = os.path.join( data_dir, 'block-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug('block store file is %s', block_db_filename) block_db = LMDBNoLockDatabase(block_db_filename, 'c') block_store = BlockStore(block_db) batch_tracker = BatchTracker(block_store) # setup network self._dispatcher = Dispatcher(metrics_registry=metrics_registry) thread_pool = ThreadPoolExecutor(max_workers=10) sig_pool = ThreadPoolExecutor(max_workers=3) self._thread_pool = thread_pool self._sig_pool = sig_pool self._service = Interconnect(bind_component, self._dispatcher, secured=False, heartbeat=False, max_incoming_connections=20, monitor=True, max_future_callback_workers=10, metrics_registry=metrics_registry) executor = TransactionExecutor( service=self._service, context_manager=context_manager, settings_view_factory=SettingsViewFactory( StateViewFactory(merkle_db)), scheduler_type=scheduler_type, invalid_observers=[batch_tracker], metrics_registry=metrics_registry) self._executor = executor self._service.set_check_connections(executor.check_connections) state_delta_processor = StateDeltaProcessor(self._service, state_delta_store, block_store) event_broadcaster = EventBroadcaster(self._service, block_store, receipt_store) zmq_identity = hashlib.sha512( time.time().hex().encode()).hexdigest()[:23] network_thread_pool = 
ThreadPoolExecutor(max_workers=10) self._network_thread_pool = network_thread_pool self._network_dispatcher = Dispatcher( metrics_registry=metrics_registry) secure = False if network_public_key is not None and network_private_key is not None: secure = True self._network = Interconnect( bind_network, dispatcher=self._network_dispatcher, zmq_identity=zmq_identity, secured=secure, server_public_key=network_public_key, server_private_key=network_private_key, heartbeat=True, public_endpoint=endpoint, connection_timeout=30, max_incoming_connections=100, max_future_callback_workers=10, authorize=True, public_key=signing.generate_pubkey(identity_signing_key), priv_key=identity_signing_key, roles=roles, metrics_registry=metrics_registry) self._gossip = Gossip(self._network, endpoint=endpoint, peering_mode=peering, initial_seed_endpoints=seeds_list, initial_peer_endpoints=peer_list, minimum_peer_connectivity=3, maximum_peer_connectivity=10, topology_check_frequency=1) completer = Completer(block_store, self._gossip) block_sender = BroadcastBlockSender(completer, self._gossip) batch_sender = BroadcastBatchSender(completer, self._gossip) chain_id_manager = ChainIdManager(data_dir) identity_view_factory = IdentityViewFactory( StateViewFactory(merkle_db)) id_cache = IdentityCache(identity_view_factory, block_store.chain_head_state_root) permission_verifier = PermissionVerifier( permissions, block_store.chain_head_state_root, id_cache) identity_observer = IdentityObserver(to_update=id_cache.invalidate, forked=id_cache.forked) # Create and configure journal self._journal = Journal( block_store=block_store, state_view_factory=StateViewFactory(merkle_db), block_sender=block_sender, batch_sender=batch_sender, transaction_executor=executor, squash_handler=context_manager.get_squash_handler(), identity_signing_key=identity_signing_key, chain_id_manager=chain_id_manager, data_dir=data_dir, config_dir=config_dir, permission_verifier=permission_verifier, check_publish_block_frequency=0.1, block_cache_purge_frequency=30, block_cache_keep_time=300, batch_observers=[batch_tracker], chain_observers=[ state_delta_processor, event_broadcaster, receipt_store, batch_tracker, identity_observer ], metrics_registry=metrics_registry) self._genesis_controller = GenesisController( context_manager=context_manager, transaction_executor=executor, completer=completer, block_store=block_store, state_view_factory=state_view_factory, identity_key=identity_signing_key, data_dir=data_dir, config_dir=config_dir, chain_id_manager=chain_id_manager, batch_sender=batch_sender) responder = Responder(completer) completer.set_on_batch_received(self._journal.on_batch_received) completer.set_on_block_received(self._journal.on_block_received) self._dispatcher.add_handler( validator_pb2.Message.TP_ADD_RECEIPT_DATA_REQUEST, tp_state_handlers.TpAddReceiptDataHandler(context_manager), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.TP_ADD_EVENT_REQUEST, tp_state_handlers.TpAddEventHandler(context_manager), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.TP_STATE_DEL_REQUEST, tp_state_handlers.TpStateDeleteHandler(context_manager), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.TP_STATE_GET_REQUEST, tp_state_handlers.TpStateGetHandler(context_manager), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.TP_STATE_SET_REQUEST, tp_state_handlers.TpStateSetHandler(context_manager), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.TP_REGISTER_REQUEST, 
processor_handlers.ProcessorRegisterHandler(executor.processors), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.TP_UNREGISTER_REQUEST, processor_handlers.ProcessorUnRegisterHandler(executor.processors), thread_pool) # Set up base network handlers self._network_dispatcher.add_handler( validator_pb2.Message.NETWORK_PING, PingHandler(network=self._network), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.NETWORK_CONNECT, ConnectHandler(network=self._network), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.NETWORK_DISCONNECT, DisconnectHandler(network=self._network), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.AUTHORIZATION_VIOLATION, AuthorizationViolationHandler(network=self._network, gossip=self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.AUTHORIZATION_TRUST_REQUEST, AuthorizationTrustRequestHandler( network=self._network, permission_verifier=permission_verifier, gossip=self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.AUTHORIZATION_CHALLENGE_REQUEST, AuthorizationChallengeRequestHandler(network=self._network), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.AUTHORIZATION_CHALLENGE_SUBMIT, AuthorizationChallengeSubmitHandler( network=self._network, permission_verifier=permission_verifier, gossip=self._gossip), network_thread_pool) # Set up gossip handlers self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST, NetworkPermissionHandler(network=self._network, permission_verifier=permission_verifier, gossip=self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST, GetPeersRequestHandler(gossip=self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE, NetworkPermissionHandler(network=self._network, permission_verifier=permission_verifier, gossip=self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE, GetPeersResponseHandler(gossip=self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_REGISTER, NetworkPermissionHandler(network=self._network, permission_verifier=permission_verifier, gossip=self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_REGISTER, PeerRegisterHandler(gossip=self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_UNREGISTER, PeerUnregisterHandler(gossip=self._gossip), network_thread_pool) # GOSSIP_MESSAGE 1) Sends acknowledgement to the sender self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, GossipMessageHandler(), network_thread_pool) # GOSSIP_MESSAGE 2) Verify Network Permissions self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, NetworkPermissionHandler(network=self._network, permission_verifier=permission_verifier, gossip=self._gossip), network_thread_pool) # GOSSIP_MESSAGE 3) Verifies signature self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, signature_verifier.GossipMessageSignatureVerifier(), sig_pool) # GOSSIP_MESSAGE 4) Verifies batch structure self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, structure_verifier.GossipHandlerStructureVerifier(), 
        network_thread_pool)

    # GOSSIP_MESSAGE 5) Verifies that the node is allowed to publish a
    # block
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        NetworkConsensusPermissionHandler(
            network=self._network,
            permission_verifier=permission_verifier,
            gossip=self._gossip),
        network_thread_pool)

    # GOSSIP_MESSAGE 6) Determines if we should broadcast the
    # message to our peers. It is important that this occur prior
    # to the sending of the message to the completer, as this step
    # relies on whether the gossip message has previously been
    # seen by the validator to determine whether or not forwarding
    # should occur
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        GossipBroadcastHandler(gossip=self._gossip,
                               completer=completer),
        network_thread_pool)

    # GOSSIP_MESSAGE 7) Send message to completer
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        CompleterGossipHandler(completer),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
        NetworkPermissionHandler(network=self._network,
                                 permission_verifier=permission_verifier,
                                 gossip=self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
        BlockResponderHandler(responder, self._gossip),
        network_thread_pool)

    # GOSSIP_BLOCK_RESPONSE 1) Sends ack to the sender
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
        GossipBlockResponseHandler(),
        network_thread_pool)

    # GOSSIP_BLOCK_RESPONSE 2) Verify Network Permissions
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
        NetworkPermissionHandler(network=self._network,
                                 permission_verifier=permission_verifier,
                                 gossip=self._gossip),
        network_thread_pool)

    # GOSSIP_BLOCK_RESPONSE 3) Verifies signature
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
        signature_verifier.GossipBlockResponseSignatureVerifier(),
        sig_pool)

    # GOSSIP_BLOCK_RESPONSE 4) Check batch structure
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
        structure_verifier.GossipBlockResponseStructureVerifier(),
        network_thread_pool)

    # GOSSIP_BLOCK_RESPONSE 5) Send message to completer
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
        CompleterGossipBlockResponseHandler(completer),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
        ResponderBlockResponseHandler(responder, self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
        NetworkPermissionHandler(network=self._network,
                                 permission_verifier=permission_verifier,
                                 gossip=self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
        BatchByBatchIdResponderHandler(responder, self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
        NetworkPermissionHandler(network=self._network,
                                 permission_verifier=permission_verifier,
                                 gossip=self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
        BatchByTransactionIdResponderHandler(responder, self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
        NetworkPermissionHandler(network=self._network,
                                 permission_verifier=permission_verifier,
gossip=self._gossip), network_thread_pool) # GOSSIP_BATCH_RESPONSE 1) Sends ack to the sender self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_RESPONSE, GossipBatchResponseHandler(), network_thread_pool) # GOSSIP_BATCH_RESPONSE 2) Verifies signature self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_RESPONSE, signature_verifier.GossipBatchResponseSignatureVerifier(), sig_pool) # GOSSIP_BATCH_RESPONSE 3) Check batch structure self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_RESPONSE, structure_verifier.GossipBatchResponseStructureVerifier(), network_thread_pool) # GOSSIP_BATCH_RESPONSE 4) Send message to completer self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_RESPONSE, CompleterGossipBatchResponseHandler(completer), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_RESPONSE, ResponderBatchResponseHandler(responder, self._gossip), network_thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, BatchListPermissionVerifier( permission_verifier=permission_verifier), sig_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, signature_verifier.BatchListSignatureVerifier(), sig_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, structure_verifier.BatchListStructureVerifier(), network_thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, CompleterBatchListBroadcastHandler(completer, self._gossip), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, client_handlers.BatchSubmitFinisher(batch_tracker), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST, client_handlers.BatchStatusRequest(batch_tracker), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_LIST_REQUEST, client_handlers.StateListRequest(merkle_db, self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_GET_REQUEST, client_handlers.StateGetRequest(merkle_db, self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST, client_handlers.BlockListRequest(self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST, client_handlers.BlockGetRequest(self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST, client_handlers.BatchListRequest(self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_GET_REQUEST, client_handlers.BatchGetRequest(self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_TRANSACTION_LIST_REQUEST, client_handlers.TransactionListRequest( self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_TRANSACTION_GET_REQUEST, client_handlers.TransactionGetRequest( self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST, client_handlers.StateCurrentRequest( self._journal.get_current_root), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_RECEIPT_GET_REQUEST, ClientReceiptGetRequestHandler(receipt_store), thread_pool) # State Delta Subscription Handlers 
self._dispatcher.add_handler( validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST, StateDeltaSubscriberValidationHandler(state_delta_processor), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST, StateDeltaAddSubscriberHandler(state_delta_processor), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.STATE_DELTA_UNSUBSCRIBE_REQUEST, StateDeltaUnsubscriberHandler(state_delta_processor), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.STATE_DELTA_GET_EVENTS_REQUEST, StateDeltaGetEventsHandler(block_store, state_delta_store), thread_pool) # Client Events Handlers self._dispatcher.add_handler( validator_pb2.Message.CLIENT_EVENTS_SUBSCRIBE_REQUEST, ClientEventsSubscribeValidationHandler(event_broadcaster), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_EVENTS_SUBSCRIBE_REQUEST, ClientEventsSubscribeHandler(event_broadcaster), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_EVENTS_UNSUBSCRIBE_REQUEST, ClientEventsUnsubscribeHandler(event_broadcaster), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_EVENTS_GET_REQUEST, ClientEventsGetRequestHandler(event_broadcaster), thread_pool)
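# The constructors above register several handlers for a single message
# type, and the dispatcher runs them in registration order as a pipeline
# (ack, permission check, signature check, structure check, broadcast,
# completer). The standalone sketch below illustrates only that pattern;
# MiniDispatcher and its handlers are hypothetical stand-ins, not the
# validator's Dispatcher.
from concurrent.futures import ThreadPoolExecutor


class MiniDispatcher:
    def __init__(self):
        self._handlers = {}  # message_type -> list of (handler, pool)

    def add_handler(self, message_type, handler, pool):
        self._handlers.setdefault(message_type, []).append((handler, pool))

    def dispatch(self, message_type, message):
        for handler, pool in self._handlers.get(message_type, []):
            # Each stage runs on its own executor; blocking on the result
            # keeps the stages ordered, and a stage returning False stops
            # the chain, as a failed verification would.
            if not pool.submit(handler, message).result():
                return False
        return True


pool = ThreadPoolExecutor(max_workers=2)
dispatcher = MiniDispatcher()
dispatcher.add_handler('PING', lambda msg: True, pool)         # ack stage
dispatcher.add_handler('PING', lambda msg: msg == 'ok', pool)  # verify stage
assert dispatcher.dispatch('PING', 'ok') is True
assert dispatcher.dispatch('PING', 'bad') is False
pool.shutdown(wait=True)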
def __init__(self, network_endpoint, component_endpoint, peer_list, data_dir, key_dir): """Constructs a validator instance. Args: network_endpoint (str): the network endpoint component_endpoint (str): the component endpoint peer_list (list of str): a list of peer addresses data_dir (str): path to the data directory key_dir (str): path to the key directory """ db_filename = os.path.join( data_dir, 'merkle-{}.lmdb'.format(network_endpoint[-2:])) LOGGER.debug('database file is %s', db_filename) merkle_db = LMDBNoLockDatabase(db_filename, 'n') context_manager = ContextManager(merkle_db) block_db_filename = os.path.join(data_dir, 'block.lmdb') LOGGER.debug('block store file is %s', block_db_filename) # block_store = LMDBNoLockDatabase(block_db_filename, 'n') block_store = {} # this is not currently being used but will be something like this # in the future, when Journal takes a block_store that isn't a dict # setup network self._dispatcher = Dispatcher() completer = Completer(block_store) thread_pool = ThreadPoolExecutor(max_workers=10) process_pool = ProcessPoolExecutor(max_workers=3) self._dispatcher.add_handler( validator_pb2.Message.TP_STATE_GET_REQUEST, tp_state_handlers.TpStateGetHandler(context_manager), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.TP_STATE_SET_REQUEST, tp_state_handlers.TpStateSetHandler(context_manager), thread_pool) self._service = Interconnect(component_endpoint, self._dispatcher, secured=False) executor = TransactionExecutor(self._service, context_manager) self._dispatcher.add_handler( validator_pb2.Message.TP_REGISTER_REQUEST, processor_handlers.ProcessorRegisterHandler(executor.processors), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.TP_UNREGISTER_REQUEST, processor_handlers.ProcessorUnRegisterHandler(executor.processors), thread_pool) identity = hashlib.sha512(time.time().hex().encode()).hexdigest()[:23] identity_signing_key = Validator.load_identity_signing_key( key_dir, DEFAULT_KEY_NAME) network_thread_pool = ThreadPoolExecutor(max_workers=10) self._network_dispatcher = Dispatcher() # Server public and private keys are hardcoded here due to # the decision to avoid having separate identities for each # validator's server socket. This is appropriate for a public # network. For a permissioned network with requirements for # server endpoint authentication at the network level, this can # be augmented with a local lookup service for side-band provided # endpoint, public_key pairs and a local configuration option # for 'server' side private keys. 
self._network = Interconnect( network_endpoint, dispatcher=self._network_dispatcher, identity=identity, peer_connections=peer_list, secured=True, server_public_key=b'wFMwoOt>yFqI/ek.G[tfMMILHWw#vXB[Sv}>l>i)', server_private_key=b'r&oJ5aQDj4+V]p2:Lz70Eu0x#m%IwzBdP(}&hWM*') self._gossip = Gossip(self._network) block_sender = BroadcastBlockSender(completer, self._gossip) self._network_dispatcher.add_handler(validator_pb2.Message.GOSSIP_PING, PingHandler(), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_REGISTER, PeerRegisterHandler(), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_UNREGISTER, PeerUnregisterHandler(), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, GossipMessageHandler(), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, signature_verifier.GossipMessageSignatureVerifier(), process_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, GossipBroadcastHandler(gossip=self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, CompleterGossipHandler(completer), network_thread_pool) # Create and configure journal self._journal = Journal( consensus_module=dev_mode_consensus, block_store=block_store, state_view_factory=StateViewFactory(merkle_db), block_sender=block_sender, transaction_executor=executor, squash_handler=context_manager.get_squash_handler()) self._genesis_controller = GenesisController( context_manager=context_manager, transaction_executor=executor, completer=completer, block_store=block_store, identity_key=identity_signing_key, data_dir=data_dir) completer.set_on_batch_received(self._journal.on_batch_received) completer.set_on_block_received(self._journal.on_block_received) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, signature_verifier.BatchListSignatureVerifier(), process_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, CompleterBatchListBroadcastHandler(completer, self._gossip), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_LIST_REQUEST, client_handlers.StateListRequest(merkle_db, self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_GET_REQUEST, client_handlers.StateGetRequest(merkle_db, self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST, client_handlers.BlockGetRequest(self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST, client_handlers.BlockListRequest(self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST, client_handlers.StateCurrentRequest( self._journal.get_current_root), thread_pool)
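# The server keypair above is hardcoded deliberately (see the comment in
# the constructor). A deployment that instead wants a unique CURVE server
# identity per validator can generate one with pyzmq, assuming libzmq was
# built with CURVE support; this sketch shows only the key generation, not
# how this codebase would be configured to consume the keys.
import zmq

public_key, private_key = zmq.curve_keypair()
print(public_key)   # 40-character Z85-encoded public key (bytes)
print(private_key)  # matching secret key; keep it out of source control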
def setUp(self):
    self._context_manager = ContextManager(dict_database.DictDatabase())
    self._context = create_context('secp256k1')
    self._crypto_factory = CryptoFactory(self._context)
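# For context on the fixture above: the secp256k1 context plus
# CryptoFactory is the sawtooth_signing entry point used by these tests.
# A minimal usage sketch with an arbitrary payload (the calls shown are
# the library's public API):
from sawtooth_signing import CryptoFactory, create_context

context = create_context('secp256k1')
private_key = context.new_random_private_key()
signer = CryptoFactory(context).new_signer(private_key)

signature = signer.sign(b'payload-bytes')
assert context.verify(signature, b'payload-bytes',
                      signer.get_public_key())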
def test_add_batch_after_empty_iteration(self):
    """Tests that iteration will continue as a result of add_batch().

    This test calls next() on a scheduler iterator in a separate thread
    called the IteratorThread. The test waits until the IteratorThread
    is waiting in next(); internal to the scheduler, it will be waiting
    on a condition variable, as there are no transactions to return and
    the scheduler is not finalized. Then the test continues by running
    add_batch(), which should cause the next() running in the
    IteratorThread to return a transaction.

    This demonstrates the scheduler's ability to wait on an empty
    iterator but continue as transactions become available via
    add_batch.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    context_manager = ContextManager(dict_database.DictDatabase())
    squash_handler = context_manager.get_squash_handler()
    first_state_root = context_manager.get_first_root()
    scheduler = SerialScheduler(squash_handler, first_state_root)

    # Create a basic transaction and batch.
    txn = create_transaction(name='a',
                             private_key=private_key,
                             public_key=public_key)
    batch = create_batch(transactions=[txn],
                         private_key=private_key,
                         public_key=public_key)

    # This class is used to run the scheduler's iterator.
    class IteratorThread(threading.Thread):
        def __init__(self, iterable):
            threading.Thread.__init__(self)
            self._iterable = iterable
            self.ready = False
            self.condition = threading.Condition()
            self.txn_info = None

        def run(self):
            # Even with this lock here, there is a race condition between
            # exit of the lock and entry into the iterable. That is solved
            # by the sleep later in the test.
            with self.condition:
                self.ready = True
                self.condition.notify()
            txn_info = next(self._iterable)
            with self.condition:
                self.txn_info = txn_info
                self.condition.notify()

    # This is the iterable we are testing, which we will use in the
    # IteratorThread. We also use it in this thread below to test
    # for StopIteration.
    iterable = iter(scheduler)

    # Create and start the thread.
    thread = IteratorThread(iterable=iterable)
    thread.start()

    # Pause here to make sure the thread is as far along as possible; in
    # other words, right before we call next() in its run() method. When
    # this returns, there should be very little time until the iterator
    # is blocked on a condition variable.
    with thread.condition:
        while not thread.ready:
            thread.condition.wait()

    # May the daemons stay away during this dark time, and may we be
    # forgiven upon our return.
    time.sleep(1)

    # At this point, the IteratorThread should be waiting in next(), so
    # we go ahead and give it a batch.
    scheduler.add_batch(batch)

    # If all goes well, thread.txn_info will get set to the result of the
    # next() call. If not, the wait will time out and thread.txn_info
    # will be empty.
    with thread.condition:
        if thread.txn_info is None:
            thread.condition.wait(5)

    # If thread.txn_info is empty, the test failed, as iteration did not
    # continue after add_batch().
    self.assertIsNotNone(thread.txn_info, "iterable failed to return txn")
    self.assertEqual(txn.payload, thread.txn_info.txn.payload)

    # Continue with normal shutdown/cleanup.
    scheduler.finalize()
    scheduler.set_transaction_execution_result(txn.header_signature,
                                               False, None)
    with self.assertRaises(StopIteration):
        next(iterable)
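# What the test above exercises inside SerialScheduler, reduced to a
# standalone sketch: an iterator that blocks on a condition variable when
# nothing is available and wakes when an item arrives or the source is
# finalized. This illustrates the synchronization pattern only; it is not
# the scheduler's implementation.
import threading


class BlockingQueueIterator:
    def __init__(self):
        self._items = []
        self._finalized = False
        self._condition = threading.Condition()

    def add(self, item):
        with self._condition:
            self._items.append(item)
            self._condition.notify()

    def finalize(self):
        with self._condition:
            self._finalized = True
            self._condition.notify_all()

    def __iter__(self):
        return self

    def __next__(self):
        with self._condition:
            # Block until an item is available or no more will arrive.
            while not self._items and not self._finalized:
                self._condition.wait()
            if self._items:
                return self._items.pop(0)
            raise StopIteration()


iterator = BlockingQueueIterator()
result = []
consumer = threading.Thread(target=lambda: result.append(next(iterator)))
consumer.start()       # blocks inside __next__()
iterator.add('txn-a')  # wakes the consumer, like add_batch() in the test
consumer.join()
assert result == ['txn-a']
iterator.finalize()    # a subsequent next() now raises StopIteration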
def verify_state(bind_network, bind_component, scheduler_type, data_dir=None):
    """
    Verify that the state root hash of every block is present in state
    and, if not, reconstruct the missing state. Assumes that there are no
    "holes" in state, i.e., starting from genesis, state is present for
    all blocks up to some point and then not at all. If data_dir is None,
    state is recomputed in an in-memory database, so the state root
    hashes are verified without persisting the result.

    Raises:
        InvalidChainError: The chain in the blockstore is not valid.
        ExecutionError: An unrecoverable error was encountered during
            batch execution.
    """
    # Get the global state database to operate on
    if data_dir is not None:
        global_state_db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('verifying state in %s', global_state_db_filename)
        global_state_db = LMDBNoLockDatabase(global_state_db_filename, 'c')
    else:
        global_state_db = DictDatabase()

    state_view_factory = StateViewFactory(global_state_db)

    # Get the blockstore
    block_db_filename = os.path.join(
        data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = IndexedDatabase(
        block_db_filename,
        BlockStore.serialize_block,
        BlockStore.deserialize_block,
        flag='c',
        indexes=BlockStore.create_index_configuration())
    blockstore = BlockStore(block_db)

    # Check if we should do state verification
    start_block, prev_state_root = search_for_present_state_root(
        blockstore, state_view_factory)

    if start_block is None:
        LOGGER.info(
            "Skipping state verification: chain head's state root is present")
        return

    LOGGER.info("Recomputing missing state from block %s with %s scheduler",
                start_block, scheduler_type)

    component_thread_pool = InstrumentedThreadPoolExecutor(max_workers=10,
                                                           name='Component')
    component_dispatcher = Dispatcher()
    component_service = Interconnect(bind_component,
                                     component_dispatcher,
                                     secured=False,
                                     heartbeat=False,
                                     max_incoming_connections=20,
                                     monitor=True,
                                     max_future_callback_workers=10)
    context_manager = ContextManager(global_state_db)
    transaction_executor = TransactionExecutor(
        service=component_service,
        context_manager=context_manager,
        settings_view_factory=SettingsViewFactory(state_view_factory),
        scheduler_type=scheduler_type,
        invalid_observers=[])

    component_service.set_check_connections(
        transaction_executor.check_connections)

    component_dispatcher.add_handler(
        validator_pb2.Message.TP_RECEIPT_ADD_DATA_REQUEST,
        tp_state_handlers.TpReceiptAddDataHandler(context_manager),
        component_thread_pool)
    component_dispatcher.add_handler(
        validator_pb2.Message.TP_EVENT_ADD_REQUEST,
        tp_state_handlers.TpEventAddHandler(context_manager),
        component_thread_pool)
    component_dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_DELETE_REQUEST,
        tp_state_handlers.TpStateDeleteHandler(context_manager),
        component_thread_pool)
    component_dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_GET_REQUEST,
        tp_state_handlers.TpStateGetHandler(context_manager),
        component_thread_pool)
    component_dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_SET_REQUEST,
        tp_state_handlers.TpStateSetHandler(context_manager),
        component_thread_pool)
    component_dispatcher.add_handler(
        validator_pb2.Message.TP_REGISTER_REQUEST,
        processor_handlers.ProcessorRegisterHandler(
            transaction_executor.processors),
        component_thread_pool)
    component_dispatcher.add_handler(
        validator_pb2.Message.TP_UNREGISTER_REQUEST,
        processor_handlers.ProcessorUnRegisterHandler(
            transaction_executor.processors),
        component_thread_pool)

    component_dispatcher.start()
    component_service.start()

    process_blocks(
        initial_state_root=prev_state_root,
        blocks=blockstore.get_block_iter(start_block=start_block,
                                         reverse=False),
        transaction_executor=transaction_executor,
        context_manager=context_manager,
        state_view_factory=state_view_factory)

    component_dispatcher.stop()
    component_service.stop()
    component_thread_pool.shutdown(wait=True)
    transaction_executor.stop()
    context_manager.stop()
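# search_for_present_state_root() is called above but defined elsewhere.
# Under the "no holes" assumption in the docstring, its contract amounts
# to scanning the chain from genesis forward for the first block whose
# state root is absent, returning that block plus the last state root
# that is present. The sketch below models only that contract with
# hypothetical names; it is not the actual implementation.
def find_first_missing(blocks, state_root_is_present, initial_root):
    """blocks: iterable of (block_id, state_root) from genesis forward.

    Returns (block_to_restart_from, previous_state_root), or
    (None, None) when every state root is present.
    """
    prev_root = initial_root
    for block_id, state_root in blocks:
        if not state_root_is_present(state_root):
            return block_id, prev_root
        prev_root = state_root
    return None, None


# Toy run: state exists for the first two blocks only.
present = {'r0', 'r1'}
chain = [('b0', 'r0'), ('b1', 'r1'), ('b2', 'r2'), ('b3', 'r3')]
assert find_first_missing(chain, present.__contains__, 'genesis') == \
    ('b2', 'r1')  # recompute from b2, starting from b1's state root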
def setUp(self):
    self.context_manager = ContextManager(dict_database.DictDatabase())
    squash_handler = self.context_manager.get_squash_handler()
    self.first_state_root = self.context_manager.get_first_root()
    self.scheduler = SerialScheduler(squash_handler,
                                     self.first_state_root)
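# A condensed sketch of the scheduler lifecycle this fixture supports,
# written as a hypothetical test method for the class above. It reuses
# only calls that already appear in these tests (signing,
# create_transaction, create_batch, add_batch, finalize,
# set_transaction_execution_result); the method name is illustrative.
def test_lifecycle_sketch(self):
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    txn = create_transaction(name='a',
                             private_key=private_key,
                             public_key=public_key)
    batch = create_batch(transactions=[txn],
                         private_key=private_key,
                         public_key=public_key)

    iterator = iter(self.scheduler)
    self.scheduler.add_batch(batch)        # work becomes available
    txn_info = next(iterator)              # yields the queued transaction
    self.assertEqual(txn.payload, txn_info.txn.payload)

    self.scheduler.finalize()              # no further batches will come
    self.scheduler.set_transaction_execution_result(
        txn.header_signature, False, None)
    with self.assertRaises(StopIteration):
        next(iterator)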
def __init__(self, network_endpoint, component_endpoint, peer_list):
    data_dir = os.path.expanduser('~')
    db_filename = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(network_endpoint[-2:]))
    LOGGER.debug('database file is %s', db_filename)
    lmdb = LMDBNoLockDatabase(db_filename, 'n')
    context_manager = ContextManager(lmdb)

    block_db_filename = os.path.join(data_dir, 'block.lmdb')
    LOGGER.debug('block store file is %s', block_db_filename)

    # block_store = LMDBNoLockDatabase(block_db_filename, 'n')
    # The LMDB store above is not currently being used, but something
    # like it will be once Journal takes a block_store that isn't a dict.
    block_store = {}

    # setup network
    self._dispatcher = Dispatcher()

    completer = Completer(block_store)

    thread_pool = ThreadPoolExecutor(max_workers=10)
    process_pool = ProcessPoolExecutor(max_workers=3)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_GET_REQUEST,
        tp_state_handlers.TpStateGetHandler(context_manager),
        thread_pool)
    self._dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_SET_REQUEST,
        tp_state_handlers.TpStateSetHandler(context_manager),
        thread_pool)
    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
        client_handlers.StateGetRequestHandler(lmdb),
        thread_pool)
    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
        client_handlers.StateListRequestHandler(lmdb),
        thread_pool)

    self._service = Interconnect(component_endpoint, self._dispatcher)

    executor = TransactionExecutor(self._service, context_manager)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_REGISTER_REQUEST,
        ProcessorRegisterHandler(executor.processors),
        thread_pool)

    identity = hashlib.sha512(time.time().hex().encode()).hexdigest()[:23]

    network_thread_pool = ThreadPoolExecutor(max_workers=10)
    self._network_dispatcher = Dispatcher()

    self._network = Interconnect(network_endpoint,
                                 dispatcher=self._network_dispatcher,
                                 identity=identity,
                                 peer_connections=peer_list)
    self._gossip = Gossip(self._network)

    block_sender = BroadcastBlockSender(completer, self._gossip)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_PING,
        PingHandler(),
        network_thread_pool)
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_REGISTER,
        PeerRegisterHandler(),
        network_thread_pool)
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_UNREGISTER,
        PeerUnregisterHandler(),
        network_thread_pool)
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        GossipMessageHandler(),
        network_thread_pool)
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        signature_verifier.GossipMessageSignatureVerifier(),
        process_pool)
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        GossipBroadcastHandler(gossip=self._gossip),
        network_thread_pool)
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        CompleterGossipHandler(completer),
        network_thread_pool)

    # Create and configure journal
    self._journal = Journal(
        consensus=dev_mode_consensus,
        block_store=block_store,
        block_sender=block_sender,
        transaction_executor=executor,
        squash_handler=context_manager.get_squash_handler())

    self._genesis_controller = GenesisController(
        context_manager=context_manager,
        transaction_executor=executor,
        completer=completer,
        block_store=block_store,
        data_dir=data_dir)

    completer.set_on_batch_received(self._journal.on_batch_received)
    completer.set_on_block_received(self._journal.on_block_received)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
        signature_verifier.BatchListSignatureVerifier(),
        process_pool)
    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
        CompleterBatchListBroadcastHandler(completer, self._gossip),
        thread_pool)
    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
        client_handlers.StateGetRequestHandler(lmdb),
        thread_pool)
    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
        client_handlers.StateListRequestHandler(lmdb),
        thread_pool)
    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
        client_handlers.BlockGetRequestHandler(
            self._journal.get_block_store()),
        thread_pool)
    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
        client_handlers.BlockListRequestHandler(
            self._journal.get_block_store()),
        thread_pool)
    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
        client_handlers.StateCurrentRequestHandler(
            self._journal.get_current_root),
        thread_pool)
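# Both constructor versions derive their LMDB file names from the last
# two characters of the endpoint (normally the last two digits of the
# port), so two validators on one host get distinct database files:
network_endpoint = 'tcp://127.0.0.1:8800'  # illustrative value
suffix = network_endpoint[-2:]             # '00'
assert 'merkle-{}.lmdb'.format(suffix) == 'merkle-00.lmdb'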
def __init__(self, bind_network, bind_component, bind_consensus, endpoint,
             peering, seeds_list, peer_list, data_dir, config_dir,
             identity_signer, scheduler_type, permissions,
             minimum_peer_connectivity, maximum_peer_connectivity,
             max_dag_branch, network_public_key=None,
             network_private_key=None, roles=None, metrics_registry=None):
    """Constructs a validator instance.

    Args:
        bind_network (str): the network endpoint
        bind_component (str): the component endpoint
        endpoint (str): the zmq-style URI of this validator's publicly
            reachable endpoint
        peering (str): The type of peering approach. Either 'static'
            or 'dynamic'. In 'static' mode, no attempted topology
            buildout occurs -- the validator only attempts to initiate
            peering connections with endpoints specified in the
            peer_list. In 'dynamic' mode, the validator will first
            attempt to initiate peering connections with endpoints
            specified in the peer_list and then attempt to do a
            topology buildout starting with peer lists obtained from
            endpoints in the seeds_list. In either mode, the validator
            will accept incoming peer requests up to max_peers.
        seeds_list (list of str): a list of addresses to connect to in
            order to perform the initial topology buildout
        peer_list (list of str): a list of peer addresses
        data_dir (str): path to the data directory
        config_dir (str): path to the config directory
        identity_signer (Signer): cryptographic signer the validator
            uses for signing
    """
    # -- Setup Global State Database and Factory -- #
    global_state_db_filename = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('global state database file is %s',
                 global_state_db_filename)
    global_state_db = LMDBNoLockDatabase(global_state_db_filename, 'c')
    state_view_factory = StateViewFactory(global_state_db)

    # -- Setup Receipt Store -- #
    receipt_db_filename = os.path.join(
        data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('txn receipt store file is %s', receipt_db_filename)
    receipt_db = IndexedDatabase(
        receipt_db_filename,
        TransactionReceiptStore.serialize_receipt,
        TransactionReceiptStore.deserialize_receipt,
        flag='c',
        dupsort=True,
        indexes=TransactionReceiptStore.create_index_configuration())
    receipt_store = TransactionReceiptStore(receipt_db)

    # -- Setup Block Store -- #
    block_db_filename = os.path.join(
        data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = IndexedDatabase(
        block_db_filename,
        BlockStore.serialize_block,
        BlockStore.deserialize_block,
        flag='c',
        indexes=BlockStore.create_index_configuration())
    block_store = BlockStore(block_db)
    # for DAG, the block store needs the merkle (global state) db
    block_store.set_global_state_db(global_state_db)

    block_cache = BlockCache(block_store,
                             keep_time=300,
                             purge_frequency=30)
    # The cache keep time for the journal's block cache must be greater
    # than the cache keep time used by the completer.
base_keep_time = 1200 block_manager = BlockManager() block_manager.add_store("commit_store", block_store) # -- Setup Thread Pools -- # component_thread_pool = InstrumentedThreadPoolExecutor( max_workers=10, name='Component', metrics_registry=metrics_registry) network_thread_pool = InstrumentedThreadPoolExecutor( max_workers=10, name='Network', metrics_registry=metrics_registry) sig_pool = InstrumentedThreadPoolExecutor( max_workers=3, name='Signature', metrics_registry=metrics_registry) # -- Setup Dispatchers -- # self._metrics_registry = metrics_registry component_dispatcher = Dispatcher(metrics_registry=metrics_registry) network_dispatcher = Dispatcher(metrics_registry=metrics_registry) # -- Setup Services -- # component_service = Interconnect(bind_component, component_dispatcher, secured=False, heartbeat=False, max_incoming_connections=20, monitor=True, max_future_callback_workers=10, metrics_registry=metrics_registry) zmq_identity = hashlib.sha512( time.time().hex().encode()).hexdigest()[:23] secure = False if network_public_key is not None and network_private_key is not None: secure = True network_service = Interconnect(bind_network, dispatcher=network_dispatcher, zmq_identity=zmq_identity, secured=secure, server_public_key=network_public_key, server_private_key=network_private_key, heartbeat=True, public_endpoint=endpoint, connection_timeout=120, max_incoming_connections=100, max_future_callback_workers=10, authorize=True, signer=identity_signer, roles=roles, metrics_registry=metrics_registry) # -- Setup Transaction Execution Platform -- # context_manager = ContextManager(global_state_db) batch_tracker = BatchTracker(block_store) settings_cache = SettingsCache( SettingsViewFactory(state_view_factory), ) executor = TransactionExecutor( service=component_service, context_manager=context_manager, settings_view_factory=SettingsViewFactory(state_view_factory), scheduler_type=scheduler_type, invalid_observers=[batch_tracker], metrics_registry=metrics_registry) component_service.set_check_connections(executor.check_connections) event_broadcaster = EventBroadcaster(component_service, block_store, receipt_store) # -- Consensus Engine -- # consensus_thread_pool = InstrumentedThreadPoolExecutor( max_workers=3, name='Consensus', metrics_registry=metrics_registry) consensus_dispatcher = Dispatcher() consensus_service = Interconnect(bind_consensus, consensus_dispatcher, secured=False, heartbeat=False, max_incoming_connections=20, monitor=True, max_future_callback_workers=10) consensus_notifier = ConsensusNotifier(consensus_service) # -- Setup P2P Networking -- # gossip = Gossip( network_service, settings_cache, block_store.chain_head_state_root, block_store.get_recovery_mode, block_store.get_chain_heads, consensus_notifier, endpoint=endpoint, component=bind_component, # for DASHBOARD peering_mode=peering, initial_seed_endpoints=seeds_list, initial_peer_endpoints=peer_list, minimum_peer_connectivity=minimum_peer_connectivity, maximum_peer_connectivity=maximum_peer_connectivity, topology_check_frequency=1 # signer=identity_signer own key ) completer = Completer(block_store, gossip) block_sender = BroadcastBlockSender(completer, gossip) batch_sender = BroadcastBatchSender(completer, gossip) chain_id_manager = ChainIdManager(data_dir) identity_view_factory = IdentityViewFactory( StateViewFactory(global_state_db)) id_cache = IdentityCache(identity_view_factory) # -- Setup Permissioning -- # permission_verifier = PermissionVerifier( permissions, block_store.chain_head_state_root, id_cache) 
identity_observer = IdentityObserver(to_update=id_cache.invalidate, forked=id_cache.forked) settings_observer = SettingsObserver( to_update=settings_cache.invalidate, forked=settings_cache.forked) # -- Setup Journal -- # batch_injector_factory = DefaultBatchInjectorFactory( block_store=block_store, state_view_factory=state_view_factory, signer=identity_signer) block_publisher = BlockPublisher( transaction_executor=executor, block_cache=block_cache, state_view_factory=state_view_factory, settings_cache=settings_cache, block_sender=block_sender, batch_sender=batch_sender, squash_handler=context_manager.get_squash_handler(), context_handlers=context_manager.get_context_handlers(), chain_head=block_store.chain_head, identity_signer=identity_signer, data_dir=data_dir, config_dir=config_dir, permission_verifier=permission_verifier, check_publish_block_frequency=0.1, batch_observers=[batch_tracker], batch_injector_factory=batch_injector_factory, metrics_registry=metrics_registry, consensus_notifier=consensus_notifier ) # for external engine control chain_controller = ChainController( block_sender=block_sender, block_cache=block_cache, state_view_factory=state_view_factory, transaction_executor=executor, chain_head_lock=block_publisher.chain_head_lock, on_chain_updated=block_publisher.on_chain_updated, on_head_updated=block_publisher.on_head_updated, on_topology_updated=block_publisher.on_topology_updated, get_recompute_context=block_publisher.get_recompute_context, belong_cluster=block_publisher.belong_cluster, squash_handler=context_manager.get_squash_handler(), context_handlers=context_manager.get_context_handlers(), chain_id_manager=chain_id_manager, identity_signer=identity_signer, data_dir=data_dir, config_dir=config_dir, permission_verifier=permission_verifier, chain_observers=[ event_broadcaster, receipt_store, batch_tracker, identity_observer, settings_observer ], metrics_registry=metrics_registry, consensus_notifier=consensus_notifier, block_manager=block_manager, max_dag_branch=max_dag_branch) # for external engine control genesis_controller = GenesisController( context_manager=context_manager, transaction_executor=executor, completer=completer, block_manager=block_manager, block_store=block_store, state_view_factory=state_view_factory, identity_signer=identity_signer, data_dir=data_dir, config_dir=config_dir, chain_id_manager=chain_id_manager, batch_sender=batch_sender) responder = Responder(completer) completer.set_on_batch_received(block_publisher.queue_batch) completer.set_on_block_received(chain_controller.queue_block) completer.set_chain_has_block( chain_controller.has_block, chain_controller.has_genesis_federation_block, chain_controller.is_nests_ready) # -- Register Message Handler -- # network_handlers.add(network_dispatcher, network_service, gossip, completer, responder, network_thread_pool, sig_pool, chain_controller.has_block, block_publisher.has_batch, permission_verifier, block_publisher, consensus_notifier) component_handlers.add(component_dispatcher, gossip, context_manager, executor, completer, block_store, batch_tracker, global_state_db, self.get_chain_head_state_root_hash, receipt_store, event_broadcaster, permission_verifier, component_thread_pool, sig_pool, block_publisher, metrics_registry) # -- Store Object References -- # self._component_dispatcher = component_dispatcher self._component_service = component_service self._component_thread_pool = component_thread_pool self._network_dispatcher = network_dispatcher self._network_service = network_service 
    self._network_thread_pool = network_thread_pool

    LOGGER.debug("ConsensusProxy: INIT scheduler_type=%s", scheduler_type)
    consensus_proxy = ConsensusProxy(
        block_manager=block_manager,
        chain_controller=chain_controller,
        block_publisher=block_publisher,
        gossip=gossip,
        identity_signer=identity_signer,
        settings_view_factory=SettingsViewFactory(state_view_factory),
        state_view_factory=state_view_factory)

    consensus_handlers.add(consensus_dispatcher, consensus_thread_pool,
                           consensus_proxy, consensus_notifier)

    self._consensus_dispatcher = consensus_dispatcher
    self._consensus_service = consensus_service
    self._consensus_thread_pool = consensus_thread_pool

    self._sig_pool = sig_pool

    self._context_manager = context_manager
    self._executor = executor
    self._genesis_controller = genesis_controller
    self._gossip = gossip
    self._block_publisher = block_publisher
    self._chain_controller = chain_controller
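# The references stored above are what a start/stop lifecycle needs.
# verify_state() earlier in this document shows the teardown convention
# (dispatchers and services stop before thread pools shut down, with the
# executor and context manager last); below is a hedged sketch of a stop()
# method following that same convention -- the method name and exact
# ordering here are assumptions, not this class's verbatim implementation.
def stop(self):
    self._component_dispatcher.stop()
    self._component_service.stop()
    self._network_dispatcher.stop()
    self._network_service.stop()
    self._consensus_dispatcher.stop()
    self._consensus_service.stop()

    self._component_thread_pool.shutdown(wait=True)
    self._network_thread_pool.shutdown(wait=True)
    self._consensus_thread_pool.shutdown(wait=True)
    self._sig_pool.shutdown(wait=True)

    self._executor.stop()
    self._context_manager.stop()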