Example #1
    def test_add_subscriber(self):
        """Test adding a subscriber, who has no known blocks.

        This scenario is valid for subscribers who have never connected and
        would need to receive all deltas since the genesis block.

        On registration, the subscriber should receive one event, consisting
        of the state changes for the genesis block.
        """
        mock_service = Mock()
        block_tree_manager = BlockTreeManager()

        delta_store = StateDeltaStore(DictDatabase())

        delta_processor = StateDeltaProcessor(
            service=mock_service,
            state_delta_store=delta_store,
            block_store=block_tree_manager.block_store)

        delta_store.save_state_deltas(
            block_tree_manager.chain_head.state_root_hash,
            [StateChange(address='deadbeef0000000',
                         value='my_genesis_value'.encode(),
                         type=StateChange.SET),
             StateChange(address='a14ea01',
                         value='some other state value'.encode(),
                         type=StateChange.SET)])

        delta_processor.add_subscriber(
            'test_conn_id',
            [],
            ['deadbeef'])

        self.assertEqual(['test_conn_id'], delta_processor.subscriber_ids)

        # Test that the subscriber catches up and receives the events from
        # the chain head. In this case, there should be only one event, as
        # the chain head is the genesis block.
        chain_head = block_tree_manager.chain_head
        mock_service.send.assert_called_with(
            validator_pb2.Message.STATE_DELTA_EVENT,
            StateDeltaEvent(
                block_id=chain_head.identifier,
                block_num=chain_head.block_num,
                state_root_hash=chain_head.state_root_hash,
                previous_block_id=chain_head.previous_block_id,
                state_changes=[StateChange(address='deadbeef0000000',
                               value='my_genesis_value'.encode(),
                               type=StateChange.SET)]
            ).SerializeToString(),
            connection_id='test_conn_id')
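The assertion above implies that the processor filters the stored deltas by the subscriber's address prefixes before building the event: both changes were saved against the genesis state root, but only the one under 'deadbeef' is delivered. A minimal sketch of that filtering step, assuming prefix matching on the address string; the namedtuple and the helper name filter_changes_by_prefixes are stand-ins, not the real protobuf class or processor method:

    from collections import namedtuple

    # Stand-in for the protobuf StateChange message used in the tests above.
    StateChange = namedtuple('StateChange', ['address', 'value'])

    def filter_changes_by_prefixes(state_changes, address_prefixes):
        # Keep only the changes whose address starts with a subscribed prefix.
        return [change for change in state_changes
                if any(change.address.startswith(prefix)
                       for prefix in address_prefixes)]

    changes = [StateChange('deadbeef0000000', b'my_genesis_value'),
               StateChange('a14ea01', b'some other state value')]
    filtered = filter_changes_by_prefixes(changes, ['deadbeef'])
    assert [c.address for c in filtered] == ['deadbeef0000000']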
Example #2
    def test_add_subscriber_unknown_block_id(self):
        """Test adding a subscriber, whose known block id is not in the
        current chain.
        """
        block_tree_manager = BlockTreeManager()

        delta_processor = StateDeltaProcessor(
            service=Mock(),
            state_delta_store=Mock(),
            block_store=block_tree_manager.block_store)

        with self.assertRaises(NoKnownBlockError):
            delta_processor.add_subscriber('test_conn_id', ['deadbeefb10c4'],
                                           ['deadbeef'])
Example #3
    def test_is_valid_subscription_no_known_blocks(self):
        """Test that a check for a valid subscription with no known block ids
        returns True.
        """
        mock_service = Mock()
        block_tree_manager = BlockTreeManager()

        delta_store = StateDeltaStore(DictDatabase())

        delta_processor = StateDeltaProcessor(
            service=mock_service,
            state_delta_store=delta_store,
            block_store=block_tree_manager.block_store)

        self.assertTrue(delta_processor.is_valid_subscription([]))
Example #4
    def test_register_with_unknown_block_ids(self):
        """Tests that the handler will respond with an UNKNOWN_BLOCK
        when a subscriber does not supply a known block id in
        last_known_block_ids
        """
        block_tree_manager = BlockTreeManager()

        delta_processor = StateDeltaProcessor(
            service=Mock(),
            state_delta_store=Mock(),
            block_store=block_tree_manager.block_store)

        handler = StateDeltaSubscriberValidationHandler(delta_processor)

        request = StateDeltaSubscribeRequest(
            last_known_block_ids=['a'],
            address_prefixes=['000000']).SerializeToString()

        response = handler.handle('test_conn_id', request)

        self.assertEqual(HandlerStatus.RETURN, response.status)

        self.assertEqual(
            StateDeltaSubscribeResponse.UNKNOWN_BLOCK,
            response.message_out.status)
Example #5
    def test_is_valid_subscription_known_chain_head(self):
        """Test that a check for a valid subscription with the known block id
        is the chain head returns True.
        """
        mock_service = Mock()
        block_tree_manager = BlockTreeManager()

        delta_store = StateDeltaStore(DictDatabase())

        delta_processor = StateDeltaProcessor(
            service=mock_service,
            state_delta_store=delta_store,
            block_store=block_tree_manager.block_store)

        self.assertTrue(delta_processor.is_valid_subscription([
            block_tree_manager.chain_head.identifier]))
Example #6
    def test_add_subscriber(self):
        """Test adding a subscriber, who has no known blocks.

        This scenario is valid for subscribers who have never connected and
        would need to receive all deltas since the genesis block.
        """
        mock_service = Mock()
        block_tree_manager = BlockTreeManager()

        delta_processor = StateDeltaProcessor(
            service=mock_service,
            state_delta_store=Mock(),
            block_store=block_tree_manager.block_store)

        delta_processor.add_subscriber('test_conn_id', [], ['deadbeef'])

        self.assertEqual(['test_conn_id'], delta_processor.subscriber_ids)
Example #7
    def test_add_subscriber_known_block_id(self):
        """Test adding a subscriber, whose known block id is the current
        chainhead.
        """

        mock_service = Mock()
        block_tree_manager = BlockTreeManager()

        delta_processor = StateDeltaProcessor(
            service=mock_service,
            state_delta_store=Mock(),
            block_store=block_tree_manager.block_store)

        delta_processor.add_subscriber(
            'test_conn_id', [block_tree_manager.chain_head.identifier],
            ['deadbeef'])

        self.assertEqual(['test_conn_id'], delta_processor.subscriber_ids)

        self.assertFalse(mock_service.send.called)
Example #8
    def test_is_valid_subscription_known_old_chain(self):
        """Test that a check for a valid subscription where the known block id
        is a block in the middle of the chain should return True.
        """
        mock_service = Mock()
        block_tree_manager = BlockTreeManager()

        chain = block_tree_manager.generate_chain(
            block_tree_manager.chain_head, 5)

        # Grab an id from the middle of the chain
        known_id = chain[3].identifier
        block_tree_manager.block_store.update_chain(chain)

        delta_store = StateDeltaStore(DictDatabase())
        delta_processor = StateDeltaProcessor(
            service=mock_service,
            state_delta_store=delta_store,
            block_store=block_tree_manager.block_store)

        self.assertTrue(delta_processor.is_valid_subscription([known_id]))
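Examples #3, #5, and #8 together suggest the validity rule the processor applies: an empty list of known block ids is always acceptable (the subscriber simply catches up from genesis), and otherwise at least one supplied id must exist in the current chain. A minimal sketch of that rule under those assumptions; the standalone function and the plain set of block ids are illustrative, not the processor's actual implementation:

    def is_valid_subscription(chain_block_ids, last_known_block_ids):
        # No known blocks: always valid, the subscriber catches up from genesis.
        if not last_known_block_ids:
            return True
        # Otherwise at least one supplied id must be on the current chain.
        return any(block_id in chain_block_ids
                   for block_id in last_known_block_ids)

    chain_block_ids = {'genesis-id', 'block-1-id', 'block-2-id'}
    assert is_valid_subscription(chain_block_ids, [])
    assert is_valid_subscription(chain_block_ids, ['block-1-id'])
    assert not is_valid_subscription(chain_block_ids, ['deadbeefb10c4'])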
Example #9
    def test_publish_deltas_no_state_changes(self):
        """Given a block transition, where no state changes happened (e.g. it
        only had transactions which did not change state), the
        StateDeltaProcessor should still publish an event with the block change
        information.
        """
        mock_service = Mock()
        block_tree_manager = BlockTreeManager()

        database = DictDatabase()
        delta_store = StateDeltaStore(database)

        delta_processor = StateDeltaProcessor(
            service=mock_service,
            state_delta_store=delta_store,
            block_store=block_tree_manager.block_store)

        delta_processor.add_subscriber(
            'subscriber_conn_id', [block_tree_manager.chain_head.identifier],
            ['000000'])

        next_block = block_tree_manager.generate_block()
        delta_processor.publish_deltas(next_block)

        mock_service.send.assert_called_with(
            validator_pb2.Message.STATE_DELTA_EVENT,
            StateDeltaEvent(
                block_id=next_block.identifier,
                block_num=next_block.header.block_num,
                state_root_hash=next_block.header.state_root_hash,
                previous_block_id=next_block.header.previous_block_id,
                state_changes=[]).SerializeToString(),
            connection_id='subscriber_conn_id')
Example #10
    def test_add_subscriber(self):
        """Tests that the handler for adding the subscriptions will properly
        add a subscriber.
        """
        block_tree_manager = BlockTreeManager()
        delta_processor = StateDeltaProcessor(
            service=Mock(),
            state_delta_store=Mock(),
            block_store=block_tree_manager.block_store)

        handler = StateDeltaAddSubscriberHandler(delta_processor)

        request = RegisterStateDeltaSubscriberRequest(
            last_known_block_ids=[block_tree_manager.chain_head.identifier],
            address_prefixes=['0123456']).SerializeToString()

        response = handler.handle('test_conn_id', request)
        self.assertEqual(HandlerStatus.PASS, response.status)

        self.assertEqual(['test_conn_id'], delta_processor.subscriber_ids)
Example #11
    def test_register_subscriber(self):
        """Tests that the handler will add a valid subscriber and return an OK
        response.
        """
        mock_block_store = {'a': Mock()}
        delta_processor = StateDeltaProcessor(service=Mock(),
                                              state_delta_store=Mock(),
                                              block_store=mock_block_store)

        handler = StateDeltaSubscriberHandler(delta_processor)

        request = RegisterStateDeltaSubscriberRequest(
            last_known_block_ids=['a'],
            address_prefixes=['000000']).SerializeToString()

        response = handler.handle('test_conn_id', request)

        self.assertEqual(HandlerStatus.RETURN, response.status)

        self.assertEqual(RegisterStateDeltaSubscriberResponse.OK,
                         response.message_out.status)
Example #12
    def test_publish_deltas(self):
        """Tests that a subscriber filtering on an address prefix receives only
        the changes in an event that match.
        """
        mock_service = Mock()
        block_tree_manager = BlockTreeManager()

        database = DictDatabase()
        delta_store = StateDeltaStore(database)

        delta_processor = StateDeltaProcessor(
            service=mock_service,
            state_delta_store=delta_store,
            block_store=block_tree_manager.block_store)

        delta_processor.add_subscriber(
            'test_conn_id', [block_tree_manager.chain_head.identifier],
            ['deadbeef'])

        next_block = block_tree_manager.generate_block()
        # State added during context squash for our block
        delta_store.save_state_deltas(next_block.header.state_root_hash, [
            StateChange(address='deadbeef01',
                        value='my_state_Value'.encode(),
                        type=StateChange.SET),
            StateChange(address='a14ea01',
                        value='some other state value'.encode(),
                        type=StateChange.SET)
        ])

        # call to publish deltas for that block to the subscribers
        delta_processor.publish_deltas(next_block)

        mock_service.send.assert_called_with(
            validator_pb2.Message.STATE_DELTA_EVENT,
            StateDeltaEvent(
                block_id=next_block.identifier,
                block_num=next_block.header.block_num,
                state_root_hash=next_block.header.state_root_hash,
                previous_block_id=next_block.header.previous_block_id,
                state_changes=[
                    StateChange(address='deadbeef01',
                                value='my_state_Value'.encode(),
                                type=StateChange.SET)
                ]).SerializeToString(),
            connection_id='test_conn_id')
Example #13
    def test_register_subscriber(self):
        """Tests that the handler will validate a correct subscriber and return an OK
        response.
        """
        block_tree_manager = BlockTreeManager()

        delta_processor = StateDeltaProcessor(
            service=Mock(),
            state_delta_store=Mock(),
            block_store=block_tree_manager.block_store)

        handler = StateDeltaSubscriberValidationHandler(delta_processor)

        request = RegisterStateDeltaSubscriberRequest(
            last_known_block_ids=[block_tree_manager.chain_head.identifier],
            address_prefixes=['000000']).SerializeToString()

        response = handler.handle('test_conn_id', request)

        self.assertEqual(HandlerStatus.RETURN_AND_PASS, response.status)

        self.assertEqual(RegisterStateDeltaSubscriberResponse.OK,
                         response.message_out.status)
Example #14
    def test_publish_deltas_subscriber_matches_no_addresses(self):
        """Given a subscriber whose prefix filters don't match any addresses
        in the current state delta, it should still receive an event with the
        block change information.
        """
        mock_service = Mock()
        block_tree_manager = BlockTreeManager()

        database = DictDatabase()
        delta_store = StateDeltaStore(database)

        delta_processor = StateDeltaProcessor(
            service=mock_service,
            state_delta_store=delta_store,
            block_store=block_tree_manager.block_store)

        delta_processor.add_subscriber(
            'settings_conn_id',
            [block_tree_manager.chain_head.identifier],
            ['000000'])

        next_block = block_tree_manager.generate_block()
        # State added during context squash for our block
        delta_store.save_state_deltas(
            next_block.header.state_root_hash,
            [StateChange(address='deadbeef01',
                         value='my_state_Value'.encode(),
                         type=StateChange.SET),
             StateChange(address='a14ea01',
                         value='some other state value'.encode(),
                         type=StateChange.SET)])

        # call to publish deltas for that block to the subscribers
        delta_processor.publish_deltas(next_block)

        mock_service.send.assert_called_with(
            validator_pb2.Message.STATE_DELTA_EVENT,
            StateDeltaEvent(
                block_id=next_block.identifier,
                block_num=next_block.header.block_num,
                state_root_hash=next_block.header.state_root_hash,
                state_changes=[]
            ).SerializeToString(),
            connection_id='settings_conn_id')
Example #15
    def test_register_with_unknown_block_ids(self):
        """Tests that the handler will respond with a DROP
        when a subscriber does not supply a known block id in
        last_known_block_ids, but the subscriber is added.  It passed
        validation, but a fork may have occurred between validation and now.
        """
        block_tree_manager = BlockTreeManager()

        delta_processor = StateDeltaProcessor(
            service=Mock(),
            state_delta_store=Mock(),
            block_store=block_tree_manager.block_store)

        handler = StateDeltaAddSubscriberHandler(delta_processor)

        request = RegisterStateDeltaSubscriberRequest(
            last_known_block_ids=['a'],
            address_prefixes=['000000']).SerializeToString()

        response = handler.handle('test_conn_id', request)

        self.assertEqual(HandlerStatus.DROP, response.status)

        self.assertEqual(['test_conn_id'], delta_processor.subscriber_ids)
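Examples #4, #10, #13, and #15 sketch a two-phase registration pipeline for the subscribe request: a validation handler answers first (RETURN with UNKNOWN_BLOCK to reject, or RETURN_AND_PASS with OK to let the message continue), and the add-subscriber handler then runs, returning PASS or DROP depending on whether it has anything further to send. The sketch below only illustrates how such an ordered handler chain could be driven from those statuses; run_handler_chain and the handler call signature are hypothetical, not the dispatcher's real API:

    from enum import Enum, auto

    class HandlerStatus(Enum):
        RETURN = auto()           # send message_out to the client and stop
        RETURN_AND_PASS = auto()  # send message_out and continue to the next handler
        PASS = auto()             # continue to the next handler, send nothing
        DROP = auto()             # stop without sending anything

    def run_handler_chain(handlers, connection_id, message_content, send):
        # Drive the handlers registered for a message type, in order.
        for handler in handlers:
            status, message_out = handler(connection_id, message_content)
            if status in (HandlerStatus.RETURN, HandlerStatus.RETURN_AND_PASS):
                send(connection_id, message_out)
            if status in (HandlerStatus.RETURN, HandlerStatus.DROP):
                break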
Example #16
    def test_remove_subscriber(self):
        """Test adding a subscriber, whose known block id is the current
        chainhead, followed by removing the subscriber.
        """
        mock_service = Mock()
        block_tree_manager = BlockTreeManager()

        delta_processor = StateDeltaProcessor(
            service=mock_service,
            state_delta_store=Mock(),
            block_store=block_tree_manager.block_store)

        delta_processor.add_subscriber(
            'test_conn_id', [block_tree_manager.chain_head.identifier],
            ['deadbeef',
             'beefdead'])

        self.assertEqual(['test_conn_id'], delta_processor.subscriber_ids)

        delta_processor.remove_subscriber('test_conn_id')

        self.assertEqual([], delta_processor.subscriber_ids)
Example #17
    def __init__(self, network_endpoint, component_endpoint, public_uri,
                 peering, join_list, peer_list, data_dir, config_dir,
                 identity_signing_key):
        """Constructs a validator instance.

        Args:
            network_endpoint (str): the network endpoint
            component_endpoint (str): the component endpoint
            public_uri (str): the zmq-style URI of this validator's
                publicly reachable endpoint
            peering (str): The type of peering approach. Either 'static'
                or 'dynamic'. In 'static' mode, no attempted topology
                buildout occurs -- the validator only attempts to initiate
                peering connections with endpoints specified in the
                peer_list. In 'dynamic' mode, the validator will first
                attempt to initiate peering connections with endpoints
                specified in the peer_list and then attempt to do a
                topology buildout starting with peer lists obtained from
                endpoints in the join_list. In either mode, the validator
                will accept incoming peer requests up to max_peers.
            join_list (list of str): a list of addresses to connect
                to in order to perform the initial topology buildout
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            config_dir (str): path to the config directory
            identity_signing_key (str): key validator uses for signing
        """
        db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('database file is %s', db_filename)

        merkle_db = LMDBNoLockDatabase(db_filename, 'c')

        delta_db_filename = os.path.join(
            data_dir, 'state-deltas-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('state delta store file is %s', delta_db_filename)
        state_delta_db = LMDBNoLockDatabase(delta_db_filename, 'c')

        state_delta_store = StateDeltaStore(state_delta_db)

        context_manager = ContextManager(merkle_db, state_delta_store)
        self._context_manager = context_manager

        state_view_factory = StateViewFactory(merkle_db)

        block_db_filename = os.path.join(
            data_dir, 'block-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)

        block_db = LMDBNoLockDatabase(block_db_filename, 'c')
        block_store = BlockStore(block_db)

        # setup network
        self._dispatcher = Dispatcher()

        thread_pool = ThreadPoolExecutor(max_workers=10)
        process_pool = ProcessPoolExecutor(max_workers=3)

        self._thread_pool = thread_pool
        self._process_pool = process_pool

        self._service = Interconnect(component_endpoint,
                                     self._dispatcher,
                                     secured=False,
                                     heartbeat=False,
                                     max_incoming_connections=20)

        executor = TransactionExecutor(service=self._service,
                                       context_manager=context_manager,
                                       config_view_factory=ConfigViewFactory(
                                           StateViewFactory(merkle_db)))
        self._executor = executor

        state_delta_processor = StateDeltaProcessor(self._service,
                                                    state_delta_store,
                                                    block_store)

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        network_thread_pool = ThreadPoolExecutor(max_workers=10)
        self._network_thread_pool = network_thread_pool

        self._network_dispatcher = Dispatcher()

        # Server public and private keys are hardcoded here due to
        # the decision to avoid having separate identities for each
        # validator's server socket. This is appropriate for a public
        # network. For a permissioned network with requirements for
        # server endpoint authentication at the network level, this can
        # be augmented with a local lookup service for side-band provided
        # endpoint, public_key pairs and a local configuration option
        # for 'server' side private keys.
        self._network = Interconnect(
            network_endpoint,
            dispatcher=self._network_dispatcher,
            zmq_identity=zmq_identity,
            secured=True,
            server_public_key=b'wFMwoOt>yFqI/ek.G[tfMMILHWw#vXB[Sv}>l>i)',
            server_private_key=b'r&oJ5aQDj4+V]p2:Lz70Eu0x#m%IwzBdP(}&hWM*',
            heartbeat=True,
            public_uri=public_uri,
            connection_timeout=30,
            max_incoming_connections=100)

        self._gossip = Gossip(self._network,
                              public_uri=public_uri,
                              peering_mode=peering,
                              initial_join_endpoints=join_list,
                              initial_peer_endpoints=peer_list,
                              minimum_peer_connectivity=3,
                              maximum_peer_connectivity=10,
                              topology_check_frequency=1)

        completer = Completer(block_store, self._gossip)

        block_sender = BroadcastBlockSender(completer, self._gossip)
        batch_sender = BroadcastBatchSender(completer, self._gossip)
        chain_id_manager = ChainIdManager(data_dir)
        # Create and configure journal
        self._journal = Journal(
            block_store=block_store,
            state_view_factory=StateViewFactory(merkle_db),
            block_sender=block_sender,
            batch_sender=batch_sender,
            transaction_executor=executor,
            squash_handler=context_manager.get_squash_handler(),
            identity_signing_key=identity_signing_key,
            chain_id_manager=chain_id_manager,
            state_delta_processor=state_delta_processor,
            data_dir=data_dir,
            config_dir=config_dir,
            check_publish_block_frequency=0.1,
            block_cache_purge_frequency=30,
            block_cache_keep_time=300)

        self._genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_key=identity_signing_key,
            data_dir=data_dir,
            config_dir=config_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender)

        responder = Responder(completer)

        completer.set_on_batch_received(self._journal.on_batch_received)
        completer.set_on_block_received(self._journal.on_block_received)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_GET_REQUEST,
            tp_state_handlers.TpStateGetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_SET_REQUEST,
            tp_state_handlers.TpStateSetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_REGISTER_REQUEST,
            processor_handlers.ProcessorRegisterHandler(executor.processors),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_UNREGISTER_REQUEST,
            processor_handlers.ProcessorUnRegisterHandler(executor.processors),
            thread_pool)

        # Set up base network handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_PING, PingHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_CONNECT,
            ConnectHandler(network=self._network), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_DISCONNECT,
            DisconnectHandler(network=self._network), network_thread_pool)

        # Set up gossip handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
            GetPeersRequestHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
            GetPeersResponseHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_REGISTER,
            PeerRegisterHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_UNREGISTER,
            PeerUnregisterHandler(gossip=self._gossip), network_thread_pool)

        # GOSSIP_MESSAGE 1) Sends acknowledgement to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE, GossipMessageHandler(),
            network_thread_pool)

        # GOSSIP_MESSAGE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            signature_verifier.GossipMessageSignatureVerifier(), process_pool)

        # GOSSIP_MESSAGE 3) Determines if we should broadcast the
        # message to our peers. It is important that this occur prior
        # to the sending of the message to the completer, as this step
        # relies on whether the gossip message has previously been
        # seen by the validator to determine whether or not forwarding
        # should occur
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipBroadcastHandler(gossip=self._gossip, completer=completer),
            network_thread_pool)

        # GOSSIP_MESSAGE 4) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            CompleterGossipHandler(completer), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
            BlockResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            GossipBlockResponseHandler(), network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            signature_verifier.GossipBlockResponseSignatureVerifier(),
            process_pool)

        # GOSSIP_BLOCK_RESPONSE 3) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            CompleterGossipBlockResponseHandler(completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            ResponderBlockResponseHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
            BatchByBatchIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
            BatchByTransactionIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            GossipBatchResponseHandler(), network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            signature_verifier.GossipBatchResponseSignatureVerifier(),
            process_pool)

        # GOSSIP_BATCH_RESPONSE 3) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            CompleterGossipBatchResponseHandler(completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            ResponderBatchResponseHandler(responder, self._gossip),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            signature_verifier.BatchListSignatureVerifier(), process_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            CompleterBatchListBroadcastHandler(completer, self._gossip),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            client_handlers.BatchSubmitFinisher(
                self._journal.get_block_store(), completer.batch_cache),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST,
            client_handlers.BatchStatusRequest(self._journal.get_block_store(),
                                               completer.batch_cache),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
            client_handlers.StateListRequest(merkle_db,
                                             self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
            client_handlers.StateGetRequest(merkle_db,
                                            self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
            client_handlers.BlockListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
            client_handlers.BlockGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST,
            client_handlers.BatchListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_GET_REQUEST,
            client_handlers.BatchGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_TRANSACTION_LIST_REQUEST,
            client_handlers.TransactionListRequest(
                self._journal.get_block_store()), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_TRANSACTION_GET_REQUEST,
            client_handlers.TransactionGetRequest(
                self._journal.get_block_store()), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
            client_handlers.StateCurrentRequest(
                self._journal.get_current_root), thread_pool)

        # State Delta Subscription Handlers
        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
            StateDeltaSubscriberValidationHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
            StateDeltaAddSubscriberHandler(state_delta_processor), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_UNSUBSCRIBE_REQUEST,
            StateDeltaUnsubscriberHandler(state_delta_processor), thread_pool)
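Throughout the constructor above, each LMDB file name is derived from the data directory and the last two characters of the network endpoint, so the merkle, state-delta, and block databases for a given validator share one suffix. A small sketch of that naming convention; the helper data_file is illustrative only:

    import os

    def data_file(data_dir, prefix, network_endpoint):
        # e.g. merkle-00.lmdb, state-deltas-00.lmdb, block-00.lmdb for an
        # endpoint ending in '00'.
        return os.path.join(
            data_dir, '{}-{}.lmdb'.format(prefix, network_endpoint[-2:]))

    assert data_file('/var/lib/validator', 'merkle', 'tcp://127.0.0.1:8800') \
        == '/var/lib/validator/merkle-00.lmdb'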
Example #18
    def __init__(self,
                 bind_network,
                 bind_component,
                 endpoint,
                 peering,
                 seeds_list,
                 peer_list,
                 data_dir,
                 config_dir,
                 identity_signing_key,
                 scheduler_type,
                 permissions,
                 network_public_key=None,
                 network_private_key=None,
                 roles=None,
                 metrics_registry=None):
        """Constructs a validator instance.

        Args:
            bind_network (str): the network endpoint
            bind_component (str): the component endpoint
            endpoint (str): the zmq-style URI of this validator's
                publicly reachable endpoint
            peering (str): The type of peering approach. Either 'static'
                or 'dynamic'. In 'static' mode, no attempted topology
                buildout occurs -- the validator only attempts to initiate
                peering connections with endpoints specified in the
                peer_list. In 'dynamic' mode, the validator will first
                attempt to initiate peering connections with endpoints
                specified in the peer_list and then attempt to do a
                topology buildout starting with peer lists obtained from
                endpoints in the seeds_list. In either mode, the validator
                will accept incoming peer requests up to max_peers.
            seeds_list (list of str): a list of addresses to connect
                to in order to perform the initial topology buildout
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            config_dir (str): path to the config directory
            identity_signing_key (str): key validator uses for signing
        """

        # -- Setup Global State Database and Factory -- #
        global_state_db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('global state database file is %s',
                     global_state_db_filename)
        global_state_db = LMDBNoLockDatabase(global_state_db_filename, 'c')
        state_view_factory = StateViewFactory(global_state_db)

        # -- Setup State Delta Store -- #
        delta_db_filename = os.path.join(
            data_dir, 'state-deltas-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('state delta store file is %s', delta_db_filename)
        state_delta_db = LMDBNoLockDatabase(delta_db_filename, 'c')
        state_delta_store = StateDeltaStore(state_delta_db)

        # -- Setup Receipt Store -- #
        receipt_db_filename = os.path.join(
            data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('txn receipt store file is %s', receipt_db_filename)
        receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c')
        receipt_store = TransactionReceiptStore(receipt_db)

        # -- Setup Block Store -- #
        block_db_filename = os.path.join(
            data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)
        block_db = LMDBNoLockDatabase(block_db_filename, 'c')
        block_store = BlockStore(block_db)
        block_cache = BlockCache(block_store,
                                 keep_time=300,
                                 purge_frequency=30)

        # -- Setup Thread Pools -- #
        component_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=10, name='Component')
        network_thread_pool = InstrumentedThreadPoolExecutor(max_workers=10,
                                                             name='Network')
        sig_pool = InstrumentedThreadPoolExecutor(max_workers=3,
                                                  name='Signature')

        # -- Setup Dispatchers -- #
        component_dispatcher = Dispatcher(metrics_registry=metrics_registry)
        network_dispatcher = Dispatcher(metrics_registry=metrics_registry)

        # -- Setup Services -- #
        component_service = Interconnect(bind_component,
                                         component_dispatcher,
                                         secured=False,
                                         heartbeat=False,
                                         max_incoming_connections=20,
                                         monitor=True,
                                         max_future_callback_workers=10,
                                         metrics_registry=metrics_registry)

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        secure = (network_public_key is not None
                  and network_private_key is not None)

        network_service = Interconnect(
            bind_network,
            dispatcher=network_dispatcher,
            zmq_identity=zmq_identity,
            secured=secure,
            server_public_key=network_public_key,
            server_private_key=network_private_key,
            heartbeat=True,
            public_endpoint=endpoint,
            connection_timeout=30,
            max_incoming_connections=100,
            max_future_callback_workers=10,
            authorize=True,
            public_key=signing.generate_public_key(identity_signing_key),
            priv_key=identity_signing_key,
            roles=roles,
            metrics_registry=metrics_registry)

        # -- Setup Transaction Execution Platform -- #
        context_manager = ContextManager(global_state_db, state_delta_store)

        batch_tracker = BatchTracker(block_store)

        executor = TransactionExecutor(
            service=component_service,
            context_manager=context_manager,
            settings_view_factory=SettingsViewFactory(state_view_factory),
            scheduler_type=scheduler_type,
            invalid_observers=[batch_tracker],
            metrics_registry=metrics_registry)

        component_service.set_check_connections(executor.check_connections)

        state_delta_processor = StateDeltaProcessor(component_service,
                                                    state_delta_store,
                                                    block_store)
        event_broadcaster = EventBroadcaster(component_service, block_store,
                                             receipt_store)

        # -- Setup P2P Networking -- #
        gossip = Gossip(network_service,
                        endpoint=endpoint,
                        peering_mode=peering,
                        initial_seed_endpoints=seeds_list,
                        initial_peer_endpoints=peer_list,
                        minimum_peer_connectivity=3,
                        maximum_peer_connectivity=10,
                        topology_check_frequency=1)

        completer = Completer(block_store, gossip)

        block_sender = BroadcastBlockSender(completer, gossip)
        batch_sender = BroadcastBatchSender(completer, gossip)
        chain_id_manager = ChainIdManager(data_dir)

        identity_view_factory = IdentityViewFactory(
            StateViewFactory(global_state_db))

        id_cache = IdentityCache(identity_view_factory,
                                 block_store.chain_head_state_root)

        # -- Setup Permissioning -- #
        permission_verifier = PermissionVerifier(
            permissions, block_store.chain_head_state_root, id_cache)

        identity_observer = IdentityObserver(to_update=id_cache.invalidate,
                                             forked=id_cache.forked)

        # -- Setup Journal -- #
        batch_injector_factory = DefaultBatchInjectorFactory(
            block_store=block_store,
            state_view_factory=state_view_factory,
            signing_key=identity_signing_key)

        block_publisher = BlockPublisher(
            transaction_executor=executor,
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            block_sender=block_sender,
            batch_sender=batch_sender,
            squash_handler=context_manager.get_squash_handler(),
            chain_head=block_store.chain_head,
            identity_signing_key=identity_signing_key,
            data_dir=data_dir,
            config_dir=config_dir,
            permission_verifier=permission_verifier,
            check_publish_block_frequency=0.1,
            batch_observers=[batch_tracker],
            batch_injector_factory=batch_injector_factory,
            metrics_registry=metrics_registry)

        chain_controller = ChainController(
            block_sender=block_sender,
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            transaction_executor=executor,
            chain_head_lock=block_publisher.chain_head_lock,
            on_chain_updated=block_publisher.on_chain_updated,
            squash_handler=context_manager.get_squash_handler(),
            chain_id_manager=chain_id_manager,
            identity_signing_key=identity_signing_key,
            data_dir=data_dir,
            config_dir=config_dir,
            permission_verifier=permission_verifier,
            chain_observers=[
                state_delta_processor, event_broadcaster, receipt_store,
                batch_tracker, identity_observer
            ],
            metrics_registry=metrics_registry)

        genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_key=identity_signing_key,
            data_dir=data_dir,
            config_dir=config_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender)

        responder = Responder(completer)

        completer.set_on_batch_received(block_publisher.queue_batch)
        completer.set_on_block_received(chain_controller.queue_block)

        # -- Register Message Handler -- #
        network_handlers.add(network_dispatcher, network_service, gossip,
                             completer, responder, network_thread_pool,
                             sig_pool, permission_verifier)

        component_handlers.add(
            component_dispatcher, gossip, context_manager, executor, completer,
            block_store, batch_tracker, global_state_db,
            self.get_chain_head_state_root_hash, receipt_store,
            state_delta_processor, state_delta_store, event_broadcaster,
            permission_verifier, component_thread_pool, sig_pool)

        # -- Store Object References -- #
        self._component_dispatcher = component_dispatcher
        self._component_service = component_service
        self._component_thread_pool = component_thread_pool

        self._network_dispatcher = network_dispatcher
        self._network_service = network_service
        self._network_thread_pool = network_thread_pool

        self._sig_pool = sig_pool

        self._context_manager = context_manager
        self._executor = executor
        self._genesis_controller = genesis_controller
        self._gossip = gossip

        self._block_publisher = block_publisher
        self._chain_controller = chain_controller
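In this version the StateDeltaProcessor is no longer handed to the journal directly; it is passed to the ChainController as one of several chain_observers, alongside the event broadcaster, receipt store, batch tracker, and identity observer, and each is notified as the chain advances. A rough sketch of that fan-out, with a hypothetical chain_update callback standing in for whatever interface the real observers implement:

    class LoggingObserver:
        # Hypothetical observer; the real interface and method name may differ.
        def chain_update(self, block, receipts):
            print('chain advanced to block', block)

    def notify_chain_observers(chain_observers, block, receipts):
        # The controller fans each committed block out to every observer.
        for observer in chain_observers:
            observer.chain_update(block, receipts)

    notify_chain_observers([LoggingObserver()], 'block-42-id', [])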
Example #19
    def __init__(self, bind_network, bind_component, endpoint,
                 peering, seeds_list, peer_list, data_dir, config_dir,
                 identity_signing_key, scheduler_type,
                 network_public_key=None,
                 network_private_key=None):
        """Constructs a validator instance.

        Args:
            bind_network (str): the network endpoint
            bind_component (str): the component endpoint
            endpoint (str): the zmq-style URI of this validator's
                publicly reachable endpoint
            peering (str): The type of peering approach. Either 'static'
                or 'dynamic'. In 'static' mode, no attempted topology
                buildout occurs -- the validator only attempts to initiate
                peering connections with endpoints specified in the
                peer_list. In 'dynamic' mode, the validator will first
                attempt to initiate peering connections with endpoints
                specified in the peer_list and then attempt to do a
                topology buildout starting with peer lists obtained from
                endpoints in the seeds_list. In either mode, the validator
                will accept incoming peer requests up to max_peers.
            seeds_list (list of str): a list of addresses to connect
                to in order to perform the initial topology buildout
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            config_dir (str): path to the config directory
            identity_signing_key (str): key validator uses for signing
        """
        db_filename = os.path.join(data_dir,
                                   'merkle-{}.lmdb'.format(
                                       bind_network[-2:]))
        LOGGER.debug('database file is %s', db_filename)

        merkle_db = LMDBNoLockDatabase(db_filename, 'c')

        delta_db_filename = os.path.join(data_dir,
                                         'state-deltas-{}.lmdb'.format(
                                             bind_network[-2:]))
        LOGGER.debug('state delta store file is %s', delta_db_filename)
        state_delta_db = LMDBNoLockDatabase(delta_db_filename, 'c')

        state_delta_store = StateDeltaStore(state_delta_db)

        context_manager = ContextManager(merkle_db, state_delta_store)
        self._context_manager = context_manager

        state_view_factory = StateViewFactory(merkle_db)

        block_db_filename = os.path.join(data_dir, 'block-{}.lmdb'.format(
                                         bind_network[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)

        block_db = LMDBNoLockDatabase(block_db_filename, 'c')
        block_store = BlockStore(block_db)

        batch_tracker = BatchTracker(block_store)
        block_store.add_update_observer(batch_tracker)

        # setup network
        self._dispatcher = Dispatcher()

        thread_pool = ThreadPoolExecutor(max_workers=10)
        sig_pool = ThreadPoolExecutor(max_workers=3)

        self._thread_pool = thread_pool
        self._sig_pool = sig_pool

        self._service = Interconnect(bind_component,
                                     self._dispatcher,
                                     secured=False,
                                     heartbeat=False,
                                     max_incoming_connections=20,
                                     monitor=True)

        config_file = os.path.join(config_dir, "validator.toml")

        validator_config = {}
        if os.path.exists(config_file):
            with open(config_file) as fd:
                raw_config = fd.read()
            validator_config = toml.loads(raw_config)

        if scheduler_type is None:
            scheduler_type = validator_config.get("scheduler", "serial")

        executor = TransactionExecutor(
            service=self._service,
            context_manager=context_manager,
            settings_view_factory=SettingsViewFactory(
                                    StateViewFactory(merkle_db)),
            scheduler_type=scheduler_type,
            invalid_observers=[batch_tracker])

        self._executor = executor
        self._service.set_check_connections(executor.check_connections)

        state_delta_processor = StateDeltaProcessor(self._service,
                                                    state_delta_store,
                                                    block_store)

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        network_thread_pool = ThreadPoolExecutor(max_workers=10)
        self._network_thread_pool = network_thread_pool

        self._network_dispatcher = Dispatcher()

        secure = (network_public_key is not None
                  and network_private_key is not None)

        self._network = Interconnect(
            bind_network,
            dispatcher=self._network_dispatcher,
            zmq_identity=zmq_identity,
            secured=secure,
            server_public_key=network_public_key,
            server_private_key=network_private_key,
            heartbeat=True,
            public_endpoint=endpoint,
            connection_timeout=30,
            max_incoming_connections=100)

        self._gossip = Gossip(self._network,
                              endpoint=endpoint,
                              peering_mode=peering,
                              initial_seed_endpoints=seeds_list,
                              initial_peer_endpoints=peer_list,
                              minimum_peer_connectivity=3,
                              maximum_peer_connectivity=10,
                              topology_check_frequency=1)

        completer = Completer(block_store, self._gossip)

        block_sender = BroadcastBlockSender(completer, self._gossip)
        batch_sender = BroadcastBatchSender(completer, self._gossip)
        chain_id_manager = ChainIdManager(data_dir)
        # Create and configure journal
        self._journal = Journal(
            block_store=block_store,
            state_view_factory=StateViewFactory(merkle_db),
            block_sender=block_sender,
            batch_sender=batch_sender,
            transaction_executor=executor,
            squash_handler=context_manager.get_squash_handler(),
            identity_signing_key=identity_signing_key,
            chain_id_manager=chain_id_manager,
            state_delta_processor=state_delta_processor,
            data_dir=data_dir,
            config_dir=config_dir,
            check_publish_block_frequency=0.1,
            block_cache_purge_frequency=30,
            block_cache_keep_time=300,
            batch_observers=[batch_tracker]
        )

        self._genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_key=identity_signing_key,
            data_dir=data_dir,
            config_dir=config_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender
        )

        responder = Responder(completer)

        completer.set_on_batch_received(self._journal.on_batch_received)
        completer.set_on_block_received(self._journal.on_block_received)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_GET_REQUEST,
            tp_state_handlers.TpStateGetHandler(context_manager),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_SET_REQUEST,
            tp_state_handlers.TpStateSetHandler(context_manager),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_REGISTER_REQUEST,
            processor_handlers.ProcessorRegisterHandler(executor.processors),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_UNREGISTER_REQUEST,
            processor_handlers.ProcessorUnRegisterHandler(executor.processors),
            thread_pool)

        # Set up base network handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_PING,
            PingHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_CONNECT,
            ConnectHandler(network=self._network),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_DISCONNECT,
            DisconnectHandler(network=self._network),
            network_thread_pool)

        # Set up gossip handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
            GetPeersRequestHandler(gossip=self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
            GetPeersResponseHandler(gossip=self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_REGISTER,
            PeerRegisterHandler(gossip=self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_UNREGISTER,
            PeerUnregisterHandler(gossip=self._gossip),
            network_thread_pool)

        # GOSSIP_MESSAGE 1) Sends acknowledgement to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipMessageHandler(),
            network_thread_pool)

        # GOSSIP_MESSAGE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            signature_verifier.GossipMessageSignatureVerifier(),
            sig_pool)

        # GOSSIP_MESSAGE 3) Verifies batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            structure_verifier.GossipHandlerStructureVerifier(),
            network_thread_pool)

        # GOSSIP_MESSAGE 4) Determines if we should broadcast the
        # message to our peers. It is important that this occur prior
        # to the sending of the message to the completer, as this step
        # relies on whether the gossip message has previously been
        # seen by the validator to determine whether or not forwarding
        # should occur
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipBroadcastHandler(
                gossip=self._gossip,
                completer=completer),
            network_thread_pool)

        # GOSSIP_MESSAGE 5) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            CompleterGossipHandler(
                completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
            BlockResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            GossipBlockResponseHandler(),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            signature_verifier.GossipBlockResponseSignatureVerifier(),
            sig_pool)

        # GOSSIP_BLOCK_RESPONSE 3) Check batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            structure_verifier.GossipBlockResponseStructureVerifier(),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 4) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            CompleterGossipBlockResponseHandler(
                completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            ResponderBlockResponseHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
            BatchByBatchIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
            BatchByTransactionIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            GossipBatchResponseHandler(),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            signature_verifier.GossipBatchResponseSignatureVerifier(),
            sig_pool)

        # GOSSIP_BATCH_RESPONSE 3) Check batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            structure_verifier.GossipBatchResponseStructureVerifier(),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 4) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            CompleterGossipBatchResponseHandler(
                completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            ResponderBatchResponseHandler(responder, self._gossip),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            BatchListPermissionVerifier(
                settings_view_factory=SettingsViewFactory(
                    StateViewFactory(merkle_db)),
                current_root_func=self._journal.get_current_root,
            ),
            sig_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            signature_verifier.BatchListSignatureVerifier(),
            sig_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            structure_verifier.BatchListStructureVerifier(),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            CompleterBatchListBroadcastHandler(
                completer, self._gossip),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            client_handlers.BatchSubmitFinisher(batch_tracker),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST,
            client_handlers.BatchStatusRequest(batch_tracker),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
            client_handlers.StateListRequest(
                merkle_db,
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
            client_handlers.StateGetRequest(
                merkle_db,
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
            client_handlers.BlockListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
            client_handlers.BlockGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST,
            client_handlers.BatchListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_GET_REQUEST,
            client_handlers.BatchGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_TRANSACTION_LIST_REQUEST,
            client_handlers.TransactionListRequest(
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_TRANSACTION_GET_REQUEST,
            client_handlers.TransactionGetRequest(
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
            client_handlers.StateCurrentRequest(
                self._journal.get_current_root), thread_pool)

        # State Delta Subscription Handlers
        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
            StateDeltaSubscriberValidationHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
            StateDeltaAddSubscriberHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_UNSUBSCRIBE_REQUEST,
            StateDeltaUnsubscriberHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_GET_EVENTS_REQUEST,
            StateDeltaGetEventsHandler(block_store, state_delta_store),
            thread_pool)