Example #1
    def test_requires_genesis_fails_if_block_exists(self):
        """
        In this case, when there is
         - a genesis_batch_file
         - a chain head id
        the validator should produce an assertion error, as it already has a
        genesis block and should not attempt to produce another.
        """
        self._with_empty_batch_file()

        block = self._create_block()
        block_store = self.make_block_store([block.block])
        block_manager = BlockManager()
        block_manager.add_commit_store(block_store)

        genesis_ctrl = GenesisController(
            context_manager=Mock('context_manager'),
            transaction_executor=Mock('txn_executor'),
            block_store=block_store,
            state_view_factory=Mock('StateViewFactory'),
            identity_signer=self._signer,
            block_manager=block_manager,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender'),
            receipt_store=MagicMock())

        with self.assertRaises(InvalidGenesisStateError):
            genesis_ctrl.requires_genesis()
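
The excerpts above rely on test fixtures that are not shown: self._temp_dir, self._signer, make_block_store, _create_block, _with_empty_batch_file, _with_network_name and _read_block_chain_id. The sketch below reconstructs the file-based helpers from the way the tests use them; the file names (genesis.batch, block-chain-id) and the bodies are assumptions, not the project's actual fixture code, and the block-store and signing helpers are omitted because they depend on the validator's own APIs.

import os
import shutil
import tempfile
import unittest


class GenesisFixtureSketch(unittest.TestCase):
    def setUp(self):
        # Each test gets a throwaway directory used as both data_dir and
        # config_dir.
        self._temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self._temp_dir)

    def _with_empty_batch_file(self):
        # Create an empty genesis.batch in the data directory so the
        # controller sees pending genesis work; return the path so tests
        # can check that start() deletes it.
        path = os.path.join(self._temp_dir, 'genesis.batch')
        open(path, 'wb').close()
        return path

    def _with_network_name(self, chain_id):
        # Simulate a validator that has already joined a network by writing
        # a chain id where the ChainIdManager looks for it (assumed file
        # name).
        with open(os.path.join(self._temp_dir, 'block-chain-id'), 'w') as fd:
            fd.write(chain_id)

    def _read_block_chain_id(self):
        # Read back the chain id recorded after a successful genesis run.
        with open(os.path.join(self._temp_dir, 'block-chain-id')) as fd:
            return fd.read().strip()
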
Example #2
    def test_does_not_require_genesis_with_no_file_no_network(self):
        """
        In this case, when there is:
         - no genesis.batch file
         - no chain head
         - no network
        the GenesisController should not require genesis.
        """
        block_store = self.make_block_store()
        block_manager = BlockManager()
        block_manager.add_commit_store(block_store)

        genesis_ctrl = GenesisController(
            context_manager=Mock('context_manager'),
            transaction_executor=Mock('txn_executor'),
            block_store=block_store,
            state_view_factory=Mock('StateViewFactory'),
            identity_signer=self._signer,
            block_manager=block_manager,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender'),
            receipt_store=MagicMock())

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #3
    def test_requires_genesis_fails_if_joins_network_with_file(self):
        """
        In this case, when there is
         - a genesis_batch_file
         - network id
        the validator should produce an assertion error, as it is joining
        a network and is not a genesis node.
        """
        self._with_empty_batch_file()
        self._with_network_name('some_block_chain_id')

        block_store = self.make_block_store()

        genesis_ctrl = GenesisController(
            Mock('context_manager'),
            Mock('txn_executor'),
            Mock('completer'),
            block_store,
            StateViewFactory(DictDatabase()),
            self._identity_key,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender')
        )

        with self.assertRaises(InvalidGenesisStateError):
            genesis_ctrl.requires_genesis()
Example #4
    def test_requires_genesis_fails_if_joins_network_with_file(self):
        """
        In this case, when there is
         - a genesis_batch_file
         - network id
        the validator should produce an assertion error, as it is joining
        a network and is not a genesis node.
        """
        self._with_empty_batch_file()
        self._with_network_name('some_block_chain_id')

        block_store = self.make_block_store()
        block_manager = BlockManager()
        block_manager.add_commit_store(block_store)

        genesis_ctrl = GenesisController(
            context_manager=Mock('context_manager'),
            transaction_executor=Mock('txn_executor'),
            block_store=block_store,
            state_view_factory=Mock('StateViewFactory'),
            identity_signer=self._signer,
            block_manager=block_manager,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender'),
            receipt_store=MagicMock())

        with self.assertRaises(InvalidGenesisStateError):
            genesis_ctrl.requires_genesis()
Example #5
    def test_requires_genesis_fails_if_block_exists(self):
        """
        In this case, when there is
         - a genesis_batch_file
         - a chain head id
        the validator should produce an assertion error, as it already has a
        genesis block and should not attempt to produce another.
        """
        self._with_empty_batch_file()

        block_store = self.make_block_store({
            'chain_head_id': 'some_other_id',
            'some_other_id': b''
        })

        genesis_ctrl = GenesisController(
            Mock('context_manager'),
            Mock('txn_executor'),
            Mock('completer'),
            block_store,
            StateViewFactory(DictDatabase()),
            self._identity_key,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender')
        )

        with self.assertRaises(InvalidGenesisStateError):
            genesis_ctrl.requires_genesis()
Example #6
    def test_requires_genesis(self):
        self._with_empty_batch_file()

        genesis_ctrl = GenesisController(
            Mock('context_manager'),
            Mock('txn_executor'),
            Mock('completer'),
            {},  # Empty block store
            data_dir=self._temp_dir)

        self.assertEqual(True, genesis_ctrl.requires_genesis())
Example #7
    def test_requires_genesis(self):
        self._with_empty_batch_file()

        genesis_ctrl = GenesisController(
            Mock('context_manager'),
            Mock('txn_executor'),
            Mock('completer'),
            {},  # Empty block store
            data_dir=self._temp_dir
        )

        self.assertEqual(True, genesis_ctrl.requires_genesis())
Example #8
    def test_empty_batch_file_should_produce_block(
        self, mock_scheduler_complete
    ):
        """
        In this case, the genesis batch, even with an empty list of batches,
        should produce a genesis block.
        Also:
         - the genesis.batch file should be deleted
         - the block_chain_id file should be created and populated
        """
        genesis_file = self._with_empty_batch_file()
        block_store = self.make_block_store()
        block_manager = BlockManager()
        block_manager.add_commit_store(block_store)

        state_database = NativeLmdbDatabase(
            os.path.join(self._temp_dir, 'test_genesis.lmdb'),
            indexes=MerkleDatabase.create_index_configuration(),
            _size=10 * 1024 * 1024)
        merkle_db = MerkleDatabase(state_database)

        ctx_mgr = Mock(name='ContextManager')
        ctx_mgr.get_squash_handler.return_value = Mock()
        ctx_mgr.get_first_root.return_value = merkle_db.get_merkle_root()

        txn_executor = Mock(name='txn_executor')
        completer = Mock('completer')
        completer.add_block = Mock('add_block')

        genesis_ctrl = GenesisController(
            context_manager=ctx_mgr,
            transaction_executor=txn_executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=StateViewFactory(state_database),
            identity_signer=self._signer,
            block_manager=block_manager,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender'),
            receipt_store=MagicMock())

        on_done_fn = Mock(return_value='')
        genesis_ctrl.start(on_done_fn)

        self.assertEqual(False, os.path.exists(genesis_file))

        self.assertEqual(True, block_store.chain_head is not None)
        self.assertEqual(1, on_done_fn.call_count)
        self.assertEqual(1, completer.add_block.call_count)
        self.assertEqual(block_store.chain_head.identifier,
                         self._read_block_chain_id())
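
Taken together, these tests pin down a small decision procedure: requires_genesis() returns True only when a genesis.batch file exists and there is neither a chain head nor a network id, returns False when there is no batch file, and raises InvalidGenesisStateError for the conflicting combinations. The sketch below shows how a caller might wire that into startup; the wrapper function and its else-branch behavior are illustrative assumptions, not validator code.

def maybe_run_genesis(genesis_controller, on_chain_ready):
    # Hypothetical startup hook around the API exercised above.
    if genesis_controller.requires_genesis():
        # start() builds the genesis block from genesis.batch, removes the
        # file, writes the block-chain-id file, and invokes the callback
        # exactly once (see the assertions on on_done_fn above).
        genesis_controller.start(on_chain_ready)
    else:
        # Nothing to do: either a chain head already exists or this
        # validator is joining an existing network.
        on_chain_ready()
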
Example #9
    def test_does_not_require_genesis_join_network(self):
        self._with_network_name('some_network_name')

        block_store = {}

        genesis_ctrl = GenesisController(Mock('context_manager'),
                                         Mock('txn_executor'),
                                         Mock('completer'),
                                         block_store,
                                         data_dir=self._temp_dir)

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #10
    def test_empty_batch_file_should_produce_block(
        self, mock_scheduler_complete
    ):
        """
        In this case, the genesis batch, even with an empty list of batches,
        should produce a genesis block.
        Also:
         - the genesis.batch file should be deleted
         - the block_chain_id file should be created and populated
        """
        genesis_file = self._with_empty_batch_file()
        block_store = self.make_block_store()
        block_manager = BlockManager()
        block_manager.add_commit_store(block_store)

        state_database = NativeLmdbDatabase(
            os.path.join(self._temp_dir, 'test_genesis.lmdb'),
            indexes=MerkleDatabase.create_index_configuration(),
            _size=10 * 1024 * 1024)
        merkle_db = MerkleDatabase(state_database)

        ctx_mgr = Mock(name='ContextManager')
        ctx_mgr.get_squash_handler.return_value = Mock()
        ctx_mgr.get_first_root.return_value = merkle_db.get_merkle_root()

        txn_executor = Mock(name='txn_executor')
        completer = Mock('completer')
        completer.add_block = Mock('add_block')

        genesis_ctrl = GenesisController(
            context_manager=ctx_mgr,
            transaction_executor=txn_executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=StateViewFactory(state_database),
            identity_signer=self._signer,
            block_manager=block_manager,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender'))

        on_done_fn = Mock(return_value='')
        genesis_ctrl.start(on_done_fn)

        self.assertEqual(False, os.path.exists(genesis_file))

        self.assertEqual(True, block_store.chain_head is not None)
        self.assertEqual(1, on_done_fn.call_count)
        self.assertEqual(1, completer.add_block.call_count)
        self.assertEqual(block_store.chain_head.identifier,
                         self._read_block_chain_id())
Example #11
    def test_does_not_require_genesis_block_exists(self):
        block_store = {
            'chain_head_id': 'some_other_id'
        }

        genesis_ctrl = GenesisController(
            Mock('context_manager'),
            Mock('txn_executor'),
            Mock('completer'),
            block_store,
            data_dir=self._temp_dir
        )

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #12
    def test_does_not_require_genesis_join_network(self):
        self._with_network_name('some_network_name')

        block_store = {}

        genesis_ctrl = GenesisController(
            Mock('context_manager'),
            Mock('txn_executor'),
            Mock('completer'),
            block_store,
            data_dir=self._temp_dir
        )

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #13
    def test_does_not_require_genesis_block_exists(self):
        block_store = self.make_block_store({'chain_head_id': 'some_other_id'})

        genesis_ctrl = GenesisController(Mock('context_manager'),
                                         Mock('txn_executor'),
                                         Mock('completer'),
                                         block_store,
                                         StateViewFactory(DictDatabase()),
                                         self._identity_key,
                                         data_dir=self._temp_dir,
                                         chain_id_manager=ChainIdManager(
                                             self._temp_dir),
                                         batch_sender=Mock('batch_sender'))

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #14
    def test_requires_genesis(self):
        self._with_empty_batch_file()

        genesis_ctrl = GenesisController(
            Mock('context_manager'),
            Mock('txn_executor'),
            Mock('completer'),
            self.make_block_store(),  # Empty block store
            StateViewFactory(DictDatabase()),
            self._identity_key,
            data_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir)
        )

        self.assertEqual(True, genesis_ctrl.requires_genesis())
Example #15
    def test_requires_genesis(self):
        self._with_empty_batch_file()

        genesis_ctrl = GenesisController(
            Mock('context_manager'),
            Mock('txn_executor'),
            Mock('completer'),
            self.make_block_store(),  # Empty block store
            Mock('StateViewFactory'),
            self._signer,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender'))

        self.assertEqual(True, genesis_ctrl.requires_genesis())
Example #16
    def test_requires_genesis(self):
        self._with_empty_batch_file()

        genesis_ctrl = GenesisController(
            Mock('context_manager'),
            Mock('txn_executor'),
            Mock('completer'),
            self.make_block_store(),  # Empty block store
            Mock('StateViewFactory'),
            self._signer,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender'))

        self.assertEqual(True, genesis_ctrl.requires_genesis())
Example #17
    def test_does_not_require_genesis_with_no_file_no_network(self):
        """
        In this case, when there is:
         - no genesis.batch file
         - no chain head
         - no network
        the GenesisController should not require genesis.
        """
        block_store = {}

        genesis_ctrl = GenesisController(Mock(name='context_manager'),
                                         Mock(name='txn_executor'),
                                         Mock('completer'),
                                         block_store,
                                         data_dir=self._temp_dir)

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #18
    def test_does_not_require_genesis_join_network(self):
        self._with_network_name('some_network_name')

        block_store = self.make_block_store()

        genesis_ctrl = GenesisController(
            Mock('context_manager'),
            Mock('txn_executor'),
            Mock('completer'),
            block_store,
            StateViewFactory(DictDatabase()),
            self._identity_key,
            data_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir)
        )

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #19
    def test_does_not_require_genesis_block_exists(self):
        block = self._create_block()
        block_store = self.make_block_store({block.header_signature: block})

        genesis_ctrl = GenesisController(Mock('context_manager'),
                                         Mock('txn_executor'),
                                         Mock('completer'),
                                         block_store,
                                         Mock('StateViewFactory'),
                                         self._signer,
                                         data_dir=self._temp_dir,
                                         config_dir=self._temp_dir,
                                         chain_id_manager=ChainIdManager(
                                             self._temp_dir),
                                         batch_sender=Mock('batch_sender'))

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #20
    def test_empty_batch_file_should_produce_block(self,
                                                   mock_scheduler_complete):
        """
        In this case, the genesis batch, even with an empty list of batches,
        should produce a genesis block.
        Also:
         - the genesis.batch file should be deleted
         - the block_chain_id file should be created and populated
        """
        genesis_file = self._with_empty_batch_file()
        block_store = self.make_block_store()

        state_database = DictDatabase()
        merkle_db = MerkleDatabase(state_database)

        ctx_mgr = Mock(name='ContextManager')
        ctx_mgr.get_squash_handler.return_value = Mock()
        ctx_mgr.get_first_root.return_value = merkle_db.get_merkle_root()

        txn_executor = Mock(name='txn_executor')
        completer = Mock('completer')
        completer.add_block = Mock('add_block')

        genesis_ctrl = GenesisController(ctx_mgr,
                                         txn_executor,
                                         completer,
                                         block_store,
                                         StateViewFactory(state_database),
                                         self._signer,
                                         data_dir=self._temp_dir,
                                         config_dir=self._temp_dir,
                                         chain_id_manager=ChainIdManager(
                                             self._temp_dir),
                                         batch_sender=Mock('batch_sender'))

        on_done_fn = Mock(return_value='')
        genesis_ctrl.start(on_done_fn)

        self.assertEqual(False, os.path.exists(genesis_file))

        self.assertEqual(True, block_store.chain_head is not None)
        self.assertEqual(1, on_done_fn.call_count)
        self.assertEqual(1, completer.add_block.call_count)
        self.assertEqual(block_store.chain_head.identifier,
                         self._read_block_chain_id())
Example #21
    def test_does_not_require_genesis_join_network(self):
        self._with_network_name('some_network_name')

        block_store = self.make_block_store()

        genesis_ctrl = GenesisController(
            Mock('context_manager'),
            Mock('txn_executor'),
            Mock('completer'),
            block_store,
            Mock('StateViewFactory'),
            self._signer,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender'))

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #22
    def test_does_not_require_genesis_join_network(self):
        self._with_network_name('some_network_name')

        block_store = self.make_block_store()

        genesis_ctrl = GenesisController(Mock('context_manager'),
                                         Mock('txn_executor'),
                                         Mock('completer'),
                                         block_store,
                                         Mock('StateViewFactory'),
                                         self._signer,
                                         data_dir=self._temp_dir,
                                         config_dir=self._temp_dir,
                                         chain_id_manager=ChainIdManager(
                                             self._temp_dir),
                                         batch_sender=Mock('batch_sender'))

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #23
    def test_does_not_require_genesis_block_exists(self):
        block_store = self.make_block_store({
            'chain_head_id': 'some_other_id'
        })

        genesis_ctrl = GenesisController(
            Mock('context_manager'),
            Mock('txn_executor'),
            Mock('completer'),
            block_store,
            StateViewFactory(DictDatabase()),
            self._identity_key,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender')
        )

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #24
    def test_does_not_require_genesis_with_no_file_no_network(self):
        """
        In this case, when there is:
         - no genesis.batch file
         - no chain head
         - no network
        the GenesisController should not require genesis.
        """
        block_store = {}

        genesis_ctrl = GenesisController(
            Mock(name='context_manager'),
            Mock(name='txn_executor'),
            Mock('completer'),
            block_store,
            data_dir=self._temp_dir
        )

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #25
    def test_does_not_require_genesis_block_exists(self):
        block = self._create_block()
        block_store = self.make_block_store({
            block.header_signature: block
        })

        genesis_ctrl = GenesisController(
            Mock('context_manager'),
            Mock('txn_executor'),
            Mock('completer'),
            block_store,
            Mock('StateViewFactory'),
            self._signer,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender'))

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #26
    def test_requires_genesis_fails_if_block_exists(self):
        """
        In this case, when there is
         - a genesis_batch_file
         - a chain head id
        the validator should produce an assertion error, as it already has a
        genesis block and should not attempt to produce another.
        """
        self._with_empty_batch_file()

        block_store = {'chain_head_id': 'some_other_id'}

        genesis_ctrl = GenesisController(Mock('context_manager'),
                                         Mock('txn_executor'),
                                         Mock('completer'),
                                         block_store,
                                         data_dir=self._temp_dir)

        with self.assertRaises(InvalidGenesisStateError):
            genesis_ctrl.requires_genesis()
Example #27
    def test_does_not_require_genesis_block_exists(self):
        block = self._create_block()
        block_store = self.make_block_store({block.header_signature: block})
        block_manager = BlockManager()
        block_manager.add_store("commit_store", block_store)

        genesis_ctrl = GenesisController(
            context_manager=Mock('context_manager'),
            transaction_executor=Mock('txn_executor'),
            completer=Mock('completer'),
            block_store=block_store,
            state_view_factory=Mock('StateViewFactory'),
            identity_signer=self._signer,
            block_manager=block_manager,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender'))

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #28
    def test_does_not_require_genesis_block_exists(self):
        block = self._create_block()
        block_store = self.make_block_store([block.block])
        block_manager = BlockManager()
        block_manager.add_commit_store(block_store)

        genesis_ctrl = GenesisController(
            context_manager=Mock('context_manager'),
            transaction_executor=Mock('txn_executor'),
            block_store=block_store,
            state_view_factory=Mock('StateViewFactory'),
            identity_signer=self._signer,
            block_manager=block_manager,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender'),
            receipt_store=MagicMock())

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #29
    def test_does_not_require_genesis_join_network(self):
        self._with_network_name('some_network_name')

        block_store = self.make_block_store()
        block_manager = BlockManager()
        block_manager.add_commit_store(block_store)

        genesis_ctrl = GenesisController(
            context_manager=Mock('context_manager'),
            transaction_executor=Mock('txn_executor'),
            completer=Mock('completer'),
            block_store=block_store,
            state_view_factory=Mock('StateViewFactory'),
            identity_signer=self._signer,
            block_manager=block_manager,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender'))

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #30
    def test_requires_genesis(self):
        self._with_empty_batch_file()

        block_store = self.make_block_store()
        block_manager = BlockManager()
        block_manager.add_commit_store(block_store)

        genesis_ctrl = GenesisController(
            context_manager=Mock('context_manager'),
            transaction_executor=Mock('txn_executor'),
            block_store=block_store,  # Empty block store
            state_view_factory=Mock('StateViewFactory'),
            identity_signer=self._signer,
            block_manager=block_manager,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender'),
            receipt_store=MagicMock())

        self.assertEqual(True, genesis_ctrl.requires_genesis())
Example #31
    def test_does_not_require_genesis_join_network(self):
        self._with_network_name('some_network_name')

        block_store = self.make_block_store()
        block_manager = BlockManager()
        block_manager.add_commit_store(block_store)

        genesis_ctrl = GenesisController(
            context_manager=Mock('context_manager'),
            transaction_executor=Mock('txn_executor'),
            completer=Mock('completer'),
            block_store=block_store,
            state_view_factory=Mock('StateViewFactory'),
            identity_signer=self._signer,
            block_manager=block_manager,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender'))

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #32
    def test_requires_genesis(self):
        self._with_empty_batch_file()

        block_store = self.make_block_store()
        block_manager = BlockManager()
        block_manager.add_store("commit_store", block_store)

        genesis_ctrl = GenesisController(
            context_manager=Mock('context_manager'),
            transaction_executor=Mock('txn_executor'),
            completer=Mock('completer'),
            block_store=block_store,  # Empty block store
            state_view_factory=Mock('StateViewFactory'),
            identity_signer=self._signer,
            block_manager=block_manager,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender'))

        self.assertEqual(True, genesis_ctrl.requires_genesis())
Example #33
    def test_requires_genesis_fails_if_joins_network_with_file(self):
        """
        In this case, when there is
         - a genesis_batch_file
         - network id
        the validator should produce an assertion error, as it is joining
        a network and is not a genesis node.
        """
        self._with_empty_batch_file()
        self._with_network_name('some_block_chain_id')

        block_store = {}

        genesis_ctrl = GenesisController(Mock('context_manager'),
                                         Mock('txn_executor'),
                                         Mock('completer'),
                                         block_store,
                                         data_dir=self._temp_dir)

        with self.assertRaises(InvalidGenesisStateError):
            genesis_ctrl.requires_genesis()
Example #34
    def test_empty_batch_file_should_produce_block(
        self, mock_scheduler_complete
    ):
        """
        In this case, the genesis batch, even with an empty list of batches,
        should produce a genesis block.
        Also:
         - the genesis.batch file should be deleted
         - the block_chain_id file should be created and populated
        """
        genesis_file = self._with_empty_batch_file()
        block_store = {}

        ctx_mgr = Mock(name='ContextManager')
        ctx_mgr.get_squash_handler.return_value = Mock()
        ctx_mgr.get_first_root.return_value = '000000000'

        txn_executor = Mock(name='txn_executor')
        completer = Mock('completer')
        completer.add_block = Mock('add_block')

        genesis_ctrl = GenesisController(
            ctx_mgr,
            txn_executor,
            completer,
            block_store,
            data_dir=self._temp_dir
        )

        on_done_fn = Mock(return_value='')
        genesis_ctrl.start(on_done_fn)

        self.assertEqual(False, os.path.exists(genesis_file))

        self.assertEqual(True, 'chain_head_id' in block_store)
        self.assertEqual(True, block_store['chain_head_id'] in block_store)
        self.assertEqual(1, on_done_fn.call_count)
        self.assertEqual(1, completer.add_block.call_count)
        self.assertEqual(block_store['chain_head_id'],
                         self._read_block_chain_id())
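
The dict-backed variants above make the post-conditions of start() explicit: genesis.batch is gone, the store maps 'chain_head_id' to the new block's id, that id resolves to a block in the store, and the block-chain-id file records the same id. Below is a small hedged checker that restates those conditions; the helper and the file names are assumptions drawn from the tests, not project code.

import os


def genesis_completed(data_dir, block_store):
    # Hypothetical post-condition check mirroring the assertions above.
    batch_file = os.path.join(data_dir, 'genesis.batch')
    chain_id_file = os.path.join(data_dir, 'block-chain-id')
    if os.path.exists(batch_file) or not os.path.exists(chain_id_file):
        return False
    with open(chain_id_file) as fd:
        recorded_id = fd.read().strip()
    head_id = block_store.get('chain_head_id')
    # The recorded chain id must match the head id, and the head id must
    # resolve to a block stored under that key.
    return head_id == recorded_id and head_id in block_store
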
Example #35
    def test_does_not_require_genesis_with_no_file_no_network(self):
        """
        In this case, when there is:
         - no genesis.batch file
         - no chain head
         - no network
        the GenesisController should not require genesis.
        """
        block_store = self.make_block_store()

        genesis_ctrl = GenesisController(
            Mock(name='context_manager'),
            Mock(name='txn_executor'),
            Mock('completer'),
            block_store,
            StateViewFactory(DictDatabase()),
            self._identity_key,
            data_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir)
        )

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #36
    def test_empty_batch_file_should_produce_block(self,
                                                   mock_scheduler_complete):
        """
        In this case, the genesis batch, even with an empty list of batches,
        should produce a genesis block.
        Also:
         - the genesis.batch file should be deleted
         - the block_chain_id file should be created and populated
        """
        genesis_file = self._with_empty_batch_file()
        block_store = {}

        ctx_mgr = Mock(name='ContextManager')
        ctx_mgr.get_squash_handler.return_value = Mock()
        ctx_mgr.get_first_root.return_value = '000000000'

        txn_executor = Mock(name='txn_executor')
        completer = Mock('completer')
        completer.add_block = Mock('add_block')

        genesis_ctrl = GenesisController(ctx_mgr,
                                         txn_executor,
                                         completer,
                                         block_store,
                                         self._identity_key,
                                         data_dir=self._temp_dir)

        on_done_fn = Mock(return_value='')
        genesis_ctrl.start(on_done_fn)

        self.assertEqual(False, os.path.exists(genesis_file))

        self.assertEqual(True, 'chain_head_id' in block_store)
        self.assertEqual(True, block_store['chain_head_id'] in block_store)
        self.assertEqual(1, on_done_fn.call_count)
        self.assertEqual(1, completer.add_block.call_count)
        self.assertEqual(block_store['chain_head_id'],
                         self._read_block_chain_id())
Example #37
    def test_requires_genesis_fails_if_joins_network_with_file(self):
        """
        In this case, when there is
         - a genesis_batch_file
         - network id
        the validator should produce an assertion error, as it is joining
        a network and is not a genesis node.
        """
        self._with_empty_batch_file()
        self._with_network_name('some_block_chain_id')

        block_store = {}

        genesis_ctrl = GenesisController(
            Mock('context_manager'),
            Mock('txn_executor'),
            Mock('completer'),
            block_store,
            data_dir=self._temp_dir
        )

        with self.assertRaises(InvalidGenesisStateError):
            genesis_ctrl.requires_genesis()
Example #38
    def test_does_not_require_genesis_with_no_file_no_network(self):
        """
        In this case, when there is:
         - no genesis.batch file
         - no chain head
         - no network
        the GenesisController should not require genesis.
        """
        block_store = self.make_block_store()

        genesis_ctrl = GenesisController(
            Mock(name='context_manager'),
            Mock(name='txn_executor'),
            Mock('completer'),
            block_store,
            Mock('StateViewFactory'),
            self._signer,
            data_dir=self._temp_dir,
            config_dir=self._temp_dir,
            chain_id_manager=ChainIdManager(self._temp_dir),
            batch_sender=Mock('batch_sender'))

        self.assertEqual(False, genesis_ctrl.requires_genesis())
Example #39
    def test_requires_genesis_fails_if_block_exists(self):
        """
        In this case, when there is
         - a genesis_batch_file
         - a chain head id
        the validator should produce an assertion error, as it already has a
        genesis block and should not attempt to produce another.
        """
        self._with_empty_batch_file()

        block_store = {
            'chain_head_id': 'some_other_id'
        }

        genesis_ctrl = GenesisController(
            Mock('context_manager'),
            Mock('txn_executor'),
            Mock('completer'),
            block_store,
            data_dir=self._temp_dir
        )

        with self.assertRaises(InvalidGenesisStateError):
            genesis_ctrl.requires_genesis()
Example #40
    def __init__(self,
                 bind_network,
                 bind_component,
                 bind_consensus,
                 endpoint,
                 peering,
                 seeds_list,
                 peer_list,
                 data_dir,
                 config_dir,
                 identity_signer,
                 scheduler_type,
                 permissions,
                 minimum_peer_connectivity,
                 maximum_peer_connectivity,
                 state_pruning_block_depth,
                 network_public_key=None,
                 network_private_key=None,
                 roles=None):
        """Constructs a validator instance.

        Args:
            bind_network (str): the network endpoint
            bind_component (str): the component endpoint
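            bind_consensus (str): the consensus endpoint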
            endpoint (str): the zmq-style URI of this validator's
                publicly reachable endpoint
            peering (str): The type of peering approach. Either 'static'
                or 'dynamic'. In 'static' mode, no attempted topology
                buildout occurs -- the validator only attempts to initiate
                peering connections with endpoints specified in the
                peer_list. In 'dynamic' mode, the validator will first
                attempt to initiate peering connections with endpoints
                specified in the peer_list and then attempt to do a
                topology buildout starting with peer lists obtained from
                endpoints in the seeds_list. In either mode, the validator
                will accept incoming peer requests up to max_peers.
            seeds_list (list of str): a list of addresses to connect
                to in order to perform the initial topology buildout
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            config_dir (str): path to the config directory
            identity_signer (Signer): the cryptographic signer the validator
                uses for signing
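            scheduler_type (str): the type of transaction scheduler to use,
                passed to the transaction executor
            permissions (dict): the permission configuration enforced by the
                permission verifier
            minimum_peer_connectivity (int): the minimum number of peer
                connections to maintain
            maximum_peer_connectivity (int): the maximum number of peer
                connections to allow
            state_pruning_block_depth (int): the block depth below the chain
                head at which state roots are pruned
            network_public_key (bytes): the network public key; peer
                connections are secured only when both network keys are set
            network_private_key (bytes): the network private key; peer
                connections are secured only when both network keys are set
            roles (dict): authorization roles applied to network connections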
        """
        # -- Setup Global State Database and Factory -- #
        global_state_db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('global state database file is %s',
                     global_state_db_filename)
        global_state_db = NativeLmdbDatabase(
            global_state_db_filename,
            indexes=MerkleDatabase.create_index_configuration())
        state_view_factory = StateViewFactory(global_state_db)

        # -- Setup Receipt Store -- #
        receipt_db_filename = os.path.join(
            data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('txn receipt store file is %s', receipt_db_filename)
        receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c')
        receipt_store = TransactionReceiptStore(receipt_db)

        # -- Setup Block Store -- #
        block_db_filename = os.path.join(
            data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)
        block_db = IndexedDatabase(
            block_db_filename,
            BlockStore.serialize_block,
            BlockStore.deserialize_block,
            flag='c',
            indexes=BlockStore.create_index_configuration())
        block_store = BlockStore(block_db)
        # The cache keep time for the journal's block cache must be greater
        # than the cache keep time used by the completer.
        base_keep_time = 1200
        block_cache = BlockCache(block_store,
                                 keep_time=int(base_keep_time * 9 / 8),
                                 purge_frequency=30)

        # -- Setup Thread Pools -- #
        component_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=10, name='Component')
        network_thread_pool = InstrumentedThreadPoolExecutor(max_workers=10,
                                                             name='Network')
        client_thread_pool = InstrumentedThreadPoolExecutor(max_workers=5,
                                                            name='Client')
        sig_pool = InstrumentedThreadPoolExecutor(max_workers=3,
                                                  name='Signature')

        # -- Setup Dispatchers -- #
        component_dispatcher = Dispatcher()
        network_dispatcher = Dispatcher()

        # -- Setup Services -- #
        component_service = Interconnect(bind_component,
                                         component_dispatcher,
                                         secured=False,
                                         heartbeat=False,
                                         max_incoming_connections=20,
                                         monitor=True,
                                         max_future_callback_workers=10)

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        secure = False
        if network_public_key is not None and network_private_key is not None:
            secure = True

        network_service = Interconnect(bind_network,
                                       dispatcher=network_dispatcher,
                                       zmq_identity=zmq_identity,
                                       secured=secure,
                                       server_public_key=network_public_key,
                                       server_private_key=network_private_key,
                                       heartbeat=True,
                                       public_endpoint=endpoint,
                                       connection_timeout=120,
                                       max_incoming_connections=100,
                                       max_future_callback_workers=10,
                                       authorize=True,
                                       signer=identity_signer,
                                       roles=roles)

        # -- Setup Transaction Execution Platform -- #
        context_manager = ContextManager(global_state_db)

        batch_tracker = BatchTracker(block_store)

        settings_cache = SettingsCache(
            SettingsViewFactory(state_view_factory), )

        transaction_executor = TransactionExecutor(
            service=component_service,
            context_manager=context_manager,
            settings_view_factory=SettingsViewFactory(state_view_factory),
            scheduler_type=scheduler_type,
            invalid_observers=[batch_tracker])

        component_service.set_check_connections(
            transaction_executor.check_connections)

        event_broadcaster = EventBroadcaster(component_service, block_store,
                                             receipt_store)

        # -- Setup P2P Networking -- #
        gossip = Gossip(network_service,
                        settings_cache,
                        lambda: block_store.chain_head,
                        block_store.chain_head_state_root,
                        endpoint=endpoint,
                        peering_mode=peering,
                        initial_seed_endpoints=seeds_list,
                        initial_peer_endpoints=peer_list,
                        minimum_peer_connectivity=minimum_peer_connectivity,
                        maximum_peer_connectivity=maximum_peer_connectivity,
                        topology_check_frequency=1)

        completer = Completer(block_store,
                              gossip,
                              cache_keep_time=base_keep_time,
                              cache_purge_frequency=30,
                              requested_keep_time=300)

        block_sender = BroadcastBlockSender(completer, gossip)
        batch_sender = BroadcastBatchSender(completer, gossip)
        chain_id_manager = ChainIdManager(data_dir)

        identity_view_factory = IdentityViewFactory(
            StateViewFactory(global_state_db))

        id_cache = IdentityCache(identity_view_factory)

        # -- Setup Permissioning -- #
        permission_verifier = PermissionVerifier(
            permissions, block_store.chain_head_state_root, id_cache)

        identity_observer = IdentityObserver(to_update=id_cache.invalidate,
                                             forked=id_cache.forked)

        settings_observer = SettingsObserver(
            to_update=settings_cache.invalidate, forked=settings_cache.forked)

        # -- Consensus Engine -- #
        consensus_thread_pool = InstrumentedThreadPoolExecutor(
            max_workers=3, name='Consensus')
        consensus_dispatcher = Dispatcher()
        consensus_service = Interconnect(bind_consensus,
                                         consensus_dispatcher,
                                         secured=False,
                                         heartbeat=False,
                                         max_incoming_connections=20,
                                         monitor=True,
                                         max_future_callback_workers=10)

        consensus_notifier = ConsensusNotifier(consensus_service)

        # -- Setup Journal -- #
        batch_injector_factory = DefaultBatchInjectorFactory(
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            signer=identity_signer)

        block_publisher = BlockPublisher(
            transaction_executor=transaction_executor,
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            settings_cache=settings_cache,
            block_sender=block_sender,
            batch_sender=batch_sender,
            chain_head=block_store.chain_head,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            permission_verifier=permission_verifier,
            check_publish_block_frequency=0.1,
            batch_observers=[batch_tracker],
            batch_injector_factory=batch_injector_factory)

        block_publisher_batch_sender = block_publisher.batch_sender()

        block_validator = BlockValidator(
            block_cache=block_cache,
            state_view_factory=state_view_factory,
            transaction_executor=transaction_executor,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            permission_verifier=permission_verifier)

        chain_controller = ChainController(
            block_store=block_store,
            block_cache=block_cache,
            block_validator=block_validator,
            state_database=global_state_db,
            chain_head_lock=block_publisher.chain_head_lock,
            state_pruning_block_depth=state_pruning_block_depth,
            data_dir=data_dir,
            observers=[
                event_broadcaster, receipt_store, batch_tracker,
                identity_observer, settings_observer
            ])

        genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=transaction_executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_signer=identity_signer,
            data_dir=data_dir,
            config_dir=config_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender)

        responder = Responder(completer)

        completer.set_on_batch_received(block_publisher_batch_sender.send)
        completer.set_on_block_received(chain_controller.queue_block)
        completer.set_chain_has_block(chain_controller.has_block)

        # -- Register Message Handler -- #
        network_handlers.add(network_dispatcher, network_service, gossip,
                             completer, responder, network_thread_pool,
                             sig_pool, chain_controller.has_block,
                             block_publisher.has_batch, permission_verifier,
                             block_publisher, consensus_notifier)

        component_handlers.add(component_dispatcher, gossip, context_manager,
                               transaction_executor, completer, block_store,
                               batch_tracker, global_state_db,
                               self.get_chain_head_state_root_hash,
                               receipt_store, event_broadcaster,
                               permission_verifier, component_thread_pool,
                               client_thread_pool, sig_pool, block_publisher)

        # -- Store Object References -- #
        self._component_dispatcher = component_dispatcher
        self._component_service = component_service
        self._component_thread_pool = component_thread_pool

        self._network_dispatcher = network_dispatcher
        self._network_service = network_service
        self._network_thread_pool = network_thread_pool

        consensus_proxy = ConsensusProxy(
            block_cache=block_cache,
            chain_controller=chain_controller,
            block_publisher=block_publisher,
            gossip=gossip,
            identity_signer=identity_signer,
            settings_view_factory=SettingsViewFactory(state_view_factory),
            state_view_factory=state_view_factory)

        consensus_handlers.add(consensus_dispatcher, consensus_thread_pool,
                               consensus_proxy)

        self._consensus_dispatcher = consensus_dispatcher
        self._consensus_service = consensus_service
        self._consensus_thread_pool = consensus_thread_pool

        self._client_thread_pool = client_thread_pool
        self._sig_pool = sig_pool

        self._context_manager = context_manager
        self._transaction_executor = transaction_executor
        self._genesis_controller = genesis_controller
        self._gossip = gossip

        self._block_publisher = block_publisher
        self._chain_controller = chain_controller
        self._block_validator = block_validator
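
For orientation, here is a sketch of how this constructor could be invoked. The import path, endpoints, directories, key handling, and parameter values below are illustrative assumptions, not values taken from the example.

from sawtooth_signing import create_context, CryptoFactory

# Assumed import path for the class whose __init__ is shown above.
from sawtooth_validator.server.core import Validator

# Ephemeral key for illustration; a real deployment loads the validator's
# identity key from its key directory.
signing_context = create_context('secp256k1')
identity_signer = CryptoFactory(signing_context).new_signer(
    signing_context.new_random_private_key())

validator = Validator(
    bind_network='tcp://127.0.0.1:8800',
    bind_component='tcp://127.0.0.1:4004',
    bind_consensus='tcp://127.0.0.1:5050',
    endpoint='tcp://127.0.0.1:8800',
    peering='static',                 # only peer with entries in peer_list
    seeds_list=[],
    peer_list=[],
    data_dir='/var/lib/sawtooth',
    config_dir='/etc/sawtooth',
    identity_signer=identity_signer,
    scheduler_type='serial',
    permissions={},
    minimum_peer_connectivity=3,
    maximum_peer_connectivity=10,
    state_pruning_block_depth=1000)
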
Example #41
    def __init__(self, bind_network, bind_component, endpoint,
                 peering, seeds_list, peer_list, data_dir, config_dir,
                 identity_signing_key, network_public_key=None,
                 network_private_key=None):
        """Constructs a validator instance.

        Args:
            bind_network (str): the network endpoint
            bind_component (str): the component endpoint
            endpoint (str): the zmq-style URI of this validator's
                publicly reachable endpoint
            peering (str): The type of peering approach. Either 'static'
                or 'dynamic'. In 'static' mode, no attempted topology
                buildout occurs -- the validator only attempts to initiate
                peering connections with endpoints specified in the
                peer_list. In 'dynamic' mode, the validator will first
                attempt to initiate peering connections with endpoints
                specified in the peer_list and then attempt to do a
                topology buildout starting with peer lists obtained from
                endpoints in the seeds_list. In either mode, the validator
                will accept incoming peer requests up to max_peers.
            seeds_list (list of str): a list of addresses to connect
                to in order to perform the initial topology buildout
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            config_dir (str): path to the config directory
            identity_signing_key (str): the key the validator uses for signing
        """
        db_filename = os.path.join(data_dir,
                                   'merkle-{}.lmdb'.format(
                                       bind_network[-2:]))
        LOGGER.debug('database file is %s', db_filename)

        merkle_db = LMDBNoLockDatabase(db_filename, 'c')

        delta_db_filename = os.path.join(data_dir,
                                         'state-deltas-{}.lmdb'.format(
                                             bind_network[-2:]))
        LOGGER.debug('state delta store file is %s', delta_db_filename)
        state_delta_db = LMDBNoLockDatabase(delta_db_filename, 'c')

        state_delta_store = StateDeltaStore(state_delta_db)

        context_manager = ContextManager(merkle_db, state_delta_store)
        self._context_manager = context_manager

        state_view_factory = StateViewFactory(merkle_db)

        block_db_filename = os.path.join(data_dir, 'block-{}.lmdb'.format(
                                         bind_network[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)

        block_db = LMDBNoLockDatabase(block_db_filename, 'c')
        block_store = BlockStore(block_db)

        batch_tracker = BatchTracker(block_store)
        block_store.add_update_observer(batch_tracker)

        # setup network
        self._dispatcher = Dispatcher()

        thread_pool = ThreadPoolExecutor(max_workers=10)
        sig_pool = ThreadPoolExecutor(max_workers=3)

        self._thread_pool = thread_pool
        self._sig_pool = sig_pool

        self._service = Interconnect(bind_component,
                                     self._dispatcher,
                                     secured=False,
                                     heartbeat=False,
                                     max_incoming_connections=20,
                                     monitor=True)

        executor = TransactionExecutor(
            service=self._service,
            context_manager=context_manager,
            settings_view_factory=SettingsViewFactory(
                StateViewFactory(merkle_db)),
            invalid_observers=[batch_tracker])
        self._executor = executor
        self._service.set_check_connections(executor.check_connections)

        state_delta_processor = StateDeltaProcessor(self._service,
                                                    state_delta_store,
                                                    block_store)

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        network_thread_pool = ThreadPoolExecutor(max_workers=10)
        self._network_thread_pool = network_thread_pool

        self._network_dispatcher = Dispatcher()

        secure = False
        if network_public_key is not None and network_private_key is not None:
            secure = True

        self._network = Interconnect(
            bind_network,
            dispatcher=self._network_dispatcher,
            zmq_identity=zmq_identity,
            secured=secure,
            server_public_key=network_public_key,
            server_private_key=network_private_key,
            heartbeat=True,
            public_endpoint=endpoint,
            connection_timeout=30,
            max_incoming_connections=100)

        self._gossip = Gossip(self._network,
                              endpoint=endpoint,
                              peering_mode=peering,
                              initial_seed_endpoints=seeds_list,
                              initial_peer_endpoints=peer_list,
                              minimum_peer_connectivity=3,
                              maximum_peer_connectivity=10,
                              topology_check_frequency=1)

        completer = Completer(block_store, self._gossip)

        block_sender = BroadcastBlockSender(completer, self._gossip)
        batch_sender = BroadcastBatchSender(completer, self._gossip)
        chain_id_manager = ChainIdManager(data_dir)
        # Create and configure journal
        self._journal = Journal(
            block_store=block_store,
            state_view_factory=StateViewFactory(merkle_db),
            block_sender=block_sender,
            batch_sender=batch_sender,
            transaction_executor=executor,
            squash_handler=context_manager.get_squash_handler(),
            identity_signing_key=identity_signing_key,
            chain_id_manager=chain_id_manager,
            state_delta_processor=state_delta_processor,
            data_dir=data_dir,
            config_dir=config_dir,
            check_publish_block_frequency=0.1,
            block_cache_purge_frequency=30,
            block_cache_keep_time=300,
            batch_observers=[batch_tracker]
        )

        self._genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_key=identity_signing_key,
            data_dir=data_dir,
            config_dir=config_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender
        )

        responder = Responder(completer)

        completer.set_on_batch_received(self._journal.on_batch_received)
        completer.set_on_block_received(self._journal.on_block_received)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_GET_REQUEST,
            tp_state_handlers.TpStateGetHandler(context_manager),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_SET_REQUEST,
            tp_state_handlers.TpStateSetHandler(context_manager),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_REGISTER_REQUEST,
            processor_handlers.ProcessorRegisterHandler(executor.processors),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_UNREGISTER_REQUEST,
            processor_handlers.ProcessorUnRegisterHandler(executor.processors),
            thread_pool)

        # Set up base network handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_PING,
            PingHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_CONNECT,
            ConnectHandler(network=self._network),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_DISCONNECT,
            DisconnectHandler(network=self._network),
            network_thread_pool)

        # Set up gossip handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
            GetPeersRequestHandler(gossip=self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
            GetPeersResponseHandler(gossip=self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_REGISTER,
            PeerRegisterHandler(gossip=self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_UNREGISTER,
            PeerUnregisterHandler(gossip=self._gossip),
            network_thread_pool)

        # GOSSIP_MESSAGE 1) Sends acknowledgement to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipMessageHandler(),
            network_thread_pool)

        # GOSSIP_MESSAGE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            signature_verifier.GossipMessageSignatureVerifier(),
            sig_pool)

        # GOSSIP_MESSAGE 3) Verifies batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            structure_verifier.GossipHandlerStructureVerifier(),
            network_thread_pool)

        # GOSSIP_MESSAGE 4) Determines whether we should broadcast the
        # message to our peers. This must happen before the message is
        # sent to the completer, because the decision to forward depends
        # on whether the validator has already seen the gossip message.
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipBroadcastHandler(
                gossip=self._gossip,
                completer=completer),
            network_thread_pool)

        # GOSSIP_MESSAGE 5) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            CompleterGossipHandler(
                completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
            BlockResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            GossipBlockResponseHandler(),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            signature_verifier.GossipBlockResponseSignatureVerifier(),
            sig_pool)

        # GOSSIP_BLOCK_RESPONSE 3) Check batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            structure_verifier.GossipBlockResponseStructureVerifier(),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 4) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            CompleterGossipBlockResponseHandler(
                completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            ResponderBlockResponseHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
            BatchByBatchIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
            BatchByTransactionIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            GossipBatchResponseHandler(),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            signature_verifier.GossipBatchResponseSignatureVerifier(),
            sig_pool)

        # GOSSIP_BATCH_RESPONSE 3) Check batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            structure_verifier.GossipBatchResponseStructureVerifier(),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 4) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            CompleterGossipBatchResponseHandler(
                completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            ResponderBatchResponseHandler(responder, self._gossip),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            BatchListPermissionVerifier(
                settings_view_factory=SettingsViewFactory(
                    StateViewFactory(merkle_db)),
                current_root_func=self._journal.get_current_root,
            ),
            sig_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            signature_verifier.BatchListSignatureVerifier(),
            sig_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            structure_verifier.BatchListStructureVerifier(),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            CompleterBatchListBroadcastHandler(
                completer, self._gossip),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            client_handlers.BatchSubmitFinisher(batch_tracker),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST,
            client_handlers.BatchStatusRequest(batch_tracker),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
            client_handlers.StateListRequest(
                merkle_db,
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
            client_handlers.StateGetRequest(
                merkle_db,
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
            client_handlers.BlockListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
            client_handlers.BlockGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST,
            client_handlers.BatchListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_GET_REQUEST,
            client_handlers.BatchGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_TRANSACTION_LIST_REQUEST,
            client_handlers.TransactionListRequest(
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_TRANSACTION_GET_REQUEST,
            client_handlers.TransactionGetRequest(
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
            client_handlers.StateCurrentRequest(
                self._journal.get_current_root), thread_pool)

        # State Delta Subscription Handlers
        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
            StateDeltaSubscriberValidationHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
            StateDeltaAddSubscriberHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_UNSUBSCRIBE_REQUEST,
            StateDeltaUnsubscriberHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_GET_EVENTS_REQUEST,
            GetStateDeltaEventsHandler(block_store, state_delta_store),
            thread_pool)
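
The GOSSIP_MESSAGE registrations above depend on the dispatcher running a
message type's handlers in the order they were added: ack, signature check,
structure check, broadcast decision, then hand-off to the completer. As a
rough illustration of that pattern only, the sketch below chains per-type
handlers across thread pools in registration order; MiniDispatcher is a
made-up toy, not Sawtooth's Dispatcher, and the message type and handlers
are placeholders.

from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
import time

class MiniDispatcher:
    """Toy dispatcher: handlers registered for a message type run in
    registration order, each on the executor it was registered with."""

    def __init__(self):
        self._handlers = defaultdict(list)

    def add_handler(self, message_type, handler, executor):
        self._handlers[message_type].append((handler, executor))

    def dispatch(self, message_type, message):
        handlers = self._handlers[message_type]

        def run(index):
            # Chain the handlers with done-callbacks so ordering is
            # preserved even though each step runs on a thread pool.
            if index < len(handlers):
                handler, executor = handlers[index]
                future = executor.submit(handler, message)
                future.add_done_callback(lambda _: run(index + 1))

        run(0)

pool = ThreadPoolExecutor(max_workers=2)
dispatcher = MiniDispatcher()
dispatcher.add_handler('GOSSIP_MESSAGE', lambda msg: print('1) ack:', msg), pool)
dispatcher.add_handler('GOSSIP_MESSAGE', lambda msg: print('2) verify:', msg), pool)
dispatcher.add_handler('GOSSIP_MESSAGE', lambda msg: print('3) complete:', msg), pool)
dispatcher.dispatch('GOSSIP_MESSAGE', 'block-abc')

time.sleep(0.2)           # give the chained handlers time to run
pool.shutdown(wait=True)
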
Beispiel #42
0
    def __init__(self, network_endpoint, component_endpoint, peer_list,
                 data_dir, identity_signing_key):
        """Constructs a validator instance.

        Args:
            network_endpoint (str): the network endpoint
            component_endpoint (str): the component endpoint
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            identity_signing_key (str): key the validator uses for signing
        """
        db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('database file is %s', db_filename)

        merkle_db = LMDBNoLockDatabase(db_filename, 'c')

        context_manager = ContextManager(merkle_db)
        self._context_manager = context_manager

        state_view_factory = StateViewFactory(merkle_db)

        block_db_filename = os.path.join(
            data_dir, 'block-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)

        block_db = LMDBNoLockDatabase(block_db_filename, 'c')
        block_store = BlockStore(block_db)

        # setup network
        self._dispatcher = Dispatcher()

        thread_pool = ThreadPoolExecutor(max_workers=10)
        process_pool = ProcessPoolExecutor(max_workers=3)

        self._thread_pool = thread_pool
        self._process_pool = process_pool

        self._service = Interconnect(component_endpoint,
                                     self._dispatcher,
                                     secured=False,
                                     heartbeat=False,
                                     max_incoming_connections=20)

        executor = TransactionExecutor(service=self._service,
                                       context_manager=context_manager,
                                       config_view_factory=ConfigViewFactory(
                                           StateViewFactory(merkle_db)))
        self._executor = executor

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        network_thread_pool = ThreadPoolExecutor(max_workers=10)
        self._network_thread_pool = network_thread_pool

        self._network_dispatcher = Dispatcher()

        # Server public and private keys are hardcoded here due to
        # the decision to avoid having separate identities for each
        # validator's server socket. This is appropriate for a public
        # network. For a permissioned network that requires server
        # endpoint authentication at the network level, this can be
        # augmented with a local lookup service for (endpoint, public_key)
        # pairs provided out of band, plus a local configuration option
        # for 'server'-side private keys.
        self._network = Interconnect(
            network_endpoint,
            dispatcher=self._network_dispatcher,
            zmq_identity=zmq_identity,
            secured=True,
            server_public_key=b'wFMwoOt>yFqI/ek.G[tfMMILHWw#vXB[Sv}>l>i)',
            server_private_key=b'r&oJ5aQDj4+V]p2:Lz70Eu0x#m%IwzBdP(}&hWM*',
            heartbeat=True,
            connection_timeout=30,
            max_incoming_connections=100)

        self._gossip = Gossip(self._network, initial_peer_endpoints=peer_list)

        completer = Completer(block_store, self._gossip)

        block_sender = BroadcastBlockSender(completer, self._gossip)
        batch_sender = BroadcastBatchSender(completer, self._gossip)
        chain_id_manager = ChainIdManager(data_dir)
        # Create and configure journal
        self._journal = Journal(
            block_store=block_store,
            state_view_factory=StateViewFactory(merkle_db),
            block_sender=block_sender,
            batch_sender=batch_sender,
            transaction_executor=executor,
            squash_handler=context_manager.get_squash_handler(),
            identity_signing_key=identity_signing_key,
            chain_id_manager=chain_id_manager,
            data_dir=data_dir,
            check_publish_block_frequency=0.1,
            block_cache_purge_frequency=30,
            block_cache_keep_time=300)

        self._genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_key=identity_signing_key,
            data_dir=data_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender)

        responder = Responder(completer)

        completer.set_on_batch_received(self._journal.on_batch_received)
        completer.set_on_block_received(self._journal.on_block_received)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_GET_REQUEST,
            tp_state_handlers.TpStateGetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_SET_REQUEST,
            tp_state_handlers.TpStateSetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_REGISTER_REQUEST,
            processor_handlers.ProcessorRegisterHandler(executor.processors),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_UNREGISTER_REQUEST,
            processor_handlers.ProcessorUnRegisterHandler(executor.processors),
            thread_pool)

        # Set up base network handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_PING, PingHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_CONNECT,
            ConnectHandler(network=self._network), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_DISCONNECT,
            DisconnectHandler(network=self._network), network_thread_pool)

        # Set up gossip handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_REGISTER,
            PeerRegisterHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_UNREGISTER,
            PeerUnregisterHandler(gossip=self._gossip), network_thread_pool)

        # GOSSIP_MESSAGE 1) Sends acknowledgement to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE, GossipMessageHandler(),
            network_thread_pool)

        # GOSSIP_MESSAGE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            signature_verifier.GossipMessageSignatureVerifier(), process_pool)

        # GOSSIP_MESSAGE 3) Determines whether we should broadcast the
        # message to our peers. This must happen before the message is
        # sent to the completer, because the decision to forward depends
        # on whether the validator has already seen the gossip message.
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipBroadcastHandler(gossip=self._gossip, completer=completer),
            network_thread_pool)

        # GOSSIP_MESSAGE 4) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            CompleterGossipHandler(completer), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
            BlockResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            GossipBlockResponseHandler(), network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            signature_verifier.GossipBlockResponseSignatureVerifier(),
            process_pool)

        # GOSSIP_BLOCK_RESPONSE 3) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            CompleterGossipBlockResponseHandler(completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
            BatchByBatchIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
            BatchByTransactionIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            GossipBatchResponseHandler(), network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            signature_verifier.GossipBatchResponseSignatureVerifier(),
            process_pool)

        # GOSSIP_BATCH_RESPONSE 3) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            CompleterGossipBatchResponseHandler(completer),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            signature_verifier.BatchListSignatureVerifier(), process_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            CompleterBatchListBroadcastHandler(completer, self._gossip),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            client_handlers.BatchSubmitFinisher(
                self._journal.get_block_store(), completer.batch_cache),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST,
            client_handlers.BatchStatusRequest(self._journal.get_block_store(),
                                               completer.batch_cache),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
            client_handlers.StateListRequest(merkle_db,
                                             self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
            client_handlers.StateGetRequest(merkle_db,
                                            self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
            client_handlers.BlockListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
            client_handlers.BlockGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST,
            client_handlers.BatchListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_GET_REQUEST,
            client_handlers.BatchGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
            client_handlers.StateCurrentRequest(
                self._journal.get_current_root), thread_pool)
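
The comment in the example above notes that the CurveZMQ server keypair is
hardcoded so that every validator shares one server identity, and that a
permissioned deployment would supply its own keys out of band. Purely as a
side note, and assuming pyzmq with a libzmq build that supports CURVE, a
keypair in the same Z85 format can be generated like this:

# Sketch only: generate a CurveZMQ server keypair with pyzmq instead of
# hardcoding one (assumes pyzmq and CURVE-enabled libzmq; not part of the
# validator example above).
import zmq

server_public_key, server_private_key = zmq.curve_keypair()

# Both keys are 40-character Z85-encoded byte strings, the same format as
# the hardcoded b'wFMwoOt>...' values used above.
print(len(server_public_key), len(server_private_key))   # 40 40
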
Beispiel #43
0
class Validator(object):
    def __init__(self, network_endpoint, component_endpoint, peer_list,
                 data_dir, identity_signing_key):
        """Constructs a validator instance.

        Args:
            network_endpoint (str): the network endpoint
            component_endpoint (str): the component endpoint
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            identity_signing_key (str): key the validator uses for signing
        """
        db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('database file is %s', db_filename)

        merkle_db = LMDBNoLockDatabase(db_filename, 'n')

        context_manager = ContextManager(merkle_db)
        self._context_manager = context_manager

        state_view_factory = StateViewFactory(merkle_db)

        block_db_filename = os.path.join(
            data_dir, 'block-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)

        block_db = LMDBNoLockDatabase(block_db_filename, 'n')
        block_store = BlockStore(block_db)

        # setup network
        self._dispatcher = Dispatcher()

        thread_pool = ThreadPoolExecutor(max_workers=10)
        process_pool = ProcessPoolExecutor(max_workers=3)

        self._thread_pool = thread_pool
        self._process_pool = process_pool

        self._service = Interconnect(component_endpoint,
                                     self._dispatcher,
                                     secured=False,
                                     heartbeat=False,
                                     max_incoming_connections=20)

        executor = TransactionExecutor(service=self._service,
                                       context_manager=context_manager,
                                       config_view_factory=ConfigViewFactory(
                                           StateViewFactory(merkle_db)))
        self._executor = executor

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        network_thread_pool = ThreadPoolExecutor(max_workers=10)
        self._network_thread_pool = network_thread_pool

        self._network_dispatcher = Dispatcher()

        # Server public and private keys are hardcoded here due to
        # the decision to avoid having separate identities for each
        # validator's server socket. This is appropriate for a public
        # network. For a permissioned network that requires server
        # endpoint authentication at the network level, this can be
        # augmented with a local lookup service for (endpoint, public_key)
        # pairs provided out of band, plus a local configuration option
        # for 'server'-side private keys.
        self._network = Interconnect(
            network_endpoint,
            dispatcher=self._network_dispatcher,
            zmq_identity=zmq_identity,
            secured=True,
            server_public_key=b'wFMwoOt>yFqI/ek.G[tfMMILHWw#vXB[Sv}>l>i)',
            server_private_key=b'r&oJ5aQDj4+V]p2:Lz70Eu0x#m%IwzBdP(}&hWM*',
            heartbeat=True,
            connection_timeout=30,
            max_incoming_connections=100)

        self._gossip = Gossip(self._network, initial_peer_endpoints=peer_list)

        completer = Completer(block_store, self._gossip)

        block_sender = BroadcastBlockSender(completer, self._gossip)
        batch_sender = BroadcastBatchSender(completer, self._gossip)
        chain_id_manager = ChainIdManager(data_dir)
        # Create and configure journal
        self._journal = Journal(
            block_store=block_store,
            state_view_factory=StateViewFactory(merkle_db),
            block_sender=block_sender,
            batch_sender=batch_sender,
            transaction_executor=executor,
            squash_handler=context_manager.get_squash_handler(),
            identity_signing_key=identity_signing_key,
            chain_id_manager=chain_id_manager,
            data_dir=data_dir,
            check_publish_block_frequency=0.1,
            block_cache_purge_frequency=30,
            block_cache_keep_time=300)

        self._genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_key=identity_signing_key,
            data_dir=data_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender)

        responder = Responder(completer)

        completer.set_on_batch_received(self._journal.on_batch_received)
        completer.set_on_block_received(self._journal.on_block_received)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_GET_REQUEST,
            tp_state_handlers.TpStateGetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_SET_REQUEST,
            tp_state_handlers.TpStateSetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_REGISTER_REQUEST,
            processor_handlers.ProcessorRegisterHandler(executor.processors),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_UNREGISTER_REQUEST,
            processor_handlers.ProcessorUnRegisterHandler(executor.processors),
            thread_pool)

        # Set up base network handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_PING, PingHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_CONNECT,
            ConnectHandler(network=self._network), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_DISCONNECT,
            DisconnectHandler(network=self._network), network_thread_pool)

        # Set up gossip handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_REGISTER,
            PeerRegisterHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_UNREGISTER,
            PeerUnregisterHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE, GossipMessageHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            signature_verifier.GossipMessageSignatureVerifier(), process_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipBroadcastHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            CompleterGossipHandler(completer), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
            BlockResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
            BatchByBatchIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
            BatchByTransactionIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            signature_verifier.BatchListSignatureVerifier(), process_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            CompleterBatchListBroadcastHandler(completer, self._gossip),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            client_handlers.BatchSubmitFinisher(
                self._journal.get_block_store(), completer.batch_cache),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST,
            client_handlers.BatchStatusRequest(self._journal.get_block_store(),
                                               completer.batch_cache),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
            client_handlers.StateListRequest(merkle_db,
                                             self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
            client_handlers.StateGetRequest(merkle_db,
                                            self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
            client_handlers.BlockListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
            client_handlers.BlockGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST,
            client_handlers.BatchListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_GET_REQUEST,
            client_handlers.BatchGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
            client_handlers.StateCurrentRequest(
                self._journal.get_current_root), thread_pool)

    def start(self):
        self._dispatcher.start()
        self._service.start()
        if self._genesis_controller.requires_genesis():
            self._genesis_controller.start(self._start)
        else:
            self._start()

    def _start(self):
        self._network_dispatcher.start()
        self._network.start()

        self._gossip.start()
        self._journal.start()

        signal_event = threading.Event()

        signal.signal(signal.SIGTERM, lambda sig, fr: signal_event.set())
        # This is where the main thread will be during the bulk of the
        # validator's life.
        while not signal_event.is_set():
            signal_event.wait(timeout=20)

    def stop(self):
        self._network.stop()

        self._service.stop()

        self._process_pool.shutdown(wait=True)
        self._network_thread_pool.shutdown(wait=True)
        self._thread_pool.shutdown(wait=True)

        self._executor.stop()
        self._context_manager.stop()

        self._journal.stop()

        threads = threading.enumerate()

        # Remove the MainThread from the list; it exits on its own when we
        # call sys.exit() or return from main().
        threads.remove(threading.current_thread())

        # Several Thread subclasses define a stop() method that is not
        # part of the Thread base class.
        for t in threads:
            if hasattr(t, 'stop'):
                t.stop()

        while len(threads) > 0:
            if len(threads) < 4:
                LOGGER.info(
                    "remaining threads: %s", ", ".join([
                        "{} ({})".format(x.name, x.__class__.__name__)
                        for x in threads
                    ]))
            for t in threads.copy():
                if not t.is_alive():
                    t.join()
                    threads.remove(t)
                if len(threads) > 0:
                    time.sleep(1)

        LOGGER.info("All threads have been stopped and joined")
Beispiel #44
0
class Validator(object):
    def __init__(self, network_endpoint, component_endpoint, peer_list,
                 data_dir, key_dir):
        """Constructs a validator instance.

        Args:
            network_endpoint (str): the network endpoint
            component_endpoint (str): the component endpoint
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            key_dir (str): path to the key directory
        """
        db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('database file is %s', db_filename)

        merkle_db = LMDBNoLockDatabase(db_filename, 'n')
        context_manager = ContextManager(merkle_db)
        state_view_factory = StateViewFactory(merkle_db)

        block_db_filename = os.path.join(
            data_dir, 'block-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)

        block_db = LMDBNoLockDatabase(block_db_filename, 'n')
        block_store = BlockStore(block_db)

        # setup network
        self._dispatcher = Dispatcher()

        thread_pool = ThreadPoolExecutor(max_workers=10)
        process_pool = ProcessPoolExecutor(max_workers=3)

        self._service = Interconnect(component_endpoint,
                                     self._dispatcher,
                                     secured=False)
        executor = TransactionExecutor(service=self._service,
                                       context_manager=context_manager,
                                       config_view_factory=ConfigViewFactory(
                                           StateViewFactory(merkle_db)))

        identity = hashlib.sha512(time.time().hex().encode()).hexdigest()[:23]

        identity_signing_key = Validator.load_identity_signing_key(
            key_dir, DEFAULT_KEY_NAME)

        network_thread_pool = ThreadPoolExecutor(max_workers=10)

        self._network_dispatcher = Dispatcher()

        # Server public and private keys are hardcoded here due to
        # the decision to avoid having separate identities for each
        # validator's server socket. This is appropriate for a public
        # network. For a permissioned network that requires server
        # endpoint authentication at the network level, this can be
        # augmented with a local lookup service for (endpoint, public_key)
        # pairs provided out of band, plus a local configuration option
        # for 'server'-side private keys.
        self._network = Interconnect(
            network_endpoint,
            dispatcher=self._network_dispatcher,
            identity=identity,
            peer_connections=peer_list,
            secured=True,
            server_public_key=b'wFMwoOt>yFqI/ek.G[tfMMILHWw#vXB[Sv}>l>i)',
            server_private_key=b'r&oJ5aQDj4+V]p2:Lz70Eu0x#m%IwzBdP(}&hWM*')

        self._gossip = Gossip(self._network)

        completer = Completer(block_store, self._gossip)

        block_sender = BroadcastBlockSender(completer, self._gossip)
        chain_id_manager = ChainIdManager(data_dir)
        # Create and configure journal
        self._journal = Journal(
            block_store=block_store,
            state_view_factory=StateViewFactory(merkle_db),
            block_sender=block_sender,
            transaction_executor=executor,
            squash_handler=context_manager.get_squash_handler(),
            identity_signing_key=identity_signing_key,
            chain_id_manager=chain_id_manager)

        self._genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_key=identity_signing_key,
            data_dir=data_dir,
            chain_id_manager=chain_id_manager)

        responder = Responder(completer)

        completer.set_on_batch_received(self._journal.on_batch_received)
        completer.set_on_block_received(self._journal.on_block_received)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_GET_REQUEST,
            tp_state_handlers.TpStateGetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_SET_REQUEST,
            tp_state_handlers.TpStateSetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_REGISTER_REQUEST,
            processor_handlers.ProcessorRegisterHandler(executor.processors),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_UNREGISTER_REQUEST,
            processor_handlers.ProcessorUnRegisterHandler(executor.processors),
            thread_pool)

        self._network_dispatcher.add_handler(validator_pb2.Message.GOSSIP_PING,
                                             PingHandler(),
                                             network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_REGISTER,
            PeerRegisterHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_UNREGISTER,
            PeerUnregisterHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE, GossipMessageHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            signature_verifier.GossipMessageSignatureVerifier(), process_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipBroadcastHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            CompleterGossipHandler(completer), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
            BlockResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
            BatchByBatchIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
            BatchByTransactionIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            signature_verifier.BatchListSignatureVerifier(), process_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            CompleterBatchListBroadcastHandler(completer, self._gossip),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST,
            client_handlers.BatchStatusRequest(self._journal.get_block_store(),
                                               completer.batch_cache),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
            client_handlers.StateListRequest(merkle_db,
                                             self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
            client_handlers.StateGetRequest(merkle_db,
                                            self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
            client_handlers.BlockGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
            client_handlers.BlockListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
            client_handlers.StateCurrentRequest(
                self._journal.get_current_root), thread_pool)

    def start(self):
        self._dispatcher.start()
        self._service.start()
        if self._genesis_controller.requires_genesis():
            self._genesis_controller.start(self._start)
        else:
            self._start()

    def _start(self):
        self._network_dispatcher.start()
        self._network.start(daemon=True)
        self._gossip.start()
        self._journal.start()

    def stop(self):
        self._service.stop()
        self._network.stop()
        self._journal.stop()

    @staticmethod
    def load_identity_signing_key(key_dir, key_name):
        """Loads a private key from the key director, based on a validator's
        identity.

        Args:
            key_dir (str): The path to the key directory.
            key_name (str): The name of the key to load.

        Returns:
            str: the private signing key, in hex.
        """
        key_path = os.path.join(key_dir, '{}.wif'.format(key_name))

        if os.path.exists(key_path):
            LOGGER.debug('Found signing key %s', key_path)
            with open(key_path, 'r') as key_file:
                wif_key = key_file.read().strip()
                return signing.encode_privkey(signing.decode_privkey(wif_key),
                                              'hex')
        else:
            LOGGER.info('No signing key found. Generating %s', key_path)
            priv_key = signing.generate_privkey()
            with open(key_path, 'w') as key_file:
                key_file.write(signing.encode_privkey(priv_key))

            return signing.encode_privkey(priv_key, 'hex')
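
A quick usage sketch for load_identity_signing_key above. This is hedged: it
assumes the Validator class and the signing module from this example are
importable, and it uses a throwaway temporary directory and the hypothetical
key name 'validator'.

import os
import tempfile

key_dir = tempfile.mkdtemp()

# First call: no key file exists yet, so a private key is generated,
# written as <key_dir>/validator.wif, and returned hex-encoded.
hex_key = Validator.load_identity_signing_key(key_dir, 'validator')
print(os.listdir(key_dir))        # ['validator.wif']

# Second call: the existing .wif file is found and decoded, so the same
# hex-encoded private key comes back.
assert Validator.load_identity_signing_key(key_dir, 'validator') == hex_key
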
Beispiel #45
0
class Validator(object):
    def __init__(self,
                 bind_network,
                 bind_component,
                 endpoint,
                 peering,
                 seeds_list,
                 peer_list,
                 data_dir,
                 config_dir,
                 identity_signing_key,
                 scheduler_type,
                 permissions,
                 network_public_key=None,
                 network_private_key=None,
                 roles=None,
                 metrics_registry=None):
        """Constructs a validator instance.

        Args:
            bind_network (str): the network endpoint
            bind_component (str): the component endpoint
            endpoint (str): the zmq-style URI of this validator's
                publicly reachable endpoint
            peering (str): The type of peering approach. Either 'static'
                or 'dynamic'. In 'static' mode, no attempted topology
                buildout occurs -- the validator only attempts to initiate
                peering connections with endpoints specified in the
                peer_list. In 'dynamic' mode, the validator will first
                attempt to initiate peering connections with endpoints
                specified in the peer_list and then attempt to do a
                topology buildout starting with peer lists obtained from
                endpoints in the seeds_list. In either mode, the validator
                will accept incoming peer requests up to its maximum
                peer connectivity.
            seeds_list (list of str): a list of addresses to connect
                to in order to perform the initial topology buildout
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            config_dir (str): path to the config directory
            identity_signing_key (str): key the validator uses for signing
            scheduler_type (str): the type of scheduler used by the
                transaction executor
            permissions (dict): permissions enforced by the permission
                verifier
            network_public_key (bytes): CurveZMQ public key used to secure
                the validator's network socket
            network_private_key (bytes): CurveZMQ private key used to secure
                the validator's network socket
            roles (dict): authorization roles for network connections
            metrics_registry: registry used to report validator metrics
        """
        db_filename = os.path.join(data_dir,
                                   'merkle-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('database file is %s', db_filename)

        merkle_db = LMDBNoLockDatabase(db_filename, 'c')

        delta_db_filename = os.path.join(
            data_dir, 'state-deltas-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('state delta store file is %s', delta_db_filename)
        state_delta_db = LMDBNoLockDatabase(delta_db_filename, 'c')

        receipt_db_filename = os.path.join(
            data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('txn receipt store file is %s', receipt_db_filename)
        receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c')

        state_delta_store = StateDeltaStore(state_delta_db)
        receipt_store = TransactionReceiptStore(receipt_db)

        context_manager = ContextManager(merkle_db, state_delta_store)
        self._context_manager = context_manager

        state_view_factory = StateViewFactory(merkle_db)

        block_db_filename = os.path.join(
            data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)

        block_db = LMDBNoLockDatabase(block_db_filename, 'c')
        block_store = BlockStore(block_db)

        batch_tracker = BatchTracker(block_store)

        # setup network
        self._dispatcher = Dispatcher(metrics_registry=metrics_registry)

        thread_pool = ThreadPoolExecutor(max_workers=10)
        sig_pool = ThreadPoolExecutor(max_workers=3)

        self._thread_pool = thread_pool
        self._sig_pool = sig_pool

        self._service = Interconnect(bind_component,
                                     self._dispatcher,
                                     secured=False,
                                     heartbeat=False,
                                     max_incoming_connections=20,
                                     monitor=True,
                                     max_future_callback_workers=10,
                                     metrics_registry=metrics_registry)

        executor = TransactionExecutor(
            service=self._service,
            context_manager=context_manager,
            settings_view_factory=SettingsViewFactory(
                StateViewFactory(merkle_db)),
            scheduler_type=scheduler_type,
            invalid_observers=[batch_tracker],
            metrics_registry=metrics_registry)

        self._executor = executor
        self._service.set_check_connections(executor.check_connections)

        state_delta_processor = StateDeltaProcessor(self._service,
                                                    state_delta_store,
                                                    block_store)
        event_broadcaster = EventBroadcaster(self._service, block_store,
                                             receipt_store)

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        network_thread_pool = ThreadPoolExecutor(max_workers=10)
        self._network_thread_pool = network_thread_pool

        self._network_dispatcher = Dispatcher(
            metrics_registry=metrics_registry)

        secure = False
        if network_public_key is not None and network_private_key is not None:
            secure = True

        self._network = Interconnect(
            bind_network,
            dispatcher=self._network_dispatcher,
            zmq_identity=zmq_identity,
            secured=secure,
            server_public_key=network_public_key,
            server_private_key=network_private_key,
            heartbeat=True,
            public_endpoint=endpoint,
            connection_timeout=30,
            max_incoming_connections=100,
            max_future_callback_workers=10,
            authorize=True,
            public_key=signing.generate_pubkey(identity_signing_key),
            priv_key=identity_signing_key,
            roles=roles,
            metrics_registry=metrics_registry)

        self._gossip = Gossip(self._network,
                              endpoint=endpoint,
                              peering_mode=peering,
                              initial_seed_endpoints=seeds_list,
                              initial_peer_endpoints=peer_list,
                              minimum_peer_connectivity=3,
                              maximum_peer_connectivity=10,
                              topology_check_frequency=1)

        completer = Completer(block_store, self._gossip)

        block_sender = BroadcastBlockSender(completer, self._gossip)
        batch_sender = BroadcastBatchSender(completer, self._gossip)
        chain_id_manager = ChainIdManager(data_dir)

        identity_view_factory = IdentityViewFactory(
            StateViewFactory(merkle_db))

        id_cache = IdentityCache(identity_view_factory,
                                 block_store.chain_head_state_root)

        permission_verifier = PermissionVerifier(
            permissions, block_store.chain_head_state_root, id_cache)

        identity_observer = IdentityObserver(to_update=id_cache.invalidate,
                                             forked=id_cache.forked)

        # Create and configure journal
        self._journal = Journal(
            block_store=block_store,
            state_view_factory=StateViewFactory(merkle_db),
            block_sender=block_sender,
            batch_sender=batch_sender,
            transaction_executor=executor,
            squash_handler=context_manager.get_squash_handler(),
            identity_signing_key=identity_signing_key,
            chain_id_manager=chain_id_manager,
            data_dir=data_dir,
            config_dir=config_dir,
            permission_verifier=permission_verifier,
            check_publish_block_frequency=0.1,
            block_cache_purge_frequency=30,
            block_cache_keep_time=300,
            batch_observers=[batch_tracker],
            chain_observers=[
                state_delta_processor, event_broadcaster, receipt_store,
                batch_tracker, identity_observer
            ],
            metrics_registry=metrics_registry)

        self._genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_key=identity_signing_key,
            data_dir=data_dir,
            config_dir=config_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender)

        responder = Responder(completer)

        completer.set_on_batch_received(self._journal.on_batch_received)
        completer.set_on_block_received(self._journal.on_block_received)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_ADD_RECEIPT_DATA_REQUEST,
            tp_state_handlers.TpAddReceiptDataHandler(context_manager),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_ADD_EVENT_REQUEST,
            tp_state_handlers.TpAddEventHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_DEL_REQUEST,
            tp_state_handlers.TpStateDeleteHandler(context_manager),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_GET_REQUEST,
            tp_state_handlers.TpStateGetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_SET_REQUEST,
            tp_state_handlers.TpStateSetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_REGISTER_REQUEST,
            processor_handlers.ProcessorRegisterHandler(executor.processors),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_UNREGISTER_REQUEST,
            processor_handlers.ProcessorUnRegisterHandler(executor.processors),
            thread_pool)

        # Set up base network handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_PING,
            PingHandler(network=self._network), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_CONNECT,
            ConnectHandler(network=self._network), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_DISCONNECT,
            DisconnectHandler(network=self._network), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.AUTHORIZATION_VIOLATION,
            AuthorizationViolationHandler(network=self._network,
                                          gossip=self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.AUTHORIZATION_TRUST_REQUEST,
            AuthorizationTrustRequestHandler(
                network=self._network,
                permission_verifier=permission_verifier,
                gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.AUTHORIZATION_CHALLENGE_REQUEST,
            AuthorizationChallengeRequestHandler(network=self._network),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.AUTHORIZATION_CHALLENGE_SUBMIT,
            AuthorizationChallengeSubmitHandler(
                network=self._network,
                permission_verifier=permission_verifier,
                gossip=self._gossip), network_thread_pool)

        # Set up gossip handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
            NetworkPermissionHandler(network=self._network,
                                     permission_verifier=permission_verifier,
                                     gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
            GetPeersRequestHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
            NetworkPermissionHandler(network=self._network,
                                     permission_verifier=permission_verifier,
                                     gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
            GetPeersResponseHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_REGISTER,
            NetworkPermissionHandler(network=self._network,
                                     permission_verifier=permission_verifier,
                                     gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_REGISTER,
            PeerRegisterHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_UNREGISTER,
            PeerUnregisterHandler(gossip=self._gossip), network_thread_pool)

        # GOSSIP_MESSAGE 1) Sends acknowledgement to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE, GossipMessageHandler(),
            network_thread_pool)

        # GOSSIP_MESSAGE 2) Verify Network Permissions
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            NetworkPermissionHandler(network=self._network,
                                     permission_verifier=permission_verifier,
                                     gossip=self._gossip), network_thread_pool)

        # GOSSIP_MESSAGE 3) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            signature_verifier.GossipMessageSignatureVerifier(), sig_pool)

        # GOSSIP_MESSAGE 4) Verifies batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            structure_verifier.GossipHandlerStructureVerifier(),
            network_thread_pool)

        # GOSSIP_MESSAGE 5) Verifies that the node is allowed to publish a
        # block
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            NetworkConsensusPermissionHandler(
                network=self._network,
                permission_verifier=permission_verifier,
                gossip=self._gossip), network_thread_pool)

        # GOSSIP_MESSAGE 6) Determines if we should broadcast the
        # message to our peers. It is important that this occur prior
        # to sending the message to the completer, as this step
        # relies on whether the gossip message has previously been
        # seen by the validator to determine whether or not forwarding
        # should occur
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipBroadcastHandler(gossip=self._gossip, completer=completer),
            network_thread_pool)

        # GOSSIP_MESSAGE 7) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            CompleterGossipHandler(completer), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
            NetworkPermissionHandler(network=self._network,
                                     permission_verifier=permission_verifier,
                                     gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
            BlockResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            GossipBlockResponseHandler(), network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 2) Verify Network Permissions
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            NetworkPermissionHandler(network=self._network,
                                     permission_verifier=permission_verifier,
                                     gossip=self._gossip), network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 3) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            signature_verifier.GossipBlockResponseSignatureVerifier(),
            sig_pool)

        # GOSSIP_BLOCK_RESPONSE 4) Check batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            structure_verifier.GossipBlockResponseStructureVerifier(),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 5) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            CompleterGossipBlockResponseHandler(completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            ResponderBlockResponseHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
            NetworkPermissionHandler(network=self._network,
                                     permission_verifier=permission_verifier,
                                     gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
            BatchByBatchIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
            NetworkPermissionHandler(network=self._network,
                                     permission_verifier=permission_verifier,
                                     gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
            BatchByTransactionIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            NetworkPermissionHandler(network=self._network,
                                     permission_verifier=permission_verifier,
                                     gossip=self._gossip), network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            GossipBatchResponseHandler(), network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            signature_verifier.GossipBatchResponseSignatureVerifier(),
            sig_pool)

        # GOSSIP_BATCH_RESPONSE 3) Check batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            structure_verifier.GossipBatchResponseStructureVerifier(),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 4) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            CompleterGossipBatchResponseHandler(completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            ResponderBatchResponseHandler(responder, self._gossip),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            BatchListPermissionVerifier(
                permission_verifier=permission_verifier), sig_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            signature_verifier.BatchListSignatureVerifier(), sig_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            structure_verifier.BatchListStructureVerifier(),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            CompleterBatchListBroadcastHandler(completer, self._gossip),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            client_handlers.BatchSubmitFinisher(batch_tracker), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST,
            client_handlers.BatchStatusRequest(batch_tracker), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
            client_handlers.StateListRequest(merkle_db,
                                             self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
            client_handlers.StateGetRequest(merkle_db,
                                            self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
            client_handlers.BlockListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
            client_handlers.BlockGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST,
            client_handlers.BatchListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_GET_REQUEST,
            client_handlers.BatchGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_TRANSACTION_LIST_REQUEST,
            client_handlers.TransactionListRequest(
                self._journal.get_block_store()), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_TRANSACTION_GET_REQUEST,
            client_handlers.TransactionGetRequest(
                self._journal.get_block_store()), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
            client_handlers.StateCurrentRequest(
                self._journal.get_current_root), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_RECEIPT_GET_REQUEST,
            ClientReceiptGetRequestHandler(receipt_store), thread_pool)

        # State Delta Subscription Handlers
        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
            StateDeltaSubscriberValidationHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
            StateDeltaAddSubscriberHandler(state_delta_processor), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_UNSUBSCRIBE_REQUEST,
            StateDeltaUnsubscriberHandler(state_delta_processor), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_GET_EVENTS_REQUEST,
            StateDeltaGetEventsHandler(block_store, state_delta_store),
            thread_pool)

        # Client Events Handlers
        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_EVENTS_SUBSCRIBE_REQUEST,
            ClientEventsSubscribeValidationHandler(event_broadcaster),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_EVENTS_SUBSCRIBE_REQUEST,
            ClientEventsSubscribeHandler(event_broadcaster), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_EVENTS_UNSUBSCRIBE_REQUEST,
            ClientEventsUnsubscribeHandler(event_broadcaster), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_EVENTS_GET_REQUEST,
            ClientEventsGetRequestHandler(event_broadcaster), thread_pool)

    def start(self):
        self._dispatcher.start()
        self._service.start()
        if self._genesis_controller.requires_genesis():
            self._genesis_controller.start(self._start)
        else:
            self._start()

    def _start(self):
        self._network_dispatcher.start()
        self._network.start()

        self._gossip.start()
        self._journal.start()

        signal_event = threading.Event()

        signal.signal(signal.SIGTERM, lambda sig, fr: signal_event.set())
        # This is where the main thread will be during the bulk of the
        # validator's life.
        while not signal_event.is_set():
            signal_event.wait(timeout=20)

    def stop(self):
        self._gossip.stop()
        self._dispatcher.stop()
        self._network_dispatcher.stop()
        self._network.stop()

        self._service.stop()

        self._network_thread_pool.shutdown(wait=True)
        self._thread_pool.shutdown(wait=True)
        self._sig_pool.shutdown(wait=True)

        self._executor.stop()
        self._context_manager.stop()

        self._journal.stop()

        threads = threading.enumerate()

        # This will remove the MainThread, which will exit when we call
        # sys.exit() or return from main().
        threads.remove(threading.current_thread())

        while threads:
            if len(threads) < 4:
                LOGGER.info(
                    "remaining threads: %s", ", ".join([
                        "{} ({})".format(x.name, x.__class__.__name__)
                        for x in threads
                    ]))
            for t in threads.copy():
                if not t.is_alive():
                    t.join()
                    threads.remove(t)
                if threads:
                    time.sleep(1)

        LOGGER.info("All threads have been stopped and joined")
Example #46
class Validator(object):
    def __init__(self, network_endpoint, component_endpoint, public_uri,
                 peering, join_list, peer_list, data_dir, config_dir,
                 identity_signing_key):
        """Constructs a validator instance.

        Args:
            network_endpoint (str): the network endpoint
            component_endpoint (str): the component endpoint
            public_uri (str): the zmq-style URI of this validator's
                publicly reachable endpoint
            peering (str): The type of peering approach. Either 'static'
                or 'dynamic'. In 'static' mode, no attempted topology
                buildout occurs -- the validator only attempts to initiate
                peering connections with endpoints specified in the
                peer_list. In 'dynamic' mode, the validator will first
                attempt to initiate peering connections with endpoints
                specified in the peer_list and then attempt to do a
                topology buildout starting with peer lists obtained from
                endpoints in the join_list. In either mode, the validator
                will accept incoming peer requests up to max_peers.
            join_list (list of str): a list of addresses to connect
                to in order to perform the initial topology buildout
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            config_dir (str): path to the config directory
            identity_signing_key (str): key the validator uses for signing
        """
        db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('database file is %s', db_filename)

        merkle_db = LMDBNoLockDatabase(db_filename, 'c')

        delta_db_filename = os.path.join(
            data_dir, 'state-deltas-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('state delta store file is %s', delta_db_filename)
        state_delta_db = LMDBNoLockDatabase(delta_db_filename, 'c')

        state_delta_store = StateDeltaStore(state_delta_db)

        context_manager = ContextManager(merkle_db, state_delta_store)
        self._context_manager = context_manager

        state_view_factory = StateViewFactory(merkle_db)

        block_db_filename = os.path.join(
            data_dir, 'block-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)

        block_db = LMDBNoLockDatabase(block_db_filename, 'c')
        block_store = BlockStore(block_db)

        # setup network
        self._dispatcher = Dispatcher()

        thread_pool = ThreadPoolExecutor(max_workers=10)
        process_pool = ProcessPoolExecutor(max_workers=3)

        self._thread_pool = thread_pool
        self._process_pool = process_pool

        self._service = Interconnect(component_endpoint,
                                     self._dispatcher,
                                     secured=False,
                                     heartbeat=False,
                                     max_incoming_connections=20)

        executor = TransactionExecutor(service=self._service,
                                       context_manager=context_manager,
                                       config_view_factory=ConfigViewFactory(
                                           StateViewFactory(merkle_db)))
        self._executor = executor

        state_delta_processor = StateDeltaProcessor(self._service,
                                                    state_delta_store,
                                                    block_store)

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        network_thread_pool = ThreadPoolExecutor(max_workers=10)
        self._network_thread_pool = network_thread_pool

        self._network_dispatcher = Dispatcher()

        # Server public and private keys are hardcoded here due to
        # the decision to avoid having separate identities for each
        # validator's server socket. This is appropriate for a public
        # network. For a permissioned network with requirements for
        # server endpoint authentication at the network level, this can
        # be augmented with a local lookup service for side-band provided
        # endpoint, public_key pairs and a local configuration option
        # for 'server' side private keys.
        self._network = Interconnect(
            network_endpoint,
            dispatcher=self._network_dispatcher,
            zmq_identity=zmq_identity,
            secured=True,
            server_public_key=b'wFMwoOt>yFqI/ek.G[tfMMILHWw#vXB[Sv}>l>i)',
            server_private_key=b'r&oJ5aQDj4+V]p2:Lz70Eu0x#m%IwzBdP(}&hWM*',
            heartbeat=True,
            public_uri=public_uri,
            connection_timeout=30,
            max_incoming_connections=100)

        self._gossip = Gossip(self._network,
                              public_uri=public_uri,
                              peering_mode=peering,
                              initial_join_endpoints=join_list,
                              initial_peer_endpoints=peer_list,
                              minimum_peer_connectivity=3,
                              maximum_peer_connectivity=10,
                              topology_check_frequency=1)

        completer = Completer(block_store, self._gossip)

        block_sender = BroadcastBlockSender(completer, self._gossip)
        batch_sender = BroadcastBatchSender(completer, self._gossip)
        chain_id_manager = ChainIdManager(data_dir)
        # Create and configure journal
        self._journal = Journal(
            block_store=block_store,
            state_view_factory=StateViewFactory(merkle_db),
            block_sender=block_sender,
            batch_sender=batch_sender,
            transaction_executor=executor,
            squash_handler=context_manager.get_squash_handler(),
            identity_signing_key=identity_signing_key,
            chain_id_manager=chain_id_manager,
            state_delta_processor=state_delta_processor,
            data_dir=data_dir,
            config_dir=config_dir,
            check_publish_block_frequency=0.1,
            block_cache_purge_frequency=30,
            block_cache_keep_time=300)

        self._genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_key=identity_signing_key,
            data_dir=data_dir,
            config_dir=config_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender)

        responder = Responder(completer)

        completer.set_on_batch_received(self._journal.on_batch_received)
        completer.set_on_block_received(self._journal.on_block_received)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_GET_REQUEST,
            tp_state_handlers.TpStateGetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_SET_REQUEST,
            tp_state_handlers.TpStateSetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_REGISTER_REQUEST,
            processor_handlers.ProcessorRegisterHandler(executor.processors),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_UNREGISTER_REQUEST,
            processor_handlers.ProcessorUnRegisterHandler(executor.processors),
            thread_pool)

        # Set up base network handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_PING, PingHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_CONNECT,
            ConnectHandler(network=self._network), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_DISCONNECT,
            DisconnectHandler(network=self._network), network_thread_pool)

        # Set up gossip handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
            GetPeersRequestHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
            GetPeersResponseHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_REGISTER,
            PeerRegisterHandler(gossip=self._gossip), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_UNREGISTER,
            PeerUnregisterHandler(gossip=self._gossip), network_thread_pool)

        # GOSSIP_MESSAGE 1) Sends acknowledgement to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE, GossipMessageHandler(),
            network_thread_pool)

        # GOSSIP_MESSAGE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            signature_verifier.GossipMessageSignatureVerifier(), process_pool)

        # GOSSIP_MESSAGE 3) Determines if we should broadcast the
        # message to our peers. It is important that this occur prior
        # to sending the message to the completer, as this step
        # relies on whether the gossip message has previously been
        # seen by the validator to determine whether or not forwarding
        # should occur
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipBroadcastHandler(gossip=self._gossip, completer=completer),
            network_thread_pool)

        # GOSSIP_MESSAGE 4) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            CompleterGossipHandler(completer), network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
            BlockResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            GossipBlockResponseHandler(), network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            signature_verifier.GossipBlockResponseSignatureVerifier(),
            process_pool)

        # GOSSIP_BLOCK_RESPONSE 3) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            CompleterGossipBlockResponseHandler(completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            ResponderBlockResponseHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
            BatchByBatchIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
            BatchByTransactionIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            GossipBatchResponseHandler(), network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            signature_verifier.GossipBatchResponseSignatureVerifier(),
            process_pool)

        # GOSSIP_BATCH_RESPONSE 3) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            CompleterGossipBatchResponseHandler(completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            ResponderBatchResponseHandler(responder, self._gossip),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            signature_verifier.BatchListSignatureVerifier(), process_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            CompleterBatchListBroadcastHandler(completer, self._gossip),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            client_handlers.BatchSubmitFinisher(
                self._journal.get_block_store(), completer.batch_cache),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST,
            client_handlers.BatchStatusRequest(self._journal.get_block_store(),
                                               completer.batch_cache),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
            client_handlers.StateListRequest(merkle_db,
                                             self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
            client_handlers.StateGetRequest(merkle_db,
                                            self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
            client_handlers.BlockListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
            client_handlers.BlockGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST,
            client_handlers.BatchListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_GET_REQUEST,
            client_handlers.BatchGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_TRANSACTION_LIST_REQUEST,
            client_handlers.TransactionListRequest(
                self._journal.get_block_store()), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_TRANSACTION_GET_REQUEST,
            client_handlers.TransactionGetRequest(
                self._journal.get_block_store()), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
            client_handlers.StateCurrentRequest(
                self._journal.get_current_root), thread_pool)

        # State Delta Subscription Handlers
        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
            StateDeltaSubscriberValidationHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
            StateDeltaAddSubscriberHandler(state_delta_processor), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_UNSUBSCRIBE_REQUEST,
            StateDeltaUnsubscriberHandler(state_delta_processor), thread_pool)

    def start(self):
        self._dispatcher.start()
        self._service.start()
        if self._genesis_controller.requires_genesis():
            self._genesis_controller.start(self._start)
        else:
            self._start()

    def _start(self):
        self._network_dispatcher.start()
        self._network.start()

        self._gossip.start()
        self._journal.start()

        signal_event = threading.Event()

        signal.signal(signal.SIGTERM, lambda sig, fr: signal_event.set())
        # This is where the main thread will be during the bulk of the
        # validator's life.
        while not signal_event.is_set():
            signal_event.wait(timeout=20)

    def stop(self):
        self._gossip.stop()
        self._dispatcher.stop()
        self._network_dispatcher.stop()
        self._network.stop()

        self._service.stop()

        self._process_pool.shutdown(wait=True)
        self._network_thread_pool.shutdown(wait=True)
        self._thread_pool.shutdown(wait=True)

        self._executor.stop()
        self._context_manager.stop()

        self._journal.stop()

        threads = threading.enumerate()

        # This will remove the MainThread, which will exit when we call
        # sys.exit() or return from main().
        threads.remove(threading.current_thread())

        while len(threads) > 0:
            if len(threads) < 4:
                LOGGER.info(
                    "remaining threads: %s", ", ".join([
                        "{} ({})".format(x.name, x.__class__.__name__)
                        for x in threads
                    ]))
            for t in threads.copy():
                if not t.is_alive():
                    t.join()
                    threads.remove(t)
                if len(threads) > 0:
                    time.sleep(1)

        LOGGER.info("All threads have been stopped and joined")
Example #47
    def __init__(self, network_endpoint, component_endpoint, peer_list,
                 data_dir):
        """Constructs a validator instance.

        Args:
            network_endpoint (str): the network endpoint
            component_endpoint (str): the component endpoint
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
        """
        db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(network_endpoint[-2:]))
        LOGGER.debug('database file is %s', db_filename)

        lmdb = LMDBNoLockDatabase(db_filename, 'n')
        context_manager = ContextManager(lmdb)

        block_db_filename = os.path.join(data_dir, 'block.lmdb')
        LOGGER.debug('block store file is %s', block_db_filename)

        # block_store = LMDBNoLockDatabase(block_db_filename, 'n')
        block_store = {}
        # this is not currently being used but will be something like this
        # in the future, when Journal takes a block_store that isn't a dict

        # setup network
        self._dispatcher = Dispatcher()

        completer = Completer(block_store)

        thread_pool = ThreadPoolExecutor(max_workers=10)
        process_pool = ProcessPoolExecutor(max_workers=3)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_GET_REQUEST,
            tp_state_handlers.TpStateGetHandler(context_manager), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_SET_REQUEST,
            tp_state_handlers.TpStateSetHandler(context_manager), thread_pool)

        self._service = Interconnect(component_endpoint,
                                     self._dispatcher,
                                     secured=False)
        executor = TransactionExecutor(self._service, context_manager)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_REGISTER_REQUEST,
            ProcessorRegisterHandler(executor.processors), thread_pool)

        identity = hashlib.sha512(time.time().hex().encode()).hexdigest()[:23]

        network_thread_pool = ThreadPoolExecutor(max_workers=10)

        self._network_dispatcher = Dispatcher()

        # Server public and private keys are hardcoded here due to
        # the decision to avoid having separate identities for each
        # validator's server socket. This is appropriate for a public
        # network. For a permissioned network with requirements for
        # server endpoint authentication at the network level, this can
        # be augmented with a local lookup service for side-band provided
        # endpoint, public_key pairs and a local configuration option
        # for 'server' side private keys.
        self._network = Interconnect(
            network_endpoint,
            dispatcher=self._network_dispatcher,
            identity=identity,
            peer_connections=peer_list,
            secured=True,
            server_public_key=b'wFMwoOt>yFqI/ek.G[tfMMILHWw#vXB[Sv}>l>i)',
            server_private_key=b'r&oJ5aQDj4+V]p2:Lz70Eu0x#m%IwzBdP(}&hWM*')

        self._gossip = Gossip(self._network)

        block_sender = BroadcastBlockSender(completer, self._gossip)

        self._network_dispatcher.add_handler(validator_pb2.Message.GOSSIP_PING,
                                             PingHandler(),
                                             network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_REGISTER, PeerRegisterHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_UNREGISTER, PeerUnregisterHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE, GossipMessageHandler(),
            network_thread_pool)
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            signature_verifier.GossipMessageSignatureVerifier(), process_pool)
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipBroadcastHandler(gossip=self._gossip), network_thread_pool)
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            CompleterGossipHandler(completer), network_thread_pool)

        # Create and configure journal
        self._journal = Journal(
            consensus=dev_mode_consensus,
            block_store=block_store,
            block_sender=block_sender,
            transaction_executor=executor,
            squash_handler=context_manager.get_squash_handler())

        self._genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=executor,
            completer=completer,
            block_store=block_store,
            data_dir=data_dir)

        completer.set_on_batch_received(self._journal.on_batch_received)
        completer.set_on_block_received(self._journal.on_block_received)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            signature_verifier.BatchListSignatureVerifier(), process_pool)
        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            CompleterBatchListBroadcastHandler(completer, self._gossip),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
            client_handlers.StateListRequest(lmdb,
                                             self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
            client_handlers.StateGetRequest(lmdb,
                                            self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
            client_handlers.BlockGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
            client_handlers.BlockListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
            client_handlers.StateCurrentRequest(
                self._journal.get_current_root), thread_pool)
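
One detail worth noticing across these versions is how work is split between executors: the signature-verification handlers go to a ProcessPoolExecutor (later revisions use a dedicated sig_pool instead), while network and component handlers stay on ThreadPoolExecutors. The snippet below is only a generic illustration of routing CPU-bound work to processes and I/O-light work to threads with concurrent.futures; cpu_bound_verify and io_bound_log are made-up stand-ins, not sawtooth verifiers.

import hashlib
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor

def cpu_bound_verify(payload):
    # Stand-in for signature verification: repeated hashing to burn CPU,
    # which benefits from a separate process (no GIL contention).
    digest = payload
    for _ in range(50_000):
        digest = hashlib.sha512(digest).digest()
    return digest.hex()[:16]

def io_bound_log(message):
    # Stand-in for an I/O-ish handler such as sending an ack or logging.
    return 'handled: ' + message

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=3) as sig_pool, \
            ThreadPoolExecutor(max_workers=10) as thread_pool:
        sig_future = sig_pool.submit(cpu_bound_verify, b'batch-bytes')
        log_future = thread_pool.submit(io_bound_log, 'CLIENT_BATCH_SUBMIT_REQUEST')
        print(log_future.result())
        print(sig_future.result())
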
Example #48
    def __init__(self, network_endpoint, component_endpoint, peer_list):
        data_dir = os.path.expanduser('~')
        db_filename = os.path.join(data_dir,
                                   'merkle-{}.lmdb'.format(
                                       network_endpoint[-2:]))
        LOGGER.debug('database file is %s', db_filename)

        lmdb = LMDBNoLockDatabase(db_filename, 'n')
        context_manager = ContextManager(lmdb)

        block_db_filename = os.path.join(data_dir, 'block.lmdb')
        LOGGER.debug('block store file is %s', block_db_filename)
        block_store = {}
        # block_store = LMDBNoLockDatabase(block_db_filename, 'n')
        # this is not currently being used but will be something like this
        # in the future, when Journal takes a block_store that isn't a dict

        # setup network
        self._dispatcher = Dispatcher()

        completer = Completer()

        thread_pool = ThreadPoolExecutor(max_workers=10)
        process_pool = ProcessPoolExecutor(max_workers=3)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_GET_REQUEST,
            tp_state_handlers.TpStateGetHandler(context_manager),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_SET_REQUEST,
            tp_state_handlers.TpStateSetHandler(context_manager),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
            client_handlers.StateGetRequestHandler(lmdb),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
            client_handlers.StateListRequestHandler(lmdb),
            thread_pool)

        self._service = Interconnect(component_endpoint, self._dispatcher)
        executor = TransactionExecutor(self._service, context_manager)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_REGISTER_REQUEST,
            ProcessorRegisterHandler(executor.processors),
            thread_pool)

        identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        network_thread_pool = ThreadPoolExecutor(max_workers=10)

        self._network_dispatcher = Dispatcher()

        self._network = Interconnect(
            network_endpoint,
            dispatcher=self._network_dispatcher,
            identity=identity,
            peer_connections=peer_list)

        self._gossip = Gossip(self._network)

        block_sender = BroadcastBlockSender(completer, self._gossip)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_PING,
            PingHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_REGISTER,
            PeerRegisterHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_UNREGISTER,
            PeerUnregisterHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipMessageHandler(),
            network_thread_pool)
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            signature_verifier.GossipMessageSignatureVerifier(),
            process_pool)
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipBroadcastHandler(
                gossip=self._gossip),
            network_thread_pool)
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            CompleterGossipHandler(
                completer),
            network_thread_pool)

        # Create and configure journal
        self._journal = Journal(
            consensus=dev_mode_consensus,
            block_store=block_store,
            block_sender=block_sender,
            transaction_executor=executor,
            squash_handler=context_manager.get_squash_handler())

        self._genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=executor,
            completer=completer,
            block_store=block_store,
            data_dir=data_dir
        )

        completer.set_on_batch_received(self._journal.on_batch_received)
        completer.set_on_block_received(self._journal.on_block_received)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            signature_verifier.BatchListSignatureVerifier(),
            process_pool)
        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            CompleterBatchListBroadcastHandler(
                completer, self._gossip),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
            client_handlers.StateGetRequestHandler(lmdb),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
            client_handlers.StateListRequestHandler(lmdb),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
            client_handlers.BlockGetRequestHandler(
                self._journal.get_block_store()), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
            client_handlers.BlockListRequestHandler(
                self._journal.get_block_store()), thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
            client_handlers.StateCurrentRequestHandler(
                self._journal.get_current_root), thread_pool)
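
The 23-character zmq identity used for the network socket in the fuller examples above is derived from the current time rather than from the validator's key material: time.time() is turned into text with float.hex(), hashed with SHA-512, and truncated. The same derivation in isolation (make_zmq_identity is just an illustrative helper name):

import hashlib
import time

def make_zmq_identity():
    # float.hex() gives a textual form of the timestamp, e.g.
    # '0x1.91b2c3d4e5f60p+30'; hashing it yields a uniform-looking id
    # that is short enough to use as a ZMQ socket identity.
    timestamp_hex = time.time().hex()
    return hashlib.sha512(timestamp_hex.encode()).hexdigest()[:23]

print(make_zmq_identity())  # 23 hex characters, different on each run
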
Example #49
    def __init__(self, bind_network, bind_component, endpoint,
                 peering, seeds_list, peer_list, data_dir, config_dir,
                 identity_signing_key, scheduler_type,
                 network_public_key=None,
                 network_private_key=None):
        """Constructs a validator instance.

        Args:
            bind_network (str): the network endpoint
            bind_component (str): the component endpoint
            endpoint (str): the zmq-style URI of this validator's
                publicly reachable endpoint
            peering (str): The type of peering approach. Either 'static'
                or 'dynamic'. In 'static' mode, no attempted topology
                buildout occurs -- the validator only attempts to initiate
                peering connections with endpoints specified in the
                peer_list. In 'dynamic' mode, the validator will first
                attempt to initiate peering connections with endpoints
                specified in the peer_list and then attempt to do a
                topology buildout starting with peer lists obtained from
                endpoints in the seeds_list. In either mode, the validator
                will accept incoming peer requests up to max_peers.
            seeds_list (list of str): a list of addresses to connect
                to in order to perform the initial topology buildout
            peer_list (list of str): a list of peer addresses
            data_dir (str): path to the data directory
            config_dir (str): path to the config directory
            identity_signing_key (str): key the validator uses for signing
        """
        db_filename = os.path.join(data_dir,
                                   'merkle-{}.lmdb'.format(
                                       bind_network[-2:]))
        LOGGER.debug('database file is %s', db_filename)

        merkle_db = LMDBNoLockDatabase(db_filename, 'c')

        delta_db_filename = os.path.join(data_dir,
                                         'state-deltas-{}.lmdb'.format(
                                             bind_network[-2:]))
        LOGGER.debug('state delta store file is %s', delta_db_filename)
        state_delta_db = LMDBNoLockDatabase(delta_db_filename, 'c')

        state_delta_store = StateDeltaStore(state_delta_db)

        context_manager = ContextManager(merkle_db, state_delta_store)
        self._context_manager = context_manager

        state_view_factory = StateViewFactory(merkle_db)

        block_db_filename = os.path.join(data_dir, 'block-{}.lmdb'.format(
                                         bind_network[-2:]))
        LOGGER.debug('block store file is %s', block_db_filename)

        block_db = LMDBNoLockDatabase(block_db_filename, 'c')
        block_store = BlockStore(block_db)

        batch_tracker = BatchTracker(block_store)
        block_store.add_update_observer(batch_tracker)

        # setup network
        self._dispatcher = Dispatcher()

        thread_pool = ThreadPoolExecutor(max_workers=10)
        sig_pool = ThreadPoolExecutor(max_workers=3)

        self._thread_pool = thread_pool
        self._sig_pool = sig_pool

        self._service = Interconnect(bind_component,
                                     self._dispatcher,
                                     secured=False,
                                     heartbeat=False,
                                     max_incoming_connections=20,
                                     monitor=True)

        config_file = os.path.join(config_dir, "validator.toml")

        validator_config = {}
        if os.path.exists(config_file):
            with open(config_file) as fd:
                raw_config = fd.read()
            validator_config = toml.loads(raw_config)

        if scheduler_type is None:
            scheduler_type = validator_config.get("scheduler", "serial")
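        # For reference, a minimal validator.toml that this block would read
        # from config_dir might look like the following (hypothetical
        # contents; only the "scheduler" key is consulted here):
        #
        #     scheduler = "serial"
        #
        # If the file is missing or the key is absent, scheduler_type falls
        # back to "serial" unless a value was passed to the constructor.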

        executor = TransactionExecutor(
            service=self._service,
            context_manager=context_manager,
            settings_view_factory=SettingsViewFactory(
                                    StateViewFactory(merkle_db)),
            scheduler_type=scheduler_type,
            invalid_observers=[batch_tracker])

        self._executor = executor
        self._service.set_check_connections(executor.check_connections)

        state_delta_processor = StateDeltaProcessor(self._service,
                                                    state_delta_store,
                                                    block_store)

        zmq_identity = hashlib.sha512(
            time.time().hex().encode()).hexdigest()[:23]

        network_thread_pool = ThreadPoolExecutor(max_workers=10)
        self._network_thread_pool = network_thread_pool

        self._network_dispatcher = Dispatcher()

        secure = False
        if network_public_key is not None and network_private_key is not None:
            secure = True
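        # The network keys, when provided, are expected to be ZMQ CURVE keys
        # in the same z85-encoded form as the hardcoded pair in the older
        # examples above. Such a pair can be generated with pyzmq, e.g.:
        #
        #     import zmq
        #     public_key, secret_key = zmq.curve_keypair()
        #
        # (illustrative only; distributing and configuring the keys is
        # outside the scope of this constructor)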

        self._network = Interconnect(
            bind_network,
            dispatcher=self._network_dispatcher,
            zmq_identity=zmq_identity,
            secured=secure,
            server_public_key=network_public_key,
            server_private_key=network_private_key,
            heartbeat=True,
            public_endpoint=endpoint,
            connection_timeout=30,
            max_incoming_connections=100)

        self._gossip = Gossip(self._network,
                              endpoint=endpoint,
                              peering_mode=peering,
                              initial_seed_endpoints=seeds_list,
                              initial_peer_endpoints=peer_list,
                              minimum_peer_connectivity=3,
                              maximum_peer_connectivity=10,
                              topology_check_frequency=1)

        completer = Completer(block_store, self._gossip)

        block_sender = BroadcastBlockSender(completer, self._gossip)
        batch_sender = BroadcastBatchSender(completer, self._gossip)
        chain_id_manager = ChainIdManager(data_dir)
        # Create and configure journal
        self._journal = Journal(
            block_store=block_store,
            state_view_factory=StateViewFactory(merkle_db),
            block_sender=block_sender,
            batch_sender=batch_sender,
            transaction_executor=executor,
            squash_handler=context_manager.get_squash_handler(),
            identity_signing_key=identity_signing_key,
            chain_id_manager=chain_id_manager,
            state_delta_processor=state_delta_processor,
            data_dir=data_dir,
            config_dir=config_dir,
            check_publish_block_frequency=0.1,
            block_cache_purge_frequency=30,
            block_cache_keep_time=300,
            batch_observers=[batch_tracker]
        )

        self._genesis_controller = GenesisController(
            context_manager=context_manager,
            transaction_executor=executor,
            completer=completer,
            block_store=block_store,
            state_view_factory=state_view_factory,
            identity_key=identity_signing_key,
            data_dir=data_dir,
            config_dir=config_dir,
            chain_id_manager=chain_id_manager,
            batch_sender=batch_sender
        )

        responder = Responder(completer)

        completer.set_on_batch_received(self._journal.on_batch_received)
        completer.set_on_block_received(self._journal.on_block_received)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_GET_REQUEST,
            tp_state_handlers.TpStateGetHandler(context_manager),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_STATE_SET_REQUEST,
            tp_state_handlers.TpStateSetHandler(context_manager),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_REGISTER_REQUEST,
            processor_handlers.ProcessorRegisterHandler(executor.processors),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.TP_UNREGISTER_REQUEST,
            processor_handlers.ProcessorUnRegisterHandler(executor.processors),
            thread_pool)

        # Set up base network handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_PING,
            PingHandler(),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_CONNECT,
            ConnectHandler(network=self._network),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.NETWORK_DISCONNECT,
            DisconnectHandler(network=self._network),
            network_thread_pool)

        # Set up gossip handlers
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
            GetPeersRequestHandler(gossip=self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
            GetPeersResponseHandler(gossip=self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_REGISTER,
            PeerRegisterHandler(gossip=self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_UNREGISTER,
            PeerUnregisterHandler(gossip=self._gossip),
            network_thread_pool)

        # GOSSIP_MESSAGE 1) Sends acknowledgement to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipMessageHandler(),
            network_thread_pool)

        # GOSSIP_MESSAGE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            signature_verifier.GossipMessageSignatureVerifier(),
            sig_pool)

        # GOSSIP_MESSAGE 3) Verifies batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            structure_verifier.GossipHandlerStructureVerifier(),
            network_thread_pool)

        # GOSSIP_MESSAGE 4) Determines whether the message should be
        # broadcast to our peers. This handler must run before the
        # message is passed to the completer, because the forwarding
        # decision depends on whether the validator has already seen
        # the gossip message.
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            GossipBroadcastHandler(
                gossip=self._gossip,
                completer=completer),
            network_thread_pool)

        # GOSSIP_MESSAGE 5) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_MESSAGE,
            CompleterGossipHandler(
                completer),
            network_thread_pool)

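        # Block requests from peers are answered by the responder.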
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
            BlockResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            GossipBlockResponseHandler(),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            signature_verifier.GossipBlockResponseSignatureVerifier(),
            sig_pool)

        # GOSSIP_BLOCK_RESPONSE 3) Verifies block structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            structure_verifier.GossipBlockResponseStructureVerifier(),
            network_thread_pool)

        # GOSSIP_BLOCK_RESPONSE 4) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            CompleterGossipBlockResponseHandler(
                completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
            ResponderBlockResponseHandler(responder, self._gossip),
            network_thread_pool)

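        # Batch requests from peers, keyed by batch id or by transaction
        # id, are also answered by the responder.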
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
            BatchByBatchIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
            BatchByTransactionIdResponderHandler(responder, self._gossip),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 1) Sends ack to the sender
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            GossipBatchResponseHandler(),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 2) Verifies signature
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            signature_verifier.GossipBatchResponseSignatureVerifier(),
            sig_pool)

        # GOSSIP_BATCH_RESPONSE 3) Verifies batch structure
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            structure_verifier.GossipBatchResponseStructureVerifier(),
            network_thread_pool)

        # GOSSIP_BATCH_RESPONSE 4) Send message to completer
        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            CompleterGossipBatchResponseHandler(
                completer),
            network_thread_pool)

        self._network_dispatcher.add_handler(
            validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
            ResponderBatchResponseHandler(responder, self._gossip),
            network_thread_pool)

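        # Client batch submission pipeline: permission check and
        # signature/structure verification first, then broadcast through
        # the completer and gossip, and finally the response back to the
        # client.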
        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            BatchListPermissionVerifier(
                settings_view_factory=SettingsViewFactory(
                    StateViewFactory(merkle_db)),
                current_root_func=self._journal.get_current_root,
            ),
            sig_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            signature_verifier.BatchListSignatureVerifier(),
            sig_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            structure_verifier.BatchListStructureVerifier(),
            network_thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            CompleterBatchListBroadcastHandler(
                completer, self._gossip),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
            client_handlers.BatchSubmitFinisher(batch_tracker),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST,
            client_handlers.BatchStatusRequest(batch_tracker),
            thread_pool)

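        # Client read handlers for querying state, blocks, batches, and
        # transactions.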
        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
            client_handlers.StateListRequest(
                merkle_db,
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
            client_handlers.StateGetRequest(
                merkle_db,
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
            client_handlers.BlockListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
            client_handlers.BlockGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST,
            client_handlers.BatchListRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_BATCH_GET_REQUEST,
            client_handlers.BatchGetRequest(self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_TRANSACTION_LIST_REQUEST,
            client_handlers.TransactionListRequest(
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_TRANSACTION_GET_REQUEST,
            client_handlers.TransactionGetRequest(
                self._journal.get_block_store()),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
            client_handlers.StateCurrentRequest(
                self._journal.get_current_root),
            thread_pool)

        # State Delta Subscription Handlers
        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
            StateDeltaSubscriberValidationHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST,
            StateDeltaAddSubscriberHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_UNSUBSCRIBE_REQUEST,
            StateDeltaUnsubscriberHandler(state_delta_processor),
            thread_pool)

        self._dispatcher.add_handler(
            validator_pb2.Message.STATE_DELTA_GET_EVENTS_REQUEST,
            StateDeltaGetEventsHandler(block_store, state_delta_store),
            thread_pool)