def test_state_verifier(self):
    blockstore = BlockStore(DictDatabase(
        indexes=BlockStore.create_index_configuration()))
    global_state_db = NativeLmdbDatabase(
        os.path.join(self._temp_dir, 'test_state_verifier.lmdb'),
        indexes=MerkleDatabase.create_index_configuration())

    precalculated_state_roots = [
        "e35490eac6f77453675c3399da7efe451e791272bbc8cf1b032c75030fb455c3",
        "3a369eb951171895c00ba2ffd04bfa1ef98d6ee651f96a65ae3280cf8d67d5e7",
        "797e70e29915c9129f950b2084ed0e3c09246bd1e6c232571456f51ca85df340",
    ]

    signer = get_signer()
    populate_blockstore(blockstore, signer, precalculated_state_roots)

    verify_state(
        global_state_db,
        blockstore,
        "tcp://eth0:4004",
        "serial")

    # There is a bug in the shutdown code for some component this depends
    # on, which causes it to occasionally hang during shutdown. Just kill
    # the process for now.
    # pylint: disable=protected-access
    os._exit(0)
def setUp(self):
    self.dir = tempfile.mkdtemp()
    self.block_db = NativeLmdbDatabase(
        os.path.join(self.dir, 'block.lmdb'),
        BlockStore.create_index_configuration())
    self.block_store = BlockStore(self.block_db)
    self.block_manager = BlockManager()
    self.block_manager.add_commit_store(self.block_store)
    self.gossip = MockGossip()
    self.completer = Completer(
        block_manager=self.block_manager,
        transaction_committed=self.block_store.has_transaction,
        get_committed_batch_by_id=self.block_store.get_batch,
        get_committed_batch_by_txn_id=(
            self.block_store.get_batch_by_transaction
        ),
        get_chain_head=lambda: self.block_store.chain_head,
        gossip=self.gossip)
    self.completer.set_on_block_received(self._on_block_received)
    self.completer.set_on_batch_received(self._on_batch_received)
    self._has_block_value = True

    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    self.signer = crypto_factory.new_signer(private_key)

    self.blocks = []
    self.batches = []
def make_block_store(blocks=None):
    block_dir = tempfile.mkdtemp()
    block_db = NativeLmdbDatabase(
        os.path.join(block_dir, 'block.lmdb'),
        BlockStore.create_index_configuration())
    block_store = BlockStore(block_db)
    if blocks is not None:
        block_store.put_blocks(blocks)
    return block_store
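# Hedged usage sketch, not taken from the original tests: make_block_store
# leaves its temporary directory on disk, so a caller that wants explicit
# cleanup can create the directory itself and remove it afterwards. The names
# work_dir and store below are illustrative only.
import os
import shutil
import tempfile

work_dir = tempfile.mkdtemp()
store = BlockStore(NativeLmdbDatabase(
    os.path.join(work_dir, 'block.lmdb'),
    BlockStore.create_index_configuration()))
# ... exercise the store here ...
shutil.rmtree(work_dir)  # remove the LMDB files created above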
def test_iterate_chain_on_empty_block_store(self):
    """Given a block store with no blocks, iterate using the predecessor
    iterator and verify that it results in an empty list.
    """
    block_store = BlockStore(DictDatabase(
        indexes=BlockStore.create_index_configuration()))

    self.assertEqual([], [b for b in block_store.get_predecessor_iter()])
def __init__(self, size=3, start='0'):
    self.dir = tempfile.mkdtemp()
    self.block_db = NativeLmdbDatabase(
        os.path.join(self.dir, 'block.lmdb'),
        BlockStore.create_index_configuration())
    super().__init__(self.block_db)

    for i in range(size):
        self.add_block(_increment_key(start, i))
def make_block_store(blocks=None):
    block_dir = tempfile.mkdtemp()
    block_db = NativeLmdbDatabase(
        os.path.join(block_dir, 'block.lmdb'),
        BlockStore.create_index_configuration())
    block_store = BlockStore(block_db)
    if blocks is not None:
        block_store.put_blocks(blocks)
    return block_store
def setUp(self):
    self.block_store = BlockStore(DictDatabase(
        indexes=BlockStore.create_index_configuration()))
    self.gossip = MockGossip()
    self.completer = Completer(self.block_store, self.gossip)
    self.completer._on_block_received = self._on_block_received
    self.completer._on_batch_received = self._on_batch_received
    self.private_key = signing.generate_private_key()
    self.public_key = signing.generate_public_key(self.private_key)
    self.blocks = []
    self.batches = []
def __init__(self, with_genesis=True):
    self.block_sender = MockBlockSender()
    self.batch_sender = MockBatchSender()
    self.block_store = BlockStore(
        DictDatabase(indexes=BlockStore.create_index_configuration()))
    self.block_cache = BlockCache(self.block_store)
    self.state_db = {}
    self.block_manager = BlockManager()
    self.block_manager.add_store("commit_store", self.block_store)

    # add the mock reference to the consensus
    consensus_setting_addr = SettingsView.setting_address(
        'sawtooth.consensus.algorithm')
    self.state_db[consensus_setting_addr] = _setting_entry(
        'sawtooth.consensus.algorithm', 'test_journal.mock_consensus')
    self.state_view_factory = MockStateViewFactory(self.state_db)

    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    self.signer = crypto_factory.new_signer(private_key)
    identity_private_key = context.new_random_private_key()
    self.identity_signer = crypto_factory.new_signer(identity_private_key)

    chain_head = None
    if with_genesis:
        self.genesis_block = self.generate_genesis_block()
        chain_head = self.genesis_block
        self.block_manager.put([chain_head.block])
        self.block_manager.persist(chain_head.block.header_signature,
                                   "commit_store")

    self.block_publisher = BlockPublisher(
        block_manager=self.block_manager,
        transaction_executor=MockTransactionExecutor(),
        transaction_committed=self.block_store.has_transaction,
        batch_committed=self.block_store.has_batch,
        state_view_factory=self.state_view_factory,
        settings_cache=SettingsCache(
            SettingsViewFactory(self.state_view_factory),
        ),
        block_sender=self.block_sender,
        batch_sender=self.block_sender,
        chain_head=chain_head.block,
        identity_signer=self.identity_signer,
        data_dir=None,
        config_dir=None,
        permission_verifier=MockPermissionVerifier(),
        batch_observers=[])
def setUp(self):
    self.block_store = BlockStore(
        DictDatabase(indexes=BlockStore.create_index_configuration()))
    self.gossip = MockGossip()
    self.completer = Completer(self.block_store, self.gossip)
    self.completer._on_block_received = self._on_block_received
    self.completer._on_batch_received = self._on_batch_received

    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    self.signer = crypto_factory.new_signer(private_key)

    self.blocks = []
    self.batches = []
def setUp(self):
    self.dir = tempfile.mkdtemp()
    self.block_db = NativeLmdbDatabase(
        os.path.join(self.dir, 'block.lmdb'),
        BlockStore.create_index_configuration())
    self.block_store = BlockStore(self.block_db)
    self.receipt_store = TransactionReceiptStore(DictDatabase())
    self._txn_ids_by_block_id = {}
    for block_id, blk_w, txn_ids in create_chain():
        self.block_store.put_blocks([blk_w.block])
        self._txn_ids_by_block_id[block_id] = txn_ids
        for txn_id in txn_ids:
            receipt = create_receipt(txn_id=txn_id,
                                     key_values=[("address", block_id)])
            self.receipt_store.put(txn_id=txn_id, txn_receipt=receipt)
def test_state_verifier(self):
    blockstore = BlockStore(
        DictDatabase(indexes=BlockStore.create_index_configuration()))
    global_state_db = DictDatabase()

    precalculated_state_roots = [
        "e35490eac6f77453675c3399da7efe451e791272bbc8cf1b032c75030fb455c3",
        "3a369eb951171895c00ba2ffd04bfa1ef98d6ee651f96a65ae3280cf8d67d5e7",
        "797e70e29915c9129f950b2084ed0e3c09246bd1e6c232571456f51ca85df340",
    ]

    signer = get_signer()
    populate_blockstore(blockstore, signer, precalculated_state_roots)

    verify_state(global_state_db, blockstore, "tcp://eth0:4004", "serial")
def __init__(self, with_genesis=True):
    self.block_sender = MockBlockSender()
    self.batch_sender = MockBatchSender()
    self.dir = tempfile.mkdtemp()
    self.block_db = NativeLmdbDatabase(
        os.path.join(self.dir, 'block.lmdb'),
        BlockStore.create_index_configuration())
    self.block_store = BlockStore(self.block_db)
    self.block_cache = BlockCache(self.block_store)
    self.state_db = NativeLmdbDatabase(
        os.path.join(self.dir, "merkle.lmdb"),
        MerkleDatabase.create_index_configuration())

    self.state_view_factory = NativeStateViewFactory(self.state_db)

    self.block_manager = BlockManager()
    self.block_manager.add_commit_store(self.block_store)

    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    self.signer = crypto_factory.new_signer(private_key)
    identity_private_key = context.new_random_private_key()
    self.identity_signer = crypto_factory.new_signer(identity_private_key)

    chain_head = None
    if with_genesis:
        self.genesis_block = self.generate_genesis_block()
        chain_head = self.genesis_block
        self.block_manager.put([chain_head.block])
        self.block_manager.persist(
            chain_head.block.header_signature, "commit_store")

    self.block_publisher = BlockPublisher(
        block_manager=self.block_manager,
        transaction_executor=MockTransactionExecutor(),
        transaction_committed=self.block_store.has_transaction,
        batch_committed=self.block_store.has_batch,
        state_view_factory=self.state_view_factory,
        block_sender=self.block_sender,
        batch_sender=self.block_sender,
        chain_head=chain_head.block,
        identity_signer=self.identity_signer,
        data_dir=None,
        config_dir=None,
        permission_verifier=MockPermissionVerifier(),
        batch_observers=[])
def create_chain_commit_state(
        self,
        committed_blocks,
        uncommitted_blocks,
        head_id,
):
    block_store = BlockStore(
        DictDatabase(indexes=BlockStore.create_index_configuration()))
    block_store.update_chain(committed_blocks)

    block_cache = BlockCache(block_store=block_store)

    for block in uncommitted_blocks:
        block_cache[block.header_signature] = block

    return ChainCommitState(head_id, block_cache, block_store)
def test_iterate_chain(self):
    """Given a block store, create a predecessor iterator.

    1. Create a chain of length 5.
    2. Iterate the chain using get_predecessor_iter from the chain head.
    3. Verify that the block ids match the chain, in reverse order.
    """
    block_store = BlockStore(
        DictDatabase(indexes=BlockStore.create_index_configuration()))
    chain = self._create_chain(5)
    block_store.update_chain(chain)

    ids = [b.identifier for b in block_store.get_predecessor_iter()]

    self.assertEqual(['abcd4', 'abcd3', 'abcd2', 'abcd1', 'abcd0'], ids)
def __init__(self, with_genesis=True):
    self.block_sender = MockBlockSender()
    self.batch_sender = MockBatchSender()
    self.dir = tempfile.mkdtemp()
    self.block_db = NativeLmdbDatabase(
        os.path.join(self.dir, 'block.lmdb'),
        BlockStore.create_index_configuration())
    self.block_store = BlockStore(self.block_db)
    self.block_cache = BlockCache(self.block_store)
    self.state_db = NativeLmdbDatabase(
        os.path.join(self.dir, "merkle.lmdb"),
        MerkleDatabase.create_index_configuration())

    self.state_view_factory = NativeStateViewFactory(self.state_db)

    self.block_manager = BlockManager()
    self.block_manager.add_commit_store(self.block_store)

    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    self.signer = crypto_factory.new_signer(private_key)
    identity_private_key = context.new_random_private_key()
    self.identity_signer = crypto_factory.new_signer(identity_private_key)

    chain_head = None
    if with_genesis:
        self.genesis_block = self.generate_genesis_block()
        chain_head = self.genesis_block
        self.block_manager.put([chain_head.block])
        self.block_manager.persist(chain_head.block.header_signature,
                                   "commit_store")

    self.block_publisher = BlockPublisher(
        block_manager=self.block_manager,
        transaction_executor=MockTransactionExecutor(),
        transaction_committed=self.block_store.has_transaction,
        batch_committed=self.block_store.has_batch,
        state_view_factory=self.state_view_factory,
        block_sender=self.block_sender,
        batch_sender=self.block_sender,
        chain_head=chain_head.block,
        identity_signer=self.identity_signer,
        data_dir=None,
        config_dir=None,
        permission_verifier=MockPermissionVerifier(),
        batch_observers=[])
def setUp(self):
    self.block_store = BlockStore(DictDatabase(
        indexes=BlockStore.create_index_configuration()))
    self.gossip = MockGossip()
    self.completer = Completer(self.block_store, self.gossip)
    self.completer._on_block_received = self._on_block_received
    self.completer._on_batch_received = self._on_batch_received
    self.completer._has_block = self._has_block
    self._has_block_value = True

    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    self.signer = crypto_factory.new_signer(private_key)

    self.blocks = []
    self.batches = []
def setUp(self):
    self.dir = tempfile.mkdtemp()
    self.block_db = NativeLmdbDatabase(
        os.path.join(self.dir, 'block.lmdb'),
        BlockStore.create_index_configuration())
    self.block_store = BlockStore(self.block_db)
    self.receipt_store = TransactionReceiptStore(DictDatabase())
    self._txn_ids_by_block_id = {}
    for block_id, blk_w, txn_ids in create_chain():
        self.block_store.put_blocks([blk_w.block])
        self._txn_ids_by_block_id[block_id] = txn_ids
        for txn_id in txn_ids:
            receipt = create_receipt(txn_id=txn_id,
                                     key_values=[("address", block_id)])
            self.receipt_store.put(
                txn_id=txn_id,
                txn_receipt=receipt)
def __init__(self, with_genesis=True):
    self.block_sender = MockBlockSender()
    self.batch_sender = MockBatchSender()
    self.block_store = BlockStore(DictDatabase(
        indexes=BlockStore.create_index_configuration()))
    self.block_cache = BlockCache(self.block_store)
    self.state_db = {}

    # add the mock reference to the consensus
    consensus_setting_addr = SettingsView.setting_address(
        'sawtooth.consensus.algorithm')
    self.state_db[consensus_setting_addr] = _setting_entry(
        'sawtooth.consensus.algorithm', 'test_journal.mock_consensus')
    self.state_view_factory = MockStateViewFactory(self.state_db)

    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    self.signer = crypto_factory.new_signer(private_key)
    identity_private_key = context.new_random_private_key()
    self.identity_signer = crypto_factory.new_signer(identity_private_key)

    chain_head = None
    if with_genesis:
        self.genesis_block = self.generate_genesis_block()
        self.set_chain_head(self.genesis_block)
        chain_head = self.genesis_block

    self.block_publisher = BlockPublisher(
        transaction_executor=MockTransactionExecutor(),
        block_cache=self.block_cache,
        state_view_factory=self.state_view_factory,
        settings_cache=SettingsCache(
            SettingsViewFactory(self.state_view_factory),
        ),
        block_sender=self.block_sender,
        batch_sender=self.block_sender,
        squash_handler=None,
        chain_head=chain_head,
        identity_signer=self.identity_signer,
        data_dir=None,
        config_dir=None,
        permission_verifier=MockPermissionVerifier(),
        check_publish_block_frequency=0.1,
        batch_observers=[])
def get_databases(bind_network, data_dir):
    # Get the global state database to operate on
    global_state_db_filename = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('verifying state in %s', global_state_db_filename)
    global_state_db = NativeLmdbDatabase(
        global_state_db_filename,
        indexes=MerkleDatabase.create_index_configuration())

    # Get the blockstore
    block_db_filename = os.path.join(
        data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = NativeLmdbDatabase(
        block_db_filename,
        indexes=BlockStore.create_index_configuration())
    blockstore = BlockStore(block_db)

    return global_state_db, blockstore
def test_iterate_chain(self):
    """Given a block store, create a predecessor iterator.

    1. Create a chain of length 5.
    2. Iterate the chain using get_predecessor_iter from the chain head.
    3. Verify that the block ids match the chain, in reverse order.
    """
    block_store = BlockStore(DictDatabase(
        indexes=BlockStore.create_index_configuration()))
    chain = self._create_chain(5)
    block_store.update_chain(chain)

    ids = [b.identifier for b in block_store.get_predecessor_iter()]

    self.assertEqual(
        ['abcd4', 'abcd3', 'abcd2', 'abcd1', 'abcd0'],
        ids)
def test_iterate_chain_from_starting_block(self):
    """Given a block store, iterate using a predecessor iterator from a
    particular start point in the chain.

    1. Create a chain of length 5.
    2. Iterate the chain using get_predecessor_iter from block 3.
    3. Verify that the block ids match the chain, in reverse order.
    """
    block_store = BlockStore(
        DictDatabase(indexes=BlockStore.create_index_configuration()))
    chain = self._create_chain(5)
    block_store.update_chain(chain)

    block = block_store['abcd2']

    ids = [b.identifier for b in block_store.get_predecessor_iter(block)]

    self.assertEqual(['abcd2', 'abcd1', 'abcd0'], ids)
def get_databases(bind_network, data_dir):
    # Get the global state database to operate on
    global_state_db_filename = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug(
        'verifying state in %s', global_state_db_filename)
    global_state_db = NativeLmdbDatabase(
        global_state_db_filename,
        indexes=MerkleDatabase.create_index_configuration())

    # Get the blockstore
    block_db_filename = os.path.join(
        data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = NativeLmdbDatabase(
        block_db_filename,
        indexes=BlockStore.create_index_configuration())
    blockstore = BlockStore(block_db)

    return global_state_db, blockstore
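# Hypothetical call, for illustration only: get_databases folds the last two
# characters of bind_network into the LMDB file names, so a value such as
# "tcp://127.0.0.1:8800" yields <data_dir>/merkle-00.lmdb and
# <data_dir>/block-00.lmdb. The endpoint and path below are made up.
global_state_db, blockstore = get_databases(
    "tcp://127.0.0.1:8800",  # bind_network; only bind_network[-2:] is used
    "/var/lib/sawtooth")     # data_dir; assumed to exist and be writable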
def test_state_verifier(self):
    blockstore = BlockStore(
        DictDatabase(indexes=BlockStore.create_index_configuration()))
    global_state_db = DictDatabase()

    precalculated_state_roots = [
        "e35490eac6f77453675c3399da7efe451e791272bbc8cf1b032c75030fb455c3",
        "3a369eb951171895c00ba2ffd04bfa1ef98d6ee651f96a65ae3280cf8d67d5e7",
        "797e70e29915c9129f950b2084ed0e3c09246bd1e6c232571456f51ca85df340",
    ]

    signer = get_signer()
    populate_blockstore(blockstore, signer, precalculated_state_roots)

    verify_state(global_state_db, blockstore, "tcp://eth0:4004", "serial")

    # There is a bug in the shutdown code for some component this depends
    # on, which causes it to occasionally hang during shutdown. Just kill
    # the process for now.
    # pylint: disable=protected-access
    os._exit(0)
def test_iterate_chain_from_starting_block(self):
    """Given a block store, iterate using a predecessor iterator from a
    particular start point in the chain.

    1. Create a chain of length 5.
    2. Iterate the chain using get_predecessor_iter from block 3.
    3. Verify that the block ids match the chain, in reverse order.
    """
    block_store = BlockStore(DictDatabase(
        indexes=BlockStore.create_index_configuration()))
    chain = self._create_chain(5)
    block_store.update_chain(chain)

    block = block_store['abcd2']

    ids = [b.identifier for b in block_store.get_predecessor_iter(block)]

    self.assertEqual(
        ['abcd2', 'abcd1', 'abcd0'],
        ids)
def get_databases(bind_network, data_dir=None):
    # Get the global state database to operate on
    if data_dir is not None:
        global_state_db_filename = os.path.join(
            data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:]))
        LOGGER.debug('verifying state in %s', global_state_db_filename)
        global_state_db = LMDBNoLockDatabase(global_state_db_filename, 'c')
    else:
        global_state_db = DictDatabase()

    # Get the blockstore
    block_db_filename = os.path.join(
        data_dir, 'block-{}.lmdb'.format(bind_network[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = IndexedDatabase(
        block_db_filename,
        BlockStore.serialize_block,
        BlockStore.deserialize_block,
        flag='c',
        indexes=BlockStore.create_index_configuration())
    blockstore = BlockStore(block_db)

    return global_state_db, blockstore
def verify_state(bind_network, bind_component, scheduler_type, data_dir=None): """ Verify the state root hash of all blocks is in state and if not, reconstruct the missing state. Assumes that there are no "holes" in state, ie starting from genesis, state is present for all blocks up to some point and then not at all. If persist is False, this recomputes state in memory for all blocks in the blockstore and verifies the state root hashes. Raises: InvalidChainError: The chain in the blockstore is not valid. ExecutionError: An unrecoverable error was encountered during batch execution. """ # Get the global state database to operate on if data_dir is not None: global_state_db_filename = os.path.join( data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug('verifying state in %s', global_state_db_filename) global_state_db = LMDBNoLockDatabase(global_state_db_filename, 'c') else: global_state_db = DictDatabase() state_view_factory = StateViewFactory(global_state_db) # Get the blockstore block_db_filename = os.path.join(data_dir, 'block-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug('block store file is %s', block_db_filename) block_db = IndexedDatabase(block_db_filename, BlockStore.serialize_block, BlockStore.deserialize_block, flag='c', indexes=BlockStore.create_index_configuration()) blockstore = BlockStore(block_db) # Check if we should do state verification start_block, prev_state_root = search_for_present_state_root( blockstore, state_view_factory) if start_block is None: LOGGER.info( "Skipping state verification: chain head's state root is present") return LOGGER.info("Recomputing missing state from block %s with %s scheduler", start_block, scheduler_type) component_thread_pool = InstrumentedThreadPoolExecutor(max_workers=10, name='Component') component_dispatcher = Dispatcher() component_service = Interconnect(bind_component, component_dispatcher, secured=False, heartbeat=False, max_incoming_connections=20, monitor=True, max_future_callback_workers=10) context_manager = ContextManager(global_state_db) transaction_executor = TransactionExecutor( service=component_service, context_manager=context_manager, settings_view_factory=SettingsViewFactory(state_view_factory), scheduler_type=scheduler_type, invalid_observers=[]) component_service.set_check_connections( transaction_executor.check_connections) component_dispatcher.add_handler( validator_pb2.Message.TP_RECEIPT_ADD_DATA_REQUEST, tp_state_handlers.TpReceiptAddDataHandler(context_manager), component_thread_pool) component_dispatcher.add_handler( validator_pb2.Message.TP_EVENT_ADD_REQUEST, tp_state_handlers.TpEventAddHandler(context_manager), component_thread_pool) component_dispatcher.add_handler( validator_pb2.Message.TP_STATE_DELETE_REQUEST, tp_state_handlers.TpStateDeleteHandler(context_manager), component_thread_pool) component_dispatcher.add_handler( validator_pb2.Message.TP_STATE_GET_REQUEST, tp_state_handlers.TpStateGetHandler(context_manager), component_thread_pool) component_dispatcher.add_handler( validator_pb2.Message.TP_STATE_SET_REQUEST, tp_state_handlers.TpStateSetHandler(context_manager), component_thread_pool) component_dispatcher.add_handler( validator_pb2.Message.TP_REGISTER_REQUEST, processor_handlers.ProcessorRegisterHandler( transaction_executor.processors), component_thread_pool) component_dispatcher.add_handler( validator_pb2.Message.TP_UNREGISTER_REQUEST, processor_handlers.ProcessorUnRegisterHandler( transaction_executor.processors), component_thread_pool) component_dispatcher.start() 
component_service.start() process_blocks(initial_state_root=prev_state_root, blocks=blockstore.get_block_iter(start_block=start_block, reverse=False), transaction_executor=transaction_executor, context_manager=context_manager, state_view_factory=state_view_factory) component_dispatcher.stop() component_service.stop() component_thread_pool.shutdown(wait=True) transaction_executor.stop() context_manager.stop()
def make_block_store(data=None):
    return BlockStore(
        DictDatabase(
            data,
            indexes=BlockStore.create_index_configuration()))
def make_block_store(data=None):
    return BlockStore(
        DictDatabase(data, indexes=BlockStore.create_index_configuration()))
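# Illustrative usage only (not from the original tests): DictDatabase keeps
# its contents in memory, so this make_block_store variant suits unit tests
# that do not need LMDB files on disk. The variable name is hypothetical.
in_memory_store = make_block_store()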
def __init__(self, bind_network, bind_component, bind_consensus, endpoint, peering, seeds_list, peer_list, data_dir, config_dir, identity_signer, scheduler_type, permissions, minimum_peer_connectivity, maximum_peer_connectivity, state_pruning_block_depth, fork_cache_keep_time, network_public_key=None, network_private_key=None, roles=None, component_thread_pool_workers=10, network_thread_pool_workers=10, signature_thread_pool_workers=3): """Constructs a validator instance. Args: bind_network (str): the network endpoint bind_component (str): the component endpoint endpoint (str): the zmq-style URI of this validator's publically reachable endpoint peering (str): The type of peering approach. Either 'static' or 'dynamic'. In 'static' mode, no attempted topology buildout occurs -- the validator only attempts to initiate peering connections with endpoints specified in the peer_list. In 'dynamic' mode, the validator will first attempt to initiate peering connections with endpoints specified in the peer_list and then attempt to do a topology buildout starting with peer lists obtained from endpoints in the seeds_list. In either mode, the validator will accept incoming peer requests up to max_peers. seeds_list (list of str): a list of addresses to connect to in order to perform the initial topology buildout peer_list (list of str): a list of peer addresses data_dir (str): path to the data directory config_dir (str): path to the config directory identity_signer (str): cryptographic signer the validator uses for signing component_thread_pool_workers (int): number of workers in the component thread pool; defaults to 10. network_thread_pool_workers (int): number of workers in the network thread pool; defaults to 10. signature_thread_pool_workers (int): number of workers in the signature thread pool; defaults to 3. """ # -- Setup Global State Database and Factory -- # global_state_db_filename = os.path.join( data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug( 'global state database file is %s', global_state_db_filename) global_state_db = NativeLmdbDatabase( global_state_db_filename, indexes=MerkleDatabase.create_index_configuration()) state_view_factory = StateViewFactory(global_state_db) native_state_view_factory = NativeStateViewFactory(global_state_db) # -- Setup Receipt Store -- # receipt_db_filename = os.path.join( data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug('txn receipt store file is %s', receipt_db_filename) receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c') receipt_store = TransactionReceiptStore(receipt_db) # -- Setup Block Store -- # block_db_filename = os.path.join( data_dir, 'block-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug('block store file is %s', block_db_filename) block_db = NativeLmdbDatabase( block_db_filename, indexes=BlockStore.create_index_configuration()) block_store = BlockStore(block_db) # The cache keep time for the journal's block cache must be greater # than the cache keep time used by the completer. 
base_keep_time = 1200 block_manager = BlockManager() block_manager.add_commit_store(block_store) block_status_store = BlockValidationResultStore() # -- Setup Thread Pools -- # component_thread_pool = InstrumentedThreadPoolExecutor( max_workers=component_thread_pool_workers, name='Component') network_thread_pool = InstrumentedThreadPoolExecutor( max_workers=network_thread_pool_workers, name='Network') client_thread_pool = InstrumentedThreadPoolExecutor( max_workers=5, name='Client') sig_pool = InstrumentedThreadPoolExecutor( max_workers=signature_thread_pool_workers, name='Signature') # -- Setup Dispatchers -- # component_dispatcher = Dispatcher() network_dispatcher = Dispatcher() # -- Setup Services -- # component_service = Interconnect( bind_component, component_dispatcher, secured=False, heartbeat=False, max_incoming_connections=20, monitor=True, max_future_callback_workers=10) zmq_identity = hashlib.sha512( time.time().hex().encode()).hexdigest()[:23] secure = False if network_public_key is not None and network_private_key is not None: secure = True network_service = Interconnect( bind_network, dispatcher=network_dispatcher, zmq_identity=zmq_identity, secured=secure, server_public_key=network_public_key, server_private_key=network_private_key, heartbeat=True, public_endpoint=endpoint, connection_timeout=120, max_incoming_connections=100, max_future_callback_workers=10, authorize=True, signer=identity_signer, roles=roles) # -- Setup Transaction Execution Platform -- # context_manager = ContextManager(global_state_db) batch_tracker = BatchTracker(block_store.has_batch) settings_cache = SettingsCache( SettingsViewFactory(state_view_factory), ) transaction_executor = TransactionExecutor( service=component_service, context_manager=context_manager, settings_view_factory=SettingsViewFactory(state_view_factory), scheduler_type=scheduler_type, invalid_observers=[batch_tracker]) component_service.set_check_connections( transaction_executor.check_connections) event_broadcaster = EventBroadcaster( component_service, block_store, receipt_store) # -- Consensus Engine -- # consensus_thread_pool = InstrumentedThreadPoolExecutor( max_workers=3, name='Consensus') consensus_dispatcher = Dispatcher() consensus_service = Interconnect( bind_consensus, consensus_dispatcher, secured=False, heartbeat=False, max_incoming_connections=20, max_future_callback_workers=10) consensus_registry = ConsensusRegistry() consensus_notifier = ConsensusNotifier( consensus_service, consensus_registry, identity_signer.get_public_key().as_hex()) # -- Setup P2P Networking -- # gossip = Gossip( network_service, settings_cache, lambda: block_store.chain_head, block_store.chain_head_state_root, consensus_notifier, endpoint=endpoint, peering_mode=peering, initial_seed_endpoints=seeds_list, initial_peer_endpoints=peer_list, minimum_peer_connectivity=minimum_peer_connectivity, maximum_peer_connectivity=maximum_peer_connectivity, topology_check_frequency=1 ) consensus_notifier.set_gossip(gossip) completer = Completer( block_manager=block_manager, transaction_committed=block_store.has_transaction, get_committed_batch_by_id=block_store.get_batch, get_committed_batch_by_txn_id=( block_store.get_batch_by_transaction ), get_chain_head=lambda: unwrap_if_not_none(block_store.chain_head), gossip=gossip, cache_keep_time=base_keep_time, cache_purge_frequency=30, requested_keep_time=300) self._completer = completer block_sender = BroadcastBlockSender(completer, gossip) batch_sender = BroadcastBatchSender(completer, gossip) chain_id_manager = 
ChainIdManager(data_dir) identity_view_factory = IdentityViewFactory( StateViewFactory(global_state_db)) id_cache = IdentityCache(identity_view_factory) # -- Setup Permissioning -- # permission_verifier = PermissionVerifier( permissions, block_store.chain_head_state_root, id_cache) identity_observer = IdentityObserver( to_update=id_cache.invalidate, forked=id_cache.forked) settings_observer = SettingsObserver( to_update=settings_cache.invalidate, forked=settings_cache.forked) # -- Setup Journal -- # batch_injector_factory = DefaultBatchInjectorFactory( state_view_factory=state_view_factory, signer=identity_signer) block_publisher = BlockPublisher( block_manager=block_manager, transaction_executor=transaction_executor, transaction_committed=block_store.has_transaction, batch_committed=block_store.has_batch, state_view_factory=native_state_view_factory, block_sender=block_sender, batch_sender=batch_sender, chain_head=block_store.chain_head, identity_signer=identity_signer, data_dir=data_dir, config_dir=config_dir, permission_verifier=permission_verifier, batch_observers=[batch_tracker], batch_injector_factory=batch_injector_factory) block_validator = BlockValidator( block_manager=block_manager, view_factory=native_state_view_factory, transaction_executor=transaction_executor, block_status_store=block_status_store, permission_verifier=permission_verifier) chain_controller = ChainController( block_store=block_store, block_manager=block_manager, block_validator=block_validator, state_database=global_state_db, chain_head_lock=block_publisher.chain_head_lock, block_status_store=block_status_store, consensus_notifier=consensus_notifier, consensus_registry=consensus_registry, state_pruning_block_depth=state_pruning_block_depth, fork_cache_keep_time=fork_cache_keep_time, data_dir=data_dir, observers=[ event_broadcaster, receipt_store, batch_tracker, identity_observer, settings_observer ]) genesis_controller = GenesisController( context_manager=context_manager, transaction_executor=transaction_executor, block_manager=block_manager, block_store=block_store, state_view_factory=state_view_factory, identity_signer=identity_signer, data_dir=data_dir, config_dir=config_dir, chain_id_manager=chain_id_manager, batch_sender=batch_sender, receipt_store=receipt_store) responder = Responder(completer) completer.set_on_block_received(chain_controller.queue_block) self._incoming_batch_sender = None # -- Register Message Handler -- # network_handlers.add( network_dispatcher, network_service, gossip, completer, responder, network_thread_pool, sig_pool, lambda block_id: block_id in block_manager, self.has_batch, permission_verifier, block_publisher, consensus_notifier) component_handlers.add( component_dispatcher, gossip, context_manager, transaction_executor, completer, block_store, batch_tracker, global_state_db, self.get_chain_head_state_root_hash, receipt_store, event_broadcaster, permission_verifier, component_thread_pool, client_thread_pool, sig_pool, block_publisher, identity_signer.get_public_key().as_hex()) # -- Store Object References -- # self._component_dispatcher = component_dispatcher self._component_service = component_service self._component_thread_pool = component_thread_pool self._network_dispatcher = network_dispatcher self._network_service = network_service self._network_thread_pool = network_thread_pool consensus_proxy = ConsensusProxy( block_manager=block_manager, chain_controller=chain_controller, block_publisher=block_publisher, gossip=gossip, identity_signer=identity_signer, 
settings_view_factory=SettingsViewFactory(state_view_factory), state_view_factory=state_view_factory, consensus_registry=consensus_registry, consensus_notifier=consensus_notifier) consensus_handlers.add( consensus_dispatcher, consensus_thread_pool, consensus_proxy, consensus_notifier) self._block_status_store = block_status_store self._consensus_notifier = consensus_notifier self._consensus_dispatcher = consensus_dispatcher self._consensus_service = consensus_service self._consensus_thread_pool = consensus_thread_pool self._consensus_registry = consensus_registry self._client_thread_pool = client_thread_pool self._sig_pool = sig_pool self._context_manager = context_manager self._transaction_executor = transaction_executor self._genesis_controller = genesis_controller self._gossip = gossip self._block_publisher = block_publisher self._block_validator = block_validator self._chain_controller = chain_controller self._block_validator = block_validator
import hashlib
import random

import cbor

from sawtooth_signing import create_context
from sawtooth_signing import CryptoFactory

from test_completer.mock import MockGossip

from sawtooth_validator.journal.completer import Completer
from sawtooth_validator.database.dict_database import DictDatabase
from sawtooth_validator.journal.block_store import BlockStore
from sawtooth_validator.journal.block_wrapper import NULL_BLOCK_IDENTIFIER
from sawtooth_validator.protobuf.batch_pb2 import BatchHeader, Batch
from sawtooth_validator.protobuf.block_pb2 import BlockHeader, Block
from sawtooth_validator.protobuf.transaction_pb2 import TransactionHeader, Transaction

block_store = BlockStore(
    DictDatabase(indexes=BlockStore.create_index_configuration()))
gossip = MockGossip()
context = create_context('secp256k1')
crypto_factory = CryptoFactory(context)
private_key = context.new_random_private_key()
signer = crypto_factory.new_signer(private_key)
completer = Completer(block_store, gossip)


def _create_transactions(count, missing_dep=False):
    txn_list = []

    for _ in range(count):
        payload = {
            'Verb': 'set',
            'Name': 'name' + str(random.randint(0, 100)),
def clear(self):
    self._block_store = DictDatabase(
        indexes=BlockStore.create_index_configuration())
def __init__(self, size=3, start='0'):
    super().__init__(DictDatabase(
        indexes=BlockStore.create_index_configuration()))

    for i in range(size):
        self.add_block(_increment_key(start, i))
def __init__(self, bind_network, bind_component, bind_consensus, endpoint, peering, seeds_list, peer_list, data_dir, config_dir, identity_signer, scheduler_type, permissions, minimum_peer_connectivity, maximum_peer_connectivity, state_pruning_block_depth, network_public_key=None, network_private_key=None, roles=None): """Constructs a validator instance. Args: bind_network (str): the network endpoint bind_component (str): the component endpoint endpoint (str): the zmq-style URI of this validator's publically reachable endpoint peering (str): The type of peering approach. Either 'static' or 'dynamic'. In 'static' mode, no attempted topology buildout occurs -- the validator only attempts to initiate peering connections with endpoints specified in the peer_list. In 'dynamic' mode, the validator will first attempt to initiate peering connections with endpoints specified in the peer_list and then attempt to do a topology buildout starting with peer lists obtained from endpoints in the seeds_list. In either mode, the validator will accept incoming peer requests up to max_peers. seeds_list (list of str): a list of addresses to connect to in order to perform the initial topology buildout peer_list (list of str): a list of peer addresses data_dir (str): path to the data directory config_dir (str): path to the config directory identity_signer (str): cryptographic signer the validator uses for signing """ # -- Setup Global State Database and Factory -- # global_state_db_filename = os.path.join( data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug('global state database file is %s', global_state_db_filename) global_state_db = NativeLmdbDatabase( global_state_db_filename, indexes=MerkleDatabase.create_index_configuration()) state_view_factory = StateViewFactory(global_state_db) # -- Setup Receipt Store -- # receipt_db_filename = os.path.join( data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug('txn receipt store file is %s', receipt_db_filename) receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c') receipt_store = TransactionReceiptStore(receipt_db) # -- Setup Block Store -- # block_db_filename = os.path.join( data_dir, 'block-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug('block store file is %s', block_db_filename) block_db = IndexedDatabase( block_db_filename, BlockStore.serialize_block, BlockStore.deserialize_block, flag='c', indexes=BlockStore.create_index_configuration()) block_store = BlockStore(block_db) # The cache keep time for the journal's block cache must be greater # than the cache keep time used by the completer. 
base_keep_time = 1200 block_cache = BlockCache(block_store, keep_time=int(base_keep_time * 9 / 8), purge_frequency=30) # -- Setup Thread Pools -- # component_thread_pool = InstrumentedThreadPoolExecutor( max_workers=10, name='Component') network_thread_pool = InstrumentedThreadPoolExecutor(max_workers=10, name='Network') client_thread_pool = InstrumentedThreadPoolExecutor(max_workers=5, name='Client') sig_pool = InstrumentedThreadPoolExecutor(max_workers=3, name='Signature') # -- Setup Dispatchers -- # component_dispatcher = Dispatcher() network_dispatcher = Dispatcher() # -- Setup Services -- # component_service = Interconnect(bind_component, component_dispatcher, secured=False, heartbeat=False, max_incoming_connections=20, monitor=True, max_future_callback_workers=10) zmq_identity = hashlib.sha512( time.time().hex().encode()).hexdigest()[:23] secure = False if network_public_key is not None and network_private_key is not None: secure = True network_service = Interconnect(bind_network, dispatcher=network_dispatcher, zmq_identity=zmq_identity, secured=secure, server_public_key=network_public_key, server_private_key=network_private_key, heartbeat=True, public_endpoint=endpoint, connection_timeout=120, max_incoming_connections=100, max_future_callback_workers=10, authorize=True, signer=identity_signer, roles=roles) # -- Setup Transaction Execution Platform -- # context_manager = ContextManager(global_state_db) batch_tracker = BatchTracker(block_store) settings_cache = SettingsCache( SettingsViewFactory(state_view_factory), ) transaction_executor = TransactionExecutor( service=component_service, context_manager=context_manager, settings_view_factory=SettingsViewFactory(state_view_factory), scheduler_type=scheduler_type, invalid_observers=[batch_tracker]) component_service.set_check_connections( transaction_executor.check_connections) event_broadcaster = EventBroadcaster(component_service, block_store, receipt_store) # -- Setup P2P Networking -- # gossip = Gossip(network_service, settings_cache, lambda: block_store.chain_head, block_store.chain_head_state_root, endpoint=endpoint, peering_mode=peering, initial_seed_endpoints=seeds_list, initial_peer_endpoints=peer_list, minimum_peer_connectivity=minimum_peer_connectivity, maximum_peer_connectivity=maximum_peer_connectivity, topology_check_frequency=1) completer = Completer(block_store, gossip, cache_keep_time=base_keep_time, cache_purge_frequency=30, requested_keep_time=300) block_sender = BroadcastBlockSender(completer, gossip) batch_sender = BroadcastBatchSender(completer, gossip) chain_id_manager = ChainIdManager(data_dir) identity_view_factory = IdentityViewFactory( StateViewFactory(global_state_db)) id_cache = IdentityCache(identity_view_factory) # -- Setup Permissioning -- # permission_verifier = PermissionVerifier( permissions, block_store.chain_head_state_root, id_cache) identity_observer = IdentityObserver(to_update=id_cache.invalidate, forked=id_cache.forked) settings_observer = SettingsObserver( to_update=settings_cache.invalidate, forked=settings_cache.forked) # -- Consensus Engine -- # consensus_thread_pool = InstrumentedThreadPoolExecutor( max_workers=3, name='Consensus') consensus_dispatcher = Dispatcher() consensus_service = Interconnect(bind_consensus, consensus_dispatcher, secured=False, heartbeat=False, max_incoming_connections=20, monitor=True, max_future_callback_workers=10) consensus_notifier = ConsensusNotifier(consensus_service) # -- Setup Journal -- # batch_injector_factory = DefaultBatchInjectorFactory( 
block_cache=block_cache, state_view_factory=state_view_factory, signer=identity_signer) block_publisher = BlockPublisher( transaction_executor=transaction_executor, block_cache=block_cache, state_view_factory=state_view_factory, settings_cache=settings_cache, block_sender=block_sender, batch_sender=batch_sender, chain_head=block_store.chain_head, identity_signer=identity_signer, data_dir=data_dir, config_dir=config_dir, permission_verifier=permission_verifier, check_publish_block_frequency=0.1, batch_observers=[batch_tracker], batch_injector_factory=batch_injector_factory) block_publisher_batch_sender = block_publisher.batch_sender() block_validator = BlockValidator( block_cache=block_cache, state_view_factory=state_view_factory, transaction_executor=transaction_executor, identity_signer=identity_signer, data_dir=data_dir, config_dir=config_dir, permission_verifier=permission_verifier) chain_controller = ChainController( block_store=block_store, block_cache=block_cache, block_validator=block_validator, state_database=global_state_db, chain_head_lock=block_publisher.chain_head_lock, state_pruning_block_depth=state_pruning_block_depth, data_dir=data_dir, observers=[ event_broadcaster, receipt_store, batch_tracker, identity_observer, settings_observer ]) genesis_controller = GenesisController( context_manager=context_manager, transaction_executor=transaction_executor, completer=completer, block_store=block_store, state_view_factory=state_view_factory, identity_signer=identity_signer, data_dir=data_dir, config_dir=config_dir, chain_id_manager=chain_id_manager, batch_sender=batch_sender) responder = Responder(completer) completer.set_on_batch_received(block_publisher_batch_sender.send) completer.set_on_block_received(chain_controller.queue_block) completer.set_chain_has_block(chain_controller.has_block) # -- Register Message Handler -- # network_handlers.add(network_dispatcher, network_service, gossip, completer, responder, network_thread_pool, sig_pool, chain_controller.has_block, block_publisher.has_batch, permission_verifier, block_publisher, consensus_notifier) component_handlers.add(component_dispatcher, gossip, context_manager, transaction_executor, completer, block_store, batch_tracker, global_state_db, self.get_chain_head_state_root_hash, receipt_store, event_broadcaster, permission_verifier, component_thread_pool, client_thread_pool, sig_pool, block_publisher) # -- Store Object References -- # self._component_dispatcher = component_dispatcher self._component_service = component_service self._component_thread_pool = component_thread_pool self._network_dispatcher = network_dispatcher self._network_service = network_service self._network_thread_pool = network_thread_pool consensus_proxy = ConsensusProxy( block_cache=block_cache, chain_controller=chain_controller, block_publisher=block_publisher, gossip=gossip, identity_signer=identity_signer, settings_view_factory=SettingsViewFactory(state_view_factory), state_view_factory=state_view_factory) consensus_handlers.add(consensus_dispatcher, consensus_thread_pool, consensus_proxy) self._consensus_dispatcher = consensus_dispatcher self._consensus_service = consensus_service self._consensus_thread_pool = consensus_thread_pool self._client_thread_pool = client_thread_pool self._sig_pool = sig_pool self._context_manager = context_manager self._transaction_executor = transaction_executor self._genesis_controller = genesis_controller self._gossip = gossip self._block_publisher = block_publisher self._chain_controller = chain_controller 
self._block_validator = block_validator
import cProfile

from test_journal.block_tree_manager import BlockTreeManager

from sawtooth_validator.database.dict_database import DictDatabase
from sawtooth_validator.journal.block_store import BlockStore


if __name__ == '__main__':
    print("\n====== cProfile: ./validator/cprof_block_store.py ======\n")
    pr = cProfile.Profile()
    pr.enable()

    block_tree_manager = BlockTreeManager()
    block = block_tree_manager.create_block()
    block_store = BlockStore(
        DictDatabase({
            block.header_signature: block,
        }, indexes=BlockStore.create_index_configuration()))

    block_store.update_chain([block])

    batch_id = block.batches[0].header_signature
    stored = block_store.get_block_by_batch_id(batch_id)

    batch = block.batches[0]
    txn_id = batch.transactions[0].header_signature
    stored = block_store.get_batch_by_transaction(txn_id)

    batch_id = batch.header_signature
    stored_batch = block_store.get_batch(batch_id)

    pr.disable()
    pr.print_stats(sort='time')