def setUp(self):
    self.dir = tempfile.mkdtemp()
    self.block_db = NativeLmdbDatabase(
        os.path.join(self.dir, 'block.lmdb'),
        BlockStore.create_index_configuration())
    self.block_store = BlockStore(self.block_db)
    self.block_manager = BlockManager()
    self.block_manager.add_commit_store(self.block_store)
    self.gossip = MockGossip()
    self.completer = Completer(
        block_manager=self.block_manager,
        transaction_committed=self.block_store.has_transaction,
        get_committed_batch_by_id=self.block_store.get_batch,
        get_committed_batch_by_txn_id=(
            self.block_store.get_batch_by_transaction),
        gossip=self.gossip)
    self.completer.set_get_chain_head(lambda: self.block_store.chain_head)
    self.completer.set_on_block_received(self._on_block_received)
    self.completer.set_on_batch_received(self._on_batch_received)
    self._has_block_value = True

    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    self.signer = crypto_factory.new_signer(private_key)

    self.blocks = []
    self.batches = []
def setUp(self):
    self.block_store = BlockStore(
        DictDatabase(indexes=BlockStore.create_index_configuration()))
    self.block_manager = BlockManager()
    self.block_manager.add_store("commit_store", self.block_store)
    self.gossip = MockGossip()
    self.completer = Completer(
        block_manager=self.block_manager,
        transaction_committed=self.block_store.has_transaction,
        get_committed_batch_by_id=self.block_store.get_batch,
        get_committed_batch_by_txn_id=(
            self.block_store.get_batch_by_transaction),
        get_chain_head=lambda: self.block_store.chain_head,
        gossip=self.gossip)
    self.completer.set_on_block_received(self._on_block_received)
    self.completer.set_on_batch_received(self._on_batch_received)
    self._has_block_value = True

    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    self.signer = crypto_factory.new_signer(private_key)

    self.blocks = []
    self.batches = []
def setUp(self):
    self.block_store = BlockStore({})
    self.gossip = MockGossip()
    self.completer = Completer(self.block_store, self.gossip)
    self.completer._on_block_received = self._on_block_received
    self.completer._on_batch_received = self._on_batch_received
    self.private_key = signing.generate_privkey()
    self.public_key = signing.generate_pubkey(self.private_key)
    self.blocks = []
    self.batches = []
def setUp(self):
    self.block_store = BlockStore(DictDatabase(
        indexes=BlockStore.create_index_configuration()))
    self.gossip = MockGossip()
    self.completer = Completer(self.block_store, self.gossip)
    self.completer._on_block_received = self._on_block_received
    self.completer._on_batch_received = self._on_batch_received
    self.private_key = signing.generate_private_key()
    self.public_key = signing.generate_public_key(self.private_key)
    self.blocks = []
    self.batches = []
def setUp(self):
    self.block_store = BlockStore(
        DictDatabase(indexes=BlockStore.create_index_configuration()))
    self.gossip = MockGossip()
    self.completer = Completer(self.block_store, self.gossip)
    self.completer._on_block_received = self._on_block_received
    self.completer._on_batch_received = self._on_batch_received

    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    self.signer = crypto_factory.new_signer(private_key)

    self.blocks = []
    self.batches = []
def setUp(self):
    self.dir = tempfile.mkdtemp()
    self.block_db = NativeLmdbDatabase(
        os.path.join(self.dir, 'block.lmdb'),
        BlockStore.create_index_configuration())
    self.block_store = BlockStore(self.block_db)
    self.block_manager = BlockManager()
    self.block_manager.add_commit_store(self.block_store)
    self.gossip = MockGossip()
    self.completer = Completer(
        block_manager=self.block_manager,
        transaction_committed=self.block_store.has_transaction,
        get_committed_batch_by_id=self.block_store.get_batch,
        get_committed_batch_by_txn_id=(
            self.block_store.get_batch_by_transaction
        ),
        get_chain_head=lambda: self.block_store.chain_head,
        gossip=self.gossip)
    self.completer.set_on_block_received(self._on_block_received)
    self.completer.set_on_batch_received(self._on_batch_received)
    self._has_block_value = True

    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    self.signer = crypto_factory.new_signer(private_key)

    self.blocks = []
    self.batches = []
def setUp(self):
    self.block_store = BlockStore(DictDatabase(
        indexes=BlockStore.create_index_configuration()))
    self.gossip = MockGossip()
    self.completer = Completer(self.block_store, self.gossip)
    self.completer._on_block_received = self._on_block_received
    self.completer._on_batch_received = self._on_batch_received
    self.completer._has_block = self._has_block
    self._has_block_value = True

    context = create_context('secp256k1')
    private_key = context.new_random_private_key()
    crypto_factory = CryptoFactory(context)
    self.signer = crypto_factory.new_signer(private_key)

    self.blocks = []
    self.batches = []
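# The newer fixtures above create their signing key through the
# sawtooth_signing factory API (create_context / CryptoFactory) rather than
# the older module-level signing helpers used in the earlier setUp variants.
# A minimal, self-contained sketch of that pattern follows; the call to
# context.verify() is an assumption about the usual counterpart to
# Signer.sign() and is not shown in the fixtures themselves.

from sawtooth_signing import CryptoFactory, create_context

context = create_context('secp256k1')
private_key = context.new_random_private_key()
signer = CryptoFactory(context).new_signer(private_key)

message = b'example-header-bytes'
signature = signer.sign(message)

# The hex form of the public key is what the tests place in the
# signer_public_key / batcher_public_key header fields.
public_key = signer.get_public_key()
print(public_key.as_hex())
print(context.verify(signature, message, public_key))  # expected: True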
def __init__(self, network_endpoint, component_endpoint, peer_list,
             data_dir, identity_signing_key):
    """Constructs a validator instance.

    Args:
        network_endpoint (str): the network endpoint
        component_endpoint (str): the component endpoint
        peer_list (list of str): a list of peer addresses
        data_dir (str): path to the data directory
        identity_signing_key (str): key validator uses for signing
    """
    db_filename = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(network_endpoint[-2:]))
    LOGGER.debug('database file is %s', db_filename)
    merkle_db = LMDBNoLockDatabase(db_filename, 'c')

    context_manager = ContextManager(merkle_db)
    self._context_manager = context_manager

    state_view_factory = StateViewFactory(merkle_db)

    block_db_filename = os.path.join(
        data_dir, 'block-{}.lmdb'.format(network_endpoint[-2:]))
    LOGGER.debug('block store file is %s', block_db_filename)
    block_db = LMDBNoLockDatabase(block_db_filename, 'c')
    block_store = BlockStore(block_db)

    # setup network
    self._dispatcher = Dispatcher()

    thread_pool = ThreadPoolExecutor(max_workers=10)
    process_pool = ProcessPoolExecutor(max_workers=3)
    self._thread_pool = thread_pool
    self._process_pool = process_pool

    self._service = Interconnect(component_endpoint,
                                 self._dispatcher,
                                 secured=False,
                                 heartbeat=False,
                                 max_incoming_connections=20)

    executor = TransactionExecutor(service=self._service,
                                   context_manager=context_manager,
                                   config_view_factory=ConfigViewFactory(
                                       StateViewFactory(merkle_db)))
    self._executor = executor

    zmq_identity = hashlib.sha512(
        time.time().hex().encode()).hexdigest()[:23]

    network_thread_pool = ThreadPoolExecutor(max_workers=10)
    self._network_thread_pool = network_thread_pool

    self._network_dispatcher = Dispatcher()

    # Server public and private keys are hardcoded here due to
    # the decision to avoid having separate identities for each
    # validator's server socket. This is appropriate for a public
    # network. For a permissioned network with requirements for
    # server endpoint authentication at the network level, this can
    # be augmented with a local lookup service for side-band provided
    # endpoint, public_key pairs and a local configuration option
    # for 'server' side private keys.
    self._network = Interconnect(
        network_endpoint,
        dispatcher=self._network_dispatcher,
        zmq_identity=zmq_identity,
        secured=True,
        server_public_key=b'wFMwoOt>yFqI/ek.G[tfMMILHWw#vXB[Sv}>l>i)',
        server_private_key=b'r&oJ5aQDj4+V]p2:Lz70Eu0x#m%IwzBdP(}&hWM*',
        heartbeat=True,
        connection_timeout=30,
        max_incoming_connections=100)

    self._gossip = Gossip(self._network, initial_peer_endpoints=peer_list)

    completer = Completer(block_store, self._gossip)

    block_sender = BroadcastBlockSender(completer, self._gossip)
    batch_sender = BroadcastBatchSender(completer, self._gossip)

    chain_id_manager = ChainIdManager(data_dir)

    # Create and configure journal
    self._journal = Journal(
        block_store=block_store,
        state_view_factory=StateViewFactory(merkle_db),
        block_sender=block_sender,
        batch_sender=batch_sender,
        transaction_executor=executor,
        squash_handler=context_manager.get_squash_handler(),
        identity_signing_key=identity_signing_key,
        chain_id_manager=chain_id_manager,
        data_dir=data_dir,
        check_publish_block_frequency=0.1,
        block_cache_purge_frequency=30,
        block_cache_keep_time=300)

    self._genesis_controller = GenesisController(
        context_manager=context_manager,
        transaction_executor=executor,
        completer=completer,
        block_store=block_store,
        state_view_factory=state_view_factory,
        identity_key=identity_signing_key,
        data_dir=data_dir,
        chain_id_manager=chain_id_manager,
        batch_sender=batch_sender)

    responder = Responder(completer)

    completer.set_on_batch_received(self._journal.on_batch_received)
    completer.set_on_block_received(self._journal.on_block_received)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_GET_REQUEST,
        tp_state_handlers.TpStateGetHandler(context_manager),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_SET_REQUEST,
        tp_state_handlers.TpStateSetHandler(context_manager),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_REGISTER_REQUEST,
        processor_handlers.ProcessorRegisterHandler(executor.processors),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_UNREGISTER_REQUEST,
        processor_handlers.ProcessorUnRegisterHandler(executor.processors),
        thread_pool)

    # Set up base network handlers
    self._network_dispatcher.add_handler(
        validator_pb2.Message.NETWORK_PING,
        PingHandler(),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.NETWORK_CONNECT,
        ConnectHandler(network=self._network),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.NETWORK_DISCONNECT,
        DisconnectHandler(network=self._network),
        network_thread_pool)

    # Set up gossip handlers
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_REGISTER,
        PeerRegisterHandler(gossip=self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_UNREGISTER,
        PeerUnregisterHandler(gossip=self._gossip),
        network_thread_pool)

    # GOSSIP_MESSAGE 1) Sends acknowledgement to the sender
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        GossipMessageHandler(),
        network_thread_pool)

    # GOSSIP_MESSAGE 2) Verifies signature
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        signature_verifier.GossipMessageSignatureVerifier(),
        process_pool)

    # GOSSIP_MESSAGE 3) Determines if we should broadcast the
    # message to our peers. It is important that this occur prior
    # to the sending of the message to the completer, as this step
    # relies on whether the gossip message has previously been
    # seen by the validator to determine whether or not forwarding
    # should occur
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        GossipBroadcastHandler(gossip=self._gossip, completer=completer),
        network_thread_pool)

    # GOSSIP_MESSAGE 4) Send message to completer
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        CompleterGossipHandler(completer),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
        BlockResponderHandler(responder, self._gossip),
        network_thread_pool)

    # GOSSIP_BLOCK_RESPONSE 1) Sends ack to the sender
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
        GossipBlockResponseHandler(),
        network_thread_pool)

    # GOSSIP_BLOCK_RESPONSE 2) Verifies signature
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
        signature_verifier.GossipBlockResponseSignatureVerifier(),
        process_pool)

    # GOSSIP_BLOCK_RESPONSE 3) Send message to completer
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BLOCK_RESPONSE,
        CompleterGossipBlockResponseHandler(completer),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST,
        BatchByBatchIdResponderHandler(responder, self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST,
        BatchByTransactionIdResponderHandler(responder, self._gossip),
        network_thread_pool)

    # GOSSIP_BATCH_RESPONSE 1) Sends ack to the sender
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
        GossipBatchResponseHandler(),
        network_thread_pool)

    # GOSSIP_BATCH_RESPONSE 2) Verifies signature
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
        signature_verifier.GossipBatchResponseSignatureVerifier(),
        process_pool)

    # GOSSIP_BATCH_RESPONSE 3) Send message to completer
    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_BATCH_RESPONSE,
        CompleterGossipBatchResponseHandler(completer),
        network_thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
        signature_verifier.BatchListSignatureVerifier(),
        process_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
        CompleterBatchListBroadcastHandler(completer, self._gossip),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
        client_handlers.BatchSubmitFinisher(
            self._journal.get_block_store(),
            completer.batch_cache),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST,
        client_handlers.BatchStatusRequest(
            self._journal.get_block_store(),
            completer.batch_cache),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
        client_handlers.StateListRequest(
            merkle_db,
            self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
        client_handlers.StateGetRequest(
            merkle_db,
            self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
        client_handlers.BlockListRequest(self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
        client_handlers.BlockGetRequest(self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST,
        client_handlers.BatchListRequest(self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_GET_REQUEST,
        client_handlers.BatchGetRequest(self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
        client_handlers.StateCurrentRequest(
            self._journal.get_current_root),
        thread_pool)
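# The server_public_key / server_private_key literals passed to Interconnect
# above are Z85-encoded ZMQ CURVE keys shared by every validator, as the
# in-code comment explains. A hypothetical sketch of generating a fresh pair
# per validator with pyzmq follows; this is only an assumption about how keys
# might be provisioned for a permissioned deployment, not part of this module.

import zmq

# Requires libzmq built with CURVE support; raises otherwise.
server_public_key, server_private_key = zmq.curve_keypair()

# Both values are 40-character Z85-encoded byte strings, the same shape as the
# hardcoded literals above, and could be supplied to Interconnect(secured=True).
print(server_public_key, server_private_key)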
def __init__(self, network_endpoint, component_endpoint, peer_list, data_dir):
    """Constructs a validator instance.

    Args:
        network_endpoint (str): the network endpoint
        component_endpoint (str): the component endpoint
        peer_list (list of str): a list of peer addresses
        data_dir (str): path to the data directory
    """
    db_filename = os.path.join(
        data_dir, 'merkle-{}.lmdb'.format(network_endpoint[-2:]))
    LOGGER.debug('database file is %s', db_filename)
    lmdb = LMDBNoLockDatabase(db_filename, 'n')
    context_manager = ContextManager(lmdb)

    block_db_filename = os.path.join(data_dir, 'block.lmdb')
    LOGGER.debug('block store file is %s', block_db_filename)
    # block_store = LMDBNoLockDatabase(block_db_filename, 'n')
    block_store = {}
    # this is not currently being used but will be something like this
    # in the future, when Journal takes a block_store that isn't a dict

    # setup network
    self._dispatcher = Dispatcher()
    completer = Completer(block_store)

    thread_pool = ThreadPoolExecutor(max_workers=10)
    process_pool = ProcessPoolExecutor(max_workers=3)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_GET_REQUEST,
        tp_state_handlers.TpStateGetHandler(context_manager),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_STATE_SET_REQUEST,
        tp_state_handlers.TpStateSetHandler(context_manager),
        thread_pool)

    self._service = Interconnect(component_endpoint,
                                 self._dispatcher,
                                 secured=False)

    executor = TransactionExecutor(self._service, context_manager)

    self._dispatcher.add_handler(
        validator_pb2.Message.TP_REGISTER_REQUEST,
        ProcessorRegisterHandler(executor.processors),
        thread_pool)

    identity = hashlib.sha512(time.time().hex().encode()).hexdigest()[:23]

    network_thread_pool = ThreadPoolExecutor(max_workers=10)
    self._network_dispatcher = Dispatcher()

    # Server public and private keys are hardcoded here due to
    # the decision to avoid having separate identities for each
    # validator's server socket. This is appropriate for a public
    # network. For a permissioned network with requirements for
    # server endpoint authentication at the network level, this can
    # be augmented with a local lookup service for side-band provided
    # endpoint, public_key pairs and a local configuration option
    # for 'server' side private keys.
    self._network = Interconnect(
        network_endpoint,
        dispatcher=self._network_dispatcher,
        identity=identity,
        peer_connections=peer_list,
        secured=True,
        server_public_key=b'wFMwoOt>yFqI/ek.G[tfMMILHWw#vXB[Sv}>l>i)',
        server_private_key=b'r&oJ5aQDj4+V]p2:Lz70Eu0x#m%IwzBdP(}&hWM*')

    self._gossip = Gossip(self._network)

    block_sender = BroadcastBlockSender(completer, self._gossip)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_PING,
        PingHandler(),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_REGISTER,
        PeerRegisterHandler(),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_UNREGISTER,
        PeerUnregisterHandler(),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        GossipMessageHandler(),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        signature_verifier.GossipMessageSignatureVerifier(),
        process_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        GossipBroadcastHandler(gossip=self._gossip),
        network_thread_pool)

    self._network_dispatcher.add_handler(
        validator_pb2.Message.GOSSIP_MESSAGE,
        CompleterGossipHandler(completer),
        network_thread_pool)

    # Create and configure journal
    self._journal = Journal(
        consensus=dev_mode_consensus,
        block_store=block_store,
        block_sender=block_sender,
        transaction_executor=executor,
        squash_handler=context_manager.get_squash_handler())

    self._genesis_controller = GenesisController(
        context_manager=context_manager,
        transaction_executor=executor,
        completer=completer,
        block_store=block_store,
        data_dir=data_dir)

    completer.set_on_batch_received(self._journal.on_batch_received)
    completer.set_on_block_received(self._journal.on_block_received)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
        signature_verifier.BatchListSignatureVerifier(),
        process_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST,
        CompleterBatchListBroadcastHandler(completer, self._gossip),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_LIST_REQUEST,
        client_handlers.StateListRequest(
            lmdb,
            self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_GET_REQUEST,
        client_handlers.StateGetRequest(
            lmdb,
            self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST,
        client_handlers.BlockGetRequest(self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST,
        client_handlers.BlockListRequest(self._journal.get_block_store()),
        thread_pool)

    self._dispatcher.add_handler(
        validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST,
        client_handlers.StateCurrentRequest(
            self._journal.get_current_root),
        thread_pool)
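# Both __init__ variants above register several handlers for a single message
# type (e.g. GOSSIP_MESSAGE: ack, verify signature, decide whether to
# broadcast, hand off to the completer), so registration order defines a
# processing pipeline. The toy dispatcher below only illustrates that ordering
# idea; it is not the Sawtooth Dispatcher API (the real add_handler also takes
# an executor, and real handlers return HandlerResult objects, not booleans).

from collections import defaultdict


class ToyDispatcher:
    """Runs every handler registered for a message type, in order."""

    def __init__(self):
        self._handlers = defaultdict(list)

    def add_handler(self, message_type, handler):
        self._handlers[message_type].append(handler)

    def dispatch(self, message_type, message):
        for handler in self._handlers[message_type]:
            if not handler(message):
                # A falsy result drops the message, so later stages
                # (e.g. the completer hand-off) never see it.
                return False
        return True


toy = ToyDispatcher()
toy.add_handler('GOSSIP_MESSAGE', lambda msg: True)                   # 1) ack sender
toy.add_handler('GOSSIP_MESSAGE', lambda msg: msg.get('signature'))   # 2) verify
toy.add_handler('GOSSIP_MESSAGE',
                lambda msg: print('completer got', msg) or True)      # 3) hand off

print(toy.dispatch('GOSSIP_MESSAGE', {'signature': 'ok', 'payload': 'batch'}))  # True
print(toy.dispatch('GOSSIP_MESSAGE', {'payload': 'unsigned'}))                  # False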
def __init__(self, network_endpoint, component_endpoint, peer_list): data_dir = os.path.expanduser('~') db_filename = os.path.join(data_dir, 'merkle-{}.lmdb'.format( network_endpoint[-2:])) LOGGER.debug('database file is %s', db_filename) lmdb = LMDBNoLockDatabase(db_filename, 'n') context_manager = ContextManager(lmdb) block_db_filename = os.path.join(data_dir, 'block.lmdb') LOGGER.debug('block store file is %s', block_db_filename) block_store = {} # block_store = LMDBNoLockDatabase(block_db_filename, 'n') # this is not currently being used but will be something like this # in the future, when Journal takes a block_store that isn't a dict # setup network self._dispatcher = Dispatcher() completer = Completer() thread_pool = ThreadPoolExecutor(max_workers=10) process_pool = ProcessPoolExecutor(max_workers=3) self._dispatcher.add_handler( validator_pb2.Message.TP_STATE_GET_REQUEST, tp_state_handlers.TpStateGetHandler(context_manager), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.TP_STATE_SET_REQUEST, tp_state_handlers.TpStateSetHandler(context_manager), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_GET_REQUEST, client_handlers.StateGetRequestHandler(lmdb), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_LIST_REQUEST, client_handlers.StateListRequestHandler(lmdb), thread_pool) self._service = Interconnect(component_endpoint, self._dispatcher) executor = TransactionExecutor(self._service, context_manager) self._dispatcher.add_handler( validator_pb2.Message.TP_REGISTER_REQUEST, ProcessorRegisterHandler(executor.processors), thread_pool) identity = hashlib.sha512( time.time().hex().encode()).hexdigest()[:23] network_thread_pool = ThreadPoolExecutor(max_workers=10) self._network_dispatcher = Dispatcher() self._network = Interconnect( network_endpoint, dispatcher=self._network_dispatcher, identity=identity, peer_connections=peer_list) self._gossip = Gossip(self._network) block_sender = BroadcastBlockSender(completer, self._gossip) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_PING, PingHandler(), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_REGISTER, PeerRegisterHandler(), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_UNREGISTER, PeerUnregisterHandler(), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, GossipMessageHandler(), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, signature_verifier.GossipMessageSignatureVerifier(), process_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, GossipBroadcastHandler( gossip=self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, CompleterGossipHandler( completer), network_thread_pool) # Create and configure journal self._journal = Journal( consensus=dev_mode_consensus, block_store=block_store, block_sender=block_sender, transaction_executor=executor, squash_handler=context_manager.get_squash_handler()) self._genesis_controller = GenesisController( context_manager=context_manager, transaction_executor=executor, completer=completer, block_store=block_store, data_dir=data_dir ) completer.set_on_batch_received(self._journal.on_batch_received) completer.set_on_block_received(self._journal.on_block_received) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, 
signature_verifier.BatchListSignatureVerifier(), process_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, CompleterBatchListBroadcastHandler( completer, self._gossip), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_GET_REQUEST, client_handlers.StateGetRequestHandler(lmdb), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_LIST_REQUEST, client_handlers.StateListRequestHandler(lmdb), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST, client_handlers.BlockGetRequestHandler( self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST, client_handlers.BlockListRequestHandler( self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST, client_handlers.StateCurrentRequestHandler( self._journal.get_current_root), thread_pool)
def __init__(self, bind_network, bind_component, endpoint, peering, seeds_list, peer_list, data_dir, config_dir, identity_signing_key, scheduler_type, network_public_key=None, network_private_key=None): """Constructs a validator instance. Args: bind_network (str): the network endpoint bind_component (str): the component endpoint endpoint (str): the zmq-style URI of this validator's publically reachable endpoint peering (str): The type of peering approach. Either 'static' or 'dynamic'. In 'static' mode, no attempted topology buildout occurs -- the validator only attempts to initiate peering connections with endpoints specified in the peer_list. In 'dynamic' mode, the validator will first attempt to initiate peering connections with endpoints specified in the peer_list and then attempt to do a topology buildout starting with peer lists obtained from endpoints in the seeds_list. In either mode, the validator will accept incoming peer requests up to max_peers. seeds_list (list of str): a list of addresses to connect to in order to perform the initial topology buildout peer_list (list of str): a list of peer addresses data_dir (str): path to the data directory config_dir (str): path to the config directory identity_signing_key (str): key validator uses for signing """ db_filename = os.path.join(data_dir, 'merkle-{}.lmdb'.format( bind_network[-2:])) LOGGER.debug('database file is %s', db_filename) merkle_db = LMDBNoLockDatabase(db_filename, 'c') delta_db_filename = os.path.join(data_dir, 'state-deltas-{}.lmdb'.format( bind_network[-2:])) LOGGER.debug('state delta store file is %s', delta_db_filename) state_delta_db = LMDBNoLockDatabase(delta_db_filename, 'c') state_delta_store = StateDeltaStore(state_delta_db) context_manager = ContextManager(merkle_db, state_delta_store) self._context_manager = context_manager state_view_factory = StateViewFactory(merkle_db) block_db_filename = os.path.join(data_dir, 'block-{}.lmdb'.format( bind_network[-2:])) LOGGER.debug('block store file is %s', block_db_filename) block_db = LMDBNoLockDatabase(block_db_filename, 'c') block_store = BlockStore(block_db) batch_tracker = BatchTracker(block_store) block_store.add_update_observer(batch_tracker) # setup network self._dispatcher = Dispatcher() thread_pool = ThreadPoolExecutor(max_workers=10) sig_pool = ThreadPoolExecutor(max_workers=3) self._thread_pool = thread_pool self._sig_pool = sig_pool self._service = Interconnect(bind_component, self._dispatcher, secured=False, heartbeat=False, max_incoming_connections=20, monitor=True) config_file = os.path.join(config_dir, "validator.toml") validator_config = {} if os.path.exists(config_file): with open(config_file) as fd: raw_config = fd.read() validator_config = toml.loads(raw_config) if scheduler_type is None: scheduler_type = validator_config.get("scheduler", "serial") executor = TransactionExecutor( service=self._service, context_manager=context_manager, settings_view_factory=SettingsViewFactory( StateViewFactory(merkle_db)), scheduler_type=scheduler_type, invalid_observers=[batch_tracker]) self._executor = executor self._service.set_check_connections(executor.check_connections) state_delta_processor = StateDeltaProcessor(self._service, state_delta_store, block_store) zmq_identity = hashlib.sha512( time.time().hex().encode()).hexdigest()[:23] network_thread_pool = ThreadPoolExecutor(max_workers=10) self._network_thread_pool = network_thread_pool self._network_dispatcher = Dispatcher() secure = False if network_public_key is not None and network_private_key is not 
None: secure = True self._network = Interconnect( bind_network, dispatcher=self._network_dispatcher, zmq_identity=zmq_identity, secured=secure, server_public_key=network_public_key, server_private_key=network_private_key, heartbeat=True, public_endpoint=endpoint, connection_timeout=30, max_incoming_connections=100) self._gossip = Gossip(self._network, endpoint=endpoint, peering_mode=peering, initial_seed_endpoints=seeds_list, initial_peer_endpoints=peer_list, minimum_peer_connectivity=3, maximum_peer_connectivity=10, topology_check_frequency=1) completer = Completer(block_store, self._gossip) block_sender = BroadcastBlockSender(completer, self._gossip) batch_sender = BroadcastBatchSender(completer, self._gossip) chain_id_manager = ChainIdManager(data_dir) # Create and configure journal self._journal = Journal( block_store=block_store, state_view_factory=StateViewFactory(merkle_db), block_sender=block_sender, batch_sender=batch_sender, transaction_executor=executor, squash_handler=context_manager.get_squash_handler(), identity_signing_key=identity_signing_key, chain_id_manager=chain_id_manager, state_delta_processor=state_delta_processor, data_dir=data_dir, config_dir=config_dir, check_publish_block_frequency=0.1, block_cache_purge_frequency=30, block_cache_keep_time=300, batch_observers=[batch_tracker] ) self._genesis_controller = GenesisController( context_manager=context_manager, transaction_executor=executor, completer=completer, block_store=block_store, state_view_factory=state_view_factory, identity_key=identity_signing_key, data_dir=data_dir, config_dir=config_dir, chain_id_manager=chain_id_manager, batch_sender=batch_sender ) responder = Responder(completer) completer.set_on_batch_received(self._journal.on_batch_received) completer.set_on_block_received(self._journal.on_block_received) self._dispatcher.add_handler( validator_pb2.Message.TP_STATE_GET_REQUEST, tp_state_handlers.TpStateGetHandler(context_manager), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.TP_STATE_SET_REQUEST, tp_state_handlers.TpStateSetHandler(context_manager), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.TP_REGISTER_REQUEST, processor_handlers.ProcessorRegisterHandler(executor.processors), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.TP_UNREGISTER_REQUEST, processor_handlers.ProcessorUnRegisterHandler(executor.processors), thread_pool) # Set up base network handlers self._network_dispatcher.add_handler( validator_pb2.Message.NETWORK_PING, PingHandler(), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.NETWORK_CONNECT, ConnectHandler(network=self._network), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.NETWORK_DISCONNECT, DisconnectHandler(network=self._network), network_thread_pool) # Set up gossip handlers self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST, GetPeersRequestHandler(gossip=self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE, GetPeersResponseHandler(gossip=self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_REGISTER, PeerRegisterHandler(gossip=self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_UNREGISTER, PeerUnregisterHandler(gossip=self._gossip), network_thread_pool) # GOSSIP_MESSAGE 1) Sends acknowledgement to the sender self._network_dispatcher.add_handler( 
validator_pb2.Message.GOSSIP_MESSAGE, GossipMessageHandler(), network_thread_pool) # GOSSIP_MESSAGE 2) Verifies signature self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, signature_verifier.GossipMessageSignatureVerifier(), sig_pool) # GOSSIP_MESSAGE 3) Verifies batch structure self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, structure_verifier.GossipHandlerStructureVerifier(), network_thread_pool) # GOSSIP_MESSAGE 4) Determines if we should broadcast the # message to our peers. It is important that this occur prior # to the sending of the message to the completer, as this step # relies on whether the gossip message has previously been # seen by the validator to determine whether or not forwarding # should occur self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, GossipBroadcastHandler( gossip=self._gossip, completer=completer), network_thread_pool) # GOSSIP_MESSAGE 5) Send message to completer self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, CompleterGossipHandler( completer), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BLOCK_REQUEST, BlockResponderHandler(responder, self._gossip), network_thread_pool) # GOSSIP_BLOCK_RESPONSE 1) Sends ack to the sender self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BLOCK_RESPONSE, GossipBlockResponseHandler(), network_thread_pool) # GOSSIP_BLOCK_RESPONSE 2) Verifies signature self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BLOCK_RESPONSE, signature_verifier.GossipBlockResponseSignatureVerifier(), sig_pool) # GOSSIP_BLOCK_RESPONSE 3) Check batch structure self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BLOCK_RESPONSE, structure_verifier.GossipBlockResponseStructureVerifier(), network_thread_pool) # GOSSIP_BLOCK_RESPONSE 4) Send message to completer self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BLOCK_RESPONSE, CompleterGossipBlockResponseHandler( completer), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BLOCK_RESPONSE, ResponderBlockResponseHandler(responder, self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST, BatchByBatchIdResponderHandler(responder, self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST, BatchByTransactionIdResponderHandler(responder, self._gossip), network_thread_pool) # GOSSIP_BATCH_RESPONSE 1) Sends ack to the sender self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_RESPONSE, GossipBatchResponseHandler(), network_thread_pool) # GOSSIP_BATCH_RESPONSE 2) Verifies signature self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_RESPONSE, signature_verifier.GossipBatchResponseSignatureVerifier(), sig_pool) # GOSSIP_BATCH_RESPONSE 3) Check batch structure self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_RESPONSE, structure_verifier.GossipBatchResponseStructureVerifier(), network_thread_pool) # GOSSIP_BATCH_RESPONSE 4) Send message to completer self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_RESPONSE, CompleterGossipBatchResponseHandler( completer), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_RESPONSE, ResponderBatchResponseHandler(responder, self._gossip), network_thread_pool) 
self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, BatchListPermissionVerifier( settings_view_factory=SettingsViewFactory( StateViewFactory(merkle_db)), current_root_func=self._journal.get_current_root, ), sig_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, signature_verifier.BatchListSignatureVerifier(), sig_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, structure_verifier.BatchListStructureVerifier(), network_thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, CompleterBatchListBroadcastHandler( completer, self._gossip), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, client_handlers.BatchSubmitFinisher(batch_tracker), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST, client_handlers.BatchStatusRequest(batch_tracker), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_LIST_REQUEST, client_handlers.StateListRequest( merkle_db, self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_GET_REQUEST, client_handlers.StateGetRequest( merkle_db, self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST, client_handlers.BlockListRequest(self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST, client_handlers.BlockGetRequest(self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST, client_handlers.BatchListRequest(self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_GET_REQUEST, client_handlers.BatchGetRequest(self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_TRANSACTION_LIST_REQUEST, client_handlers.TransactionListRequest( self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_TRANSACTION_GET_REQUEST, client_handlers.TransactionGetRequest( self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST, client_handlers.StateCurrentRequest( self._journal.get_current_root), thread_pool) # State Delta Subscription Handlers self._dispatcher.add_handler( validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST, StateDeltaSubscriberValidationHandler(state_delta_processor), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST, StateDeltaAddSubscriberHandler(state_delta_processor), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.STATE_DELTA_UNSUBSCRIBE_REQUEST, StateDeltaUnsubscriberHandler(state_delta_processor), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.STATE_DELTA_GET_EVENTS_REQUEST, StateDeltaGetEventsHandler(block_store, state_delta_store), thread_pool)
class TestCompleter():
    def __init__(self):
        self.block_store = BlockStore(
            DictDatabase(indexes=BlockStore.create_index_configuration()))
        self.gossip = MockGossip()
        self.completer = Completer(self.block_store, self.gossip)
        self.completer._on_block_received = self._on_block_received
        self.completer._on_batch_received = self._on_batch_received
        self.completer._has_block = self._has_block
        self._has_block_value = True

        context = create_context('secp256k1')
        private_key = context.new_random_private_key()
        crypto_factory = CryptoFactory(context)
        self.signer = crypto_factory.new_signer(private_key)

        self.blocks = []
        self.batches = []

    def _on_block_received(self, block):
        return self.blocks.append(block.header_signature)

    def _on_batch_received(self, batch):
        return self.batches.append(batch.header_signature)

    def _has_block(self, batch):
        return self._has_block_value

    def _create_transactions(self, count, missing_dep=False):
        txn_list = []

        for _ in range(count):
            payload = {
                'Verb': 'set',
                'Name': 'name' + str(random.randint(0, 100)),
                'Value': random.randint(0, 100)
            }
            intkey_prefix = \
                hashlib.sha512('intkey'.encode('utf-8')).hexdigest()[0:6]

            addr = intkey_prefix + \
                hashlib.sha512(payload["Name"].encode('utf-8')).hexdigest()

            payload_encode = hashlib.sha512(cbor.dumps(payload)).hexdigest()

            header = TransactionHeader(
                signer_public_key=self.signer.get_public_key().as_hex(),
                family_name='intkey',
                family_version='1.0',
                inputs=[addr],
                outputs=[addr],
                dependencies=[],
                batcher_public_key=self.signer.get_public_key().as_hex(),
                payload_sha512=payload_encode)

            if missing_dep:
                header.dependencies.extend(["Missing"])

            header_bytes = header.SerializeToString()

            signature = self.signer.sign(header_bytes)

            transaction = Transaction(
                header=header_bytes,
                payload=cbor.dumps(payload),
                header_signature=signature)

            txn_list.append(transaction)

        return txn_list

    def _create_batches(self, batch_count, txn_count, missing_dep=False):
        batch_list = []

        for _ in range(batch_count):
            txn_list = self._create_transactions(txn_count,
                                                 missing_dep=missing_dep)
            txn_sig_list = [txn.header_signature for txn in txn_list]

            batch_header = BatchHeader(
                signer_public_key=self.signer.get_public_key().as_hex())
            batch_header.transaction_ids.extend(txn_sig_list)

            header_bytes = batch_header.SerializeToString()

            signature = self.signer.sign(header_bytes)

            batch = Batch(
                header=header_bytes,
                transactions=txn_list,
                header_signature=signature)

            batch_list.append(batch)

        return batch_list

    def _create_blocks(self,
                       block_count,
                       batch_count,
                       missing_predecessor=False,
                       missing_batch=False,
                       find_batch=True):
        block_list = []

        for i in range(0, block_count):
            batch_list = self._create_batches(batch_count, 2)
            batch_ids = [batch.header_signature for batch in batch_list]

            if missing_predecessor:
                predecessor = "Missing"
            else:
                predecessor = (block_list[i - 1].header_signature
                               if i > 0 else NULL_BLOCK_IDENTIFIER)

            block_header = BlockHeader(
                signer_public_key=self.signer.get_public_key().as_hex(),
                batch_ids=batch_ids,
                block_num=i,
                previous_block_id=predecessor)

            header_bytes = block_header.SerializeToString()

            signature = self.signer.sign(header_bytes)

            if missing_batch:
                if find_batch:
                    self.completer.add_batch(batch_list[-1])

                batch_list = batch_list[:-1]

            block = Block(
                header=header_bytes,
                batches=batch_list,
                header_signature=signature)

            block_list.append(block)

        return block_list
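# A hedged sketch of how the TestCompleter fixture above is typically driven:
# build signed batches with its helpers, feed them to the completer through
# add_batch() (the same call used in _create_blocks above), and observe the
# _on_batch_received callback through the recorded header signatures. The
# exact Completer behaviour for incomplete input is assumed, not shown here.

tc = TestCompleter()

# A self-contained batch should flow straight through to the callback.
for batch in tc._create_batches(batch_count=1, txn_count=1):
    tc.completer.add_batch(batch)
print(tc.batches)  # expected: one header signature recorded

# A batch whose transaction declares a missing dependency would be expected
# to be held back until that dependency arrives, leaving tc.batches unchanged.
for batch in tc._create_batches(batch_count=1, txn_count=1, missing_dep=True):
    tc.completer.add_batch(batch)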
def __init__(self, bind_network, bind_component, bind_consensus, endpoint, peering, seeds_list, peer_list, data_dir, config_dir, identity_signer, scheduler_type, permissions, minimum_peer_connectivity, maximum_peer_connectivity, state_pruning_block_depth, fork_cache_keep_time, network_public_key=None, network_private_key=None, roles=None, component_thread_pool_workers=10, network_thread_pool_workers=10, signature_thread_pool_workers=3): """Constructs a validator instance. Args: bind_network (str): the network endpoint bind_component (str): the component endpoint endpoint (str): the zmq-style URI of this validator's publically reachable endpoint peering (str): The type of peering approach. Either 'static' or 'dynamic'. In 'static' mode, no attempted topology buildout occurs -- the validator only attempts to initiate peering connections with endpoints specified in the peer_list. In 'dynamic' mode, the validator will first attempt to initiate peering connections with endpoints specified in the peer_list and then attempt to do a topology buildout starting with peer lists obtained from endpoints in the seeds_list. In either mode, the validator will accept incoming peer requests up to max_peers. seeds_list (list of str): a list of addresses to connect to in order to perform the initial topology buildout peer_list (list of str): a list of peer addresses data_dir (str): path to the data directory config_dir (str): path to the config directory identity_signer (str): cryptographic signer the validator uses for signing component_thread_pool_workers (int): number of workers in the component thread pool; defaults to 10. network_thread_pool_workers (int): number of workers in the network thread pool; defaults to 10. signature_thread_pool_workers (int): number of workers in the signature thread pool; defaults to 3. """ # -- Setup Global State Database and Factory -- # global_state_db_filename = os.path.join( data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug( 'global state database file is %s', global_state_db_filename) global_state_db = NativeLmdbDatabase( global_state_db_filename, indexes=MerkleDatabase.create_index_configuration()) state_view_factory = StateViewFactory(global_state_db) native_state_view_factory = NativeStateViewFactory(global_state_db) # -- Setup Receipt Store -- # receipt_db_filename = os.path.join( data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug('txn receipt store file is %s', receipt_db_filename) receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c') receipt_store = TransactionReceiptStore(receipt_db) # -- Setup Block Store -- # block_db_filename = os.path.join( data_dir, 'block-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug('block store file is %s', block_db_filename) block_db = NativeLmdbDatabase( block_db_filename, indexes=BlockStore.create_index_configuration()) block_store = BlockStore(block_db) # The cache keep time for the journal's block cache must be greater # than the cache keep time used by the completer. 
base_keep_time = 1200 block_manager = BlockManager() block_manager.add_commit_store(block_store) block_status_store = BlockValidationResultStore() # -- Setup Thread Pools -- # component_thread_pool = InstrumentedThreadPoolExecutor( max_workers=component_thread_pool_workers, name='Component') network_thread_pool = InstrumentedThreadPoolExecutor( max_workers=network_thread_pool_workers, name='Network') client_thread_pool = InstrumentedThreadPoolExecutor( max_workers=5, name='Client') sig_pool = InstrumentedThreadPoolExecutor( max_workers=signature_thread_pool_workers, name='Signature') # -- Setup Dispatchers -- # component_dispatcher = Dispatcher() network_dispatcher = Dispatcher() # -- Setup Services -- # component_service = Interconnect( bind_component, component_dispatcher, secured=False, heartbeat=False, max_incoming_connections=20, monitor=True, max_future_callback_workers=10) zmq_identity = hashlib.sha512( time.time().hex().encode()).hexdigest()[:23] secure = False if network_public_key is not None and network_private_key is not None: secure = True network_service = Interconnect( bind_network, dispatcher=network_dispatcher, zmq_identity=zmq_identity, secured=secure, server_public_key=network_public_key, server_private_key=network_private_key, heartbeat=True, public_endpoint=endpoint, connection_timeout=120, max_incoming_connections=100, max_future_callback_workers=10, authorize=True, signer=identity_signer, roles=roles) # -- Setup Transaction Execution Platform -- # context_manager = ContextManager(global_state_db) batch_tracker = BatchTracker(block_store.has_batch) settings_cache = SettingsCache( SettingsViewFactory(state_view_factory), ) transaction_executor = TransactionExecutor( service=component_service, context_manager=context_manager, settings_view_factory=SettingsViewFactory(state_view_factory), scheduler_type=scheduler_type, invalid_observers=[batch_tracker]) component_service.set_check_connections( transaction_executor.check_connections) event_broadcaster = EventBroadcaster( component_service, block_store, receipt_store) # -- Consensus Engine -- # consensus_thread_pool = InstrumentedThreadPoolExecutor( max_workers=3, name='Consensus') consensus_dispatcher = Dispatcher() consensus_service = Interconnect( bind_consensus, consensus_dispatcher, secured=False, heartbeat=False, max_incoming_connections=20, max_future_callback_workers=10) consensus_registry = ConsensusRegistry() consensus_notifier = ConsensusNotifier( consensus_service, consensus_registry, identity_signer.get_public_key().as_hex()) # -- Setup P2P Networking -- # gossip = Gossip( network_service, settings_cache, lambda: block_store.chain_head, block_store.chain_head_state_root, consensus_notifier, endpoint=endpoint, peering_mode=peering, initial_seed_endpoints=seeds_list, initial_peer_endpoints=peer_list, minimum_peer_connectivity=minimum_peer_connectivity, maximum_peer_connectivity=maximum_peer_connectivity, topology_check_frequency=1 ) consensus_notifier.set_gossip(gossip) completer = Completer( block_manager=block_manager, transaction_committed=block_store.has_transaction, get_committed_batch_by_id=block_store.get_batch, get_committed_batch_by_txn_id=( block_store.get_batch_by_transaction ), get_chain_head=lambda: unwrap_if_not_none(block_store.chain_head), gossip=gossip, cache_keep_time=base_keep_time, cache_purge_frequency=30, requested_keep_time=300) self._completer = completer block_sender = BroadcastBlockSender(completer, gossip) batch_sender = BroadcastBatchSender(completer, gossip) chain_id_manager = 
ChainIdManager(data_dir) identity_view_factory = IdentityViewFactory( StateViewFactory(global_state_db)) id_cache = IdentityCache(identity_view_factory) # -- Setup Permissioning -- # permission_verifier = PermissionVerifier( permissions, block_store.chain_head_state_root, id_cache) identity_observer = IdentityObserver( to_update=id_cache.invalidate, forked=id_cache.forked) settings_observer = SettingsObserver( to_update=settings_cache.invalidate, forked=settings_cache.forked) # -- Setup Journal -- # batch_injector_factory = DefaultBatchInjectorFactory( state_view_factory=state_view_factory, signer=identity_signer) block_publisher = BlockPublisher( block_manager=block_manager, transaction_executor=transaction_executor, transaction_committed=block_store.has_transaction, batch_committed=block_store.has_batch, state_view_factory=native_state_view_factory, block_sender=block_sender, batch_sender=batch_sender, chain_head=block_store.chain_head, identity_signer=identity_signer, data_dir=data_dir, config_dir=config_dir, permission_verifier=permission_verifier, batch_observers=[batch_tracker], batch_injector_factory=batch_injector_factory) block_validator = BlockValidator( block_manager=block_manager, view_factory=native_state_view_factory, transaction_executor=transaction_executor, block_status_store=block_status_store, permission_verifier=permission_verifier) chain_controller = ChainController( block_store=block_store, block_manager=block_manager, block_validator=block_validator, state_database=global_state_db, chain_head_lock=block_publisher.chain_head_lock, block_status_store=block_status_store, consensus_notifier=consensus_notifier, consensus_registry=consensus_registry, state_pruning_block_depth=state_pruning_block_depth, fork_cache_keep_time=fork_cache_keep_time, data_dir=data_dir, observers=[ event_broadcaster, receipt_store, batch_tracker, identity_observer, settings_observer ]) genesis_controller = GenesisController( context_manager=context_manager, transaction_executor=transaction_executor, block_manager=block_manager, block_store=block_store, state_view_factory=state_view_factory, identity_signer=identity_signer, data_dir=data_dir, config_dir=config_dir, chain_id_manager=chain_id_manager, batch_sender=batch_sender, receipt_store=receipt_store) responder = Responder(completer) completer.set_on_block_received(chain_controller.queue_block) self._incoming_batch_sender = None # -- Register Message Handler -- # network_handlers.add( network_dispatcher, network_service, gossip, completer, responder, network_thread_pool, sig_pool, lambda block_id: block_id in block_manager, self.has_batch, permission_verifier, block_publisher, consensus_notifier) component_handlers.add( component_dispatcher, gossip, context_manager, transaction_executor, completer, block_store, batch_tracker, global_state_db, self.get_chain_head_state_root_hash, receipt_store, event_broadcaster, permission_verifier, component_thread_pool, client_thread_pool, sig_pool, block_publisher, identity_signer.get_public_key().as_hex()) # -- Store Object References -- # self._component_dispatcher = component_dispatcher self._component_service = component_service self._component_thread_pool = component_thread_pool self._network_dispatcher = network_dispatcher self._network_service = network_service self._network_thread_pool = network_thread_pool consensus_proxy = ConsensusProxy( block_manager=block_manager, chain_controller=chain_controller, block_publisher=block_publisher, gossip=gossip, identity_signer=identity_signer, 
settings_view_factory=SettingsViewFactory(state_view_factory), state_view_factory=state_view_factory, consensus_registry=consensus_registry, consensus_notifier=consensus_notifier) consensus_handlers.add( consensus_dispatcher, consensus_thread_pool, consensus_proxy, consensus_notifier) self._block_status_store = block_status_store self._consensus_notifier = consensus_notifier self._consensus_dispatcher = consensus_dispatcher self._consensus_service = consensus_service self._consensus_thread_pool = consensus_thread_pool self._consensus_registry = consensus_registry self._client_thread_pool = client_thread_pool self._sig_pool = sig_pool self._context_manager = context_manager self._transaction_executor = transaction_executor self._genesis_controller = genesis_controller self._gossip = gossip self._block_publisher = block_publisher self._block_validator = block_validator self._chain_controller = chain_controller self._block_validator = block_validator
def __init__(self, bind_network, bind_component, endpoint, peering, seeds_list, peer_list, data_dir, config_dir, identity_signing_key, network_public_key=None, network_private_key=None): """Constructs a validator instance. Args: bind_network (str): the network endpoint bind_component (str): the component endpoint endpoint (str): the zmq-style URI of this validator's publically reachable endpoint peering (str): The type of peering approach. Either 'static' or 'dynamic'. In 'static' mode, no attempted topology buildout occurs -- the validator only attempts to initiate peering connections with endpoints specified in the peer_list. In 'dynamic' mode, the validator will first attempt to initiate peering connections with endpoints specified in the peer_list and then attempt to do a topology buildout starting with peer lists obtained from endpoints in the seeds_list. In either mode, the validator will accept incoming peer requests up to max_peers. seeds_list (list of str): a list of addresses to connect to in order to perform the initial topology buildout peer_list (list of str): a list of peer addresses data_dir (str): path to the data directory config_dir (str): path to the config directory identity_signing_key (str): key validator uses for signing """ db_filename = os.path.join(data_dir, 'merkle-{}.lmdb'.format( bind_network[-2:])) LOGGER.debug('database file is %s', db_filename) merkle_db = LMDBNoLockDatabase(db_filename, 'c') delta_db_filename = os.path.join(data_dir, 'state-deltas-{}.lmdb'.format( bind_network[-2:])) LOGGER.debug('state delta store file is %s', delta_db_filename) state_delta_db = LMDBNoLockDatabase(delta_db_filename, 'c') state_delta_store = StateDeltaStore(state_delta_db) context_manager = ContextManager(merkle_db, state_delta_store) self._context_manager = context_manager state_view_factory = StateViewFactory(merkle_db) block_db_filename = os.path.join(data_dir, 'block-{}.lmdb'.format( bind_network[-2:])) LOGGER.debug('block store file is %s', block_db_filename) block_db = LMDBNoLockDatabase(block_db_filename, 'c') block_store = BlockStore(block_db) batch_tracker = BatchTracker(block_store) block_store.add_update_observer(batch_tracker) # setup network self._dispatcher = Dispatcher() thread_pool = ThreadPoolExecutor(max_workers=10) sig_pool = ThreadPoolExecutor(max_workers=3) self._thread_pool = thread_pool self._sig_pool = sig_pool self._service = Interconnect(bind_component, self._dispatcher, secured=False, heartbeat=False, max_incoming_connections=20, monitor=True) executor = TransactionExecutor( service=self._service, context_manager=context_manager, settings_view_factory=SettingsViewFactory( StateViewFactory(merkle_db)), invalid_observers=[batch_tracker]) self._executor = executor self._service.set_check_connections(executor.check_connections) state_delta_processor = StateDeltaProcessor(self._service, state_delta_store, block_store) zmq_identity = hashlib.sha512( time.time().hex().encode()).hexdigest()[:23] network_thread_pool = ThreadPoolExecutor(max_workers=10) self._network_thread_pool = network_thread_pool self._network_dispatcher = Dispatcher() secure = False if network_public_key is not None and network_private_key is not None: secure = True self._network = Interconnect( bind_network, dispatcher=self._network_dispatcher, zmq_identity=zmq_identity, secured=secure, server_public_key=network_public_key, server_private_key=network_private_key, heartbeat=True, public_endpoint=endpoint, connection_timeout=30, max_incoming_connections=100) self._gossip = 
Gossip(self._network, endpoint=endpoint, peering_mode=peering, initial_seed_endpoints=seeds_list, initial_peer_endpoints=peer_list, minimum_peer_connectivity=3, maximum_peer_connectivity=10, topology_check_frequency=1) completer = Completer(block_store, self._gossip) block_sender = BroadcastBlockSender(completer, self._gossip) batch_sender = BroadcastBatchSender(completer, self._gossip) chain_id_manager = ChainIdManager(data_dir) # Create and configure journal self._journal = Journal( block_store=block_store, state_view_factory=StateViewFactory(merkle_db), block_sender=block_sender, batch_sender=batch_sender, transaction_executor=executor, squash_handler=context_manager.get_squash_handler(), identity_signing_key=identity_signing_key, chain_id_manager=chain_id_manager, state_delta_processor=state_delta_processor, data_dir=data_dir, config_dir=config_dir, check_publish_block_frequency=0.1, block_cache_purge_frequency=30, block_cache_keep_time=300, batch_observers=[batch_tracker] ) self._genesis_controller = GenesisController( context_manager=context_manager, transaction_executor=executor, completer=completer, block_store=block_store, state_view_factory=state_view_factory, identity_key=identity_signing_key, data_dir=data_dir, config_dir=config_dir, chain_id_manager=chain_id_manager, batch_sender=batch_sender ) responder = Responder(completer) completer.set_on_batch_received(self._journal.on_batch_received) completer.set_on_block_received(self._journal.on_block_received) self._dispatcher.add_handler( validator_pb2.Message.TP_STATE_GET_REQUEST, tp_state_handlers.TpStateGetHandler(context_manager), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.TP_STATE_SET_REQUEST, tp_state_handlers.TpStateSetHandler(context_manager), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.TP_REGISTER_REQUEST, processor_handlers.ProcessorRegisterHandler(executor.processors), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.TP_UNREGISTER_REQUEST, processor_handlers.ProcessorUnRegisterHandler(executor.processors), thread_pool) # Set up base network handlers self._network_dispatcher.add_handler( validator_pb2.Message.NETWORK_PING, PingHandler(), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.NETWORK_CONNECT, ConnectHandler(network=self._network), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.NETWORK_DISCONNECT, DisconnectHandler(network=self._network), network_thread_pool) # Set up gossip handlers self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST, GetPeersRequestHandler(gossip=self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE, GetPeersResponseHandler(gossip=self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_REGISTER, PeerRegisterHandler(gossip=self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_UNREGISTER, PeerUnregisterHandler(gossip=self._gossip), network_thread_pool) # GOSSIP_MESSAGE 1) Sends acknowledgement to the sender self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, GossipMessageHandler(), network_thread_pool) # GOSSIP_MESSAGE 2) Verifies signature self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, signature_verifier.GossipMessageSignatureVerifier(), sig_pool) # GOSSIP_MESSAGE 3) Verifies batch structure 
self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, structure_verifier.GossipHandlerStructureVerifier(), network_thread_pool) # GOSSIP_MESSAGE 4) Determines if we should broadcast the # message to our peers. It is important that this occur prior # to the sending of the message to the completer, as this step # relies on whether the gossip message has previously been # seen by the validator to determine whether or not forwarding # should occur self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, GossipBroadcastHandler( gossip=self._gossip, completer=completer), network_thread_pool) # GOSSIP_MESSAGE 5) Send message to completer self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_MESSAGE, CompleterGossipHandler( completer), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BLOCK_REQUEST, BlockResponderHandler(responder, self._gossip), network_thread_pool) # GOSSIP_BLOCK_RESPONSE 1) Sends ack to the sender self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BLOCK_RESPONSE, GossipBlockResponseHandler(), network_thread_pool) # GOSSIP_BLOCK_RESPONSE 2) Verifies signature self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BLOCK_RESPONSE, signature_verifier.GossipBlockResponseSignatureVerifier(), sig_pool) # GOSSIP_BLOCK_RESPONSE 3) Check batch structure self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BLOCK_RESPONSE, structure_verifier.GossipBlockResponseStructureVerifier(), network_thread_pool) # GOSSIP_BLOCK_RESPONSE 4) Send message to completer self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BLOCK_RESPONSE, CompleterGossipBlockResponseHandler( completer), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BLOCK_RESPONSE, ResponderBlockResponseHandler(responder, self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST, BatchByBatchIdResponderHandler(responder, self._gossip), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST, BatchByTransactionIdResponderHandler(responder, self._gossip), network_thread_pool) # GOSSIP_BATCH_RESPONSE 1) Sends ack to the sender self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_RESPONSE, GossipBatchResponseHandler(), network_thread_pool) # GOSSIP_BATCH_RESPONSE 2) Verifies signature self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_RESPONSE, signature_verifier.GossipBatchResponseSignatureVerifier(), sig_pool) # GOSSIP_BATCH_RESPONSE 3) Check batch structure self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_RESPONSE, structure_verifier.GossipBatchResponseStructureVerifier(), network_thread_pool) # GOSSIP_BATCH_RESPONSE 4) Send message to completer self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_RESPONSE, CompleterGossipBatchResponseHandler( completer), network_thread_pool) self._network_dispatcher.add_handler( validator_pb2.Message.GOSSIP_BATCH_RESPONSE, ResponderBatchResponseHandler(responder, self._gossip), network_thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, BatchListPermissionVerifier( settings_view_factory=SettingsViewFactory( StateViewFactory(merkle_db)), current_root_func=self._journal.get_current_root, ), sig_pool) self._dispatcher.add_handler( 
validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, signature_verifier.BatchListSignatureVerifier(), sig_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, structure_verifier.BatchListStructureVerifier(), network_thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, CompleterBatchListBroadcastHandler( completer, self._gossip), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_SUBMIT_REQUEST, client_handlers.BatchSubmitFinisher(batch_tracker), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_STATUS_REQUEST, client_handlers.BatchStatusRequest(batch_tracker), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_LIST_REQUEST, client_handlers.StateListRequest( merkle_db, self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_GET_REQUEST, client_handlers.StateGetRequest( merkle_db, self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BLOCK_LIST_REQUEST, client_handlers.BlockListRequest(self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BLOCK_GET_REQUEST, client_handlers.BlockGetRequest(self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_LIST_REQUEST, client_handlers.BatchListRequest(self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_BATCH_GET_REQUEST, client_handlers.BatchGetRequest(self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_TRANSACTION_LIST_REQUEST, client_handlers.TransactionListRequest( self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_TRANSACTION_GET_REQUEST, client_handlers.TransactionGetRequest( self._journal.get_block_store()), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.CLIENT_STATE_CURRENT_REQUEST, client_handlers.StateCurrentRequest( self._journal.get_current_root), thread_pool) # State Delta Subscription Handlers self._dispatcher.add_handler( validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST, StateDeltaSubscriberValidationHandler(state_delta_processor), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.STATE_DELTA_SUBSCRIBE_REQUEST, StateDeltaAddSubscriberHandler(state_delta_processor), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.STATE_DELTA_UNSUBSCRIBE_REQUEST, StateDeltaUnsubscriberHandler(state_delta_processor), thread_pool) self._dispatcher.add_handler( validator_pb2.Message.STATE_DELTA_GET_EVENTS_REQUEST, GetStateDeltaEventsHandler(block_store, state_delta_store), thread_pool)
from sawtooth_validator.journal.completer import Completer from sawtooth_validator.database.dict_database import DictDatabase from sawtooth_validator.journal.block_store import BlockStore from sawtooth_validator.journal.block_wrapper import NULL_BLOCK_IDENTIFIER from sawtooth_validator.protobuf.batch_pb2 import BatchHeader, Batch from sawtooth_validator.protobuf.block_pb2 import BlockHeader, Block from sawtooth_validator.protobuf.transaction_pb2 import TransactionHeader, Transaction block_store = BlockStore( DictDatabase(indexes=BlockStore.create_index_configuration())) gossip = MockGossip() context = create_context('secp256k1') crypto_factory = CryptoFactory(context) private_key = context.new_random_private_key() signer = crypto_factory.new_signer(private_key) completer = Completer(block_store, gossip) def _create_transactions(count, missing_dep=False): txn_list = [] for _ in range(count): payload = { 'Verb': 'set', 'Name': 'name' + str(random.randint(0, 100)), 'Value': random.randint(0, 100) } intkey_prefix = \ hashlib.sha512('intkey'.encode('utf-8')).hexdigest()[0:6] addr = intkey_prefix + \
class TestCompleter(unittest.TestCase): def setUp(self): self.block_store = BlockStore(DictDatabase( indexes=BlockStore.create_index_configuration())) self.gossip = MockGossip() self.completer = Completer(self.block_store, self.gossip) self.completer._on_block_received = self._on_block_received self.completer._on_batch_received = self._on_batch_received self.completer._has_block = self._has_block self._has_block_value = True context = create_context('secp256k1') private_key = context.new_random_private_key() crypto_factory = CryptoFactory(context) self.signer = crypto_factory.new_signer(private_key) self.blocks = [] self.batches = [] def _on_block_received(self, block): return self.blocks.append(block.header_signature) def _on_batch_received(self, batch): return self.batches.append(batch.header_signature) def _has_block(self, batch): return self._has_block_value def _create_transactions(self, count, missing_dep=False): txn_list = [] for _ in range(count): payload = { 'Verb': 'set', 'Name': 'name' + str(random.randint(0, 100)), 'Value': random.randint(0, 100) } intkey_prefix = \ hashlib.sha512('intkey'.encode('utf-8')).hexdigest()[0:6] addr = intkey_prefix + \ hashlib.sha512(payload["Name"].encode('utf-8')).hexdigest() payload_encode = hashlib.sha512(cbor.dumps(payload)).hexdigest() header = TransactionHeader( signer_public_key=self.signer.get_public_key().as_hex(), family_name='intkey', family_version='1.0', inputs=[addr], outputs=[addr], dependencies=[], batcher_public_key=self.signer.get_public_key().as_hex(), payload_sha512=payload_encode) if missing_dep: header.dependencies.extend(["Missing"]) header_bytes = header.SerializeToString() signature = self.signer.sign(header_bytes) transaction = Transaction( header=header_bytes, payload=cbor.dumps(payload), header_signature=signature) txn_list.append(transaction) return txn_list def _create_batches(self, batch_count, txn_count, missing_dep=False): batch_list = [] for _ in range(batch_count): txn_list = self._create_transactions(txn_count, missing_dep=missing_dep) txn_sig_list = [txn.header_signature for txn in txn_list] batch_header = BatchHeader( signer_public_key=self.signer.get_public_key().as_hex()) batch_header.transaction_ids.extend(txn_sig_list) header_bytes = batch_header.SerializeToString() signature = self.signer.sign(header_bytes) batch = Batch( header=header_bytes, transactions=txn_list, header_signature=signature) batch_list.append(batch) return batch_list def _create_blocks(self, block_count, batch_count, missing_predecessor=False, missing_batch=False, find_batch=True): block_list = [] for i in range(0, block_count): batch_list = self._create_batches(batch_count, 2) batch_ids = [batch.header_signature for batch in batch_list] if missing_predecessor: predecessor = "Missing" else: predecessor = (block_list[i - 1].header_signature if i > 0 else NULL_BLOCK_IDENTIFIER) block_header = BlockHeader( signer_public_key=self.signer.get_public_key().as_hex(), batch_ids=batch_ids, block_num=i, previous_block_id=predecessor) header_bytes = block_header.SerializeToString() signature = self.signer.sign(header_bytes) if missing_batch: if find_batch: self.completer.add_batch(batch_list[-1]) batch_list = batch_list[:-1] block = Block( header=header_bytes, batches=batch_list, header_signature=signature) block_list.append(block) return block_list def test_good_block(self): """ Add completed block to completer. Block should be passed to on_block_recieved. 
""" block = self._create_blocks(1, 1)[0] self.completer.add_block(block) self.assertIn(block.header_signature, self.blocks) def test_duplicate_block(self): """ Submit same block twice. """ block = self._create_blocks(1, 1)[0] self.completer.add_block(block) self.completer.add_block(block) self.assertIn(block.header_signature, self.blocks) self.assertEqual(len(self.blocks), 1) def test_block_missing_predecessor(self): """ The block is completed but the predecessor is missing. """ block = self._create_blocks(1, 1, missing_predecessor=True)[0] self._has_block_value = False self.completer.add_block(block) self.assertEqual(len(self.blocks), 0) self.assertIn("Missing", self.gossip.requested_blocks) header = BlockHeader(previous_block_id=NULL_BLOCK_IDENTIFIER) missing_block = Block(header_signature="Missing", header=header.SerializeToString()) self._has_block_value = True self.completer.add_block(missing_block) self.assertIn(block.header_signature, self.blocks) self.assertEqual( block, self.completer.get_block(block.header_signature).get_block()) def test_block_with_extra_batch(self): """ The block has a batch that is not in the batch_id list. """ block = self._create_blocks(1, 1)[0] batches = self._create_batches(1, 1, True) block.batches.extend(batches) self.completer.add_block(block) self.assertEqual(len(self.blocks), 0) def test_block_missing_batch(self): """ The block is a missing batch and the batch is in the cache. The Block will be build and passed to on_block_recieved. This puts the block in the self.blocks list. """ block = self._create_blocks(1, 2, missing_batch=True)[0] self.completer.add_block(block) self.assertIn(block.header_signature, self.blocks) self.assertEqual( block, self.completer.get_block(block.header_signature).get_block()) def test_block_missing_batch_not_in_cache(self): """ The block is a missing batch and the batch is not in the cache. The batch will be requested and the block will not be passed to on_block_recieved. """ block = self._create_blocks( 1, 3, missing_batch=True, find_batch=False)[0] self.completer.add_block(block) header = BlockHeader() header.ParseFromString(block.header) self.assertIn(header.batch_ids[-1], self.gossip.requested_batches) def test_block_batches_wrong_order(self): """ The block has all of its batches but they are in the wrong order. The batches will be reordered and the block will be passed to on_block_recieved. """ block = self._create_blocks(1, 6)[0] batches = list(block.batches) random.shuffle(batches) del block.batches[:] block.batches.extend(batches) self.completer.add_block(block) self.assertIn(block.header_signature, self.blocks) def test_block_batches_wrong_batch(self): """ The block has all the correct number of batches but one is not in the batch_id list. This block should be dropped. """ block = self._create_blocks(1, 6)[0] batch = Batch(header_signature="Extra") batches = list(block.batches) batches[-1] = batch block.batches.extend(batches) self.completer.add_block(block) self.assertEqual(len(self.blocks), 0) def test_good_batch(self): """ Add complete batch to completer. The batch should be passed to on_batch_received. """ batch = self._create_batches(1, 1)[0] self.completer.add_batch(batch) self.assertIn(batch.header_signature, self.batches) self.assertEqual(batch, self.completer.get_batch(batch.header_signature)) def test_batch_with_missing_dep(self): """ Add batch to completer that has a missing dependency. The missing transaction's batch should be requested add the missing batch is then added to the completer. 
The incomplete batch should be rechecked and passed to on_batch_received. """ batch = self._create_batches(1, 1, missing_dep=True)[0] self.completer.add_batch(batch) self.assertIn("Missing", self.gossip.requested_batches_by_txn_id) missing = Transaction(header_signature="Missing") missing_batch = Batch(header_signature="Missing_batch", transactions=[missing]) self.completer.add_batch(missing_batch) self.assertIn(missing_batch.header_signature, self.batches) self.assertIn(batch.header_signature, self.batches) self.assertEqual(missing_batch, self.completer.get_batch_by_transaction("Missing"))
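# The Completer tests above and below rely on a MockGossip test double whose
# definition is not shown in this section; the assertions only read the lists
# it records (requested_blocks, requested_batches, requested_batches_by_txn_id).
# The sketch below is a minimal stand-in consistent with those assertions.
# The broadcast_* method names are assumptions about what the completer calls,
# not a confirmed validator API.
class MockGossip:
    """Records the completer's block/batch requests instead of gossiping."""

    def __init__(self):
        self.requested_blocks = []
        self.requested_batches = []
        self.requested_batches_by_txn_id = []

    def broadcast_block_request(self, block_id):
        self.requested_blocks.append(block_id)

    def broadcast_batch_by_batch_id_request(self, batch_id):
        self.requested_batches.append(batch_id)

    def broadcast_batch_by_transaction_id_request(self, txn_id):
        self.requested_batches_by_txn_id.append(txn_id)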
class TestCompleter(unittest.TestCase): def setUp(self): self.block_store = BlockStore({}) self.gossip = MockGossip() self.completer = Completer(self.block_store, self.gossip) self.completer._on_block_received = self._on_block_received self.completer._on_batch_received = self._on_batch_received self.private_key = signing.generate_privkey() self.public_key = signing.encode_pubkey( signing.generate_pubkey(self.private_key), "hex") self.blocks = [] self.batches = [] def _on_block_received(self, block): print("Block received") return self.blocks.append(block.header_signature) def _on_batch_received(self, batch): print("Batch received") return self.batches.append(batch.header_signature) def _create_transactions(self, count, missing_dep=False): txn_list = [] for i in range(count): payload = { 'Verb': 'set', 'Name': 'name' + str(random.randint(0, 100)), 'Value': random.randint(0, 100) } intkey_prefix = \ hashlib.sha512('intkey'.encode('utf-8')).hexdigest()[0:6] addr = intkey_prefix + \ hashlib.sha512(payload["Name"].encode('utf-8')).hexdigest() payload_encode = hashlib.sha512(cbor.dumps(payload)).hexdigest() header = TransactionHeader(signer_pubkey=self.public_key, family_name='intkey', family_version='1.0', inputs=[addr], outputs=[addr], dependencies=[], payload_encoding="application/cbor", payload_sha512=payload_encode) if missing_dep: header.dependencies.extend(["Missing"]) header_bytes = header.SerializeToString() signature = signing.sign(header_bytes, self.private_key) transaction = Transaction(header=header_bytes, payload=cbor.dumps(payload), header_signature=signature) txn_list.append(transaction) return txn_list def _generate_id(self): return hashlib.sha512(''.join([ random.choice(string.ascii_letters) for _ in range(0, 1024) ]).encode()).hexdigest() def _create_batches(self, batch_count, txn_count, missing_dep=False): batch_list = [] for i in range(batch_count): txn_list = self._create_transactions(txn_count, missing_dep=missing_dep) txn_sig_list = [txn.header_signature for txn in txn_list] batch_header = BatchHeader(signer_pubkey=self.public_key) batch_header.transaction_ids.extend(txn_sig_list) header_bytes = batch_header.SerializeToString() signature = signing.sign(header_bytes, self.private_key) batch = Batch(header=header_bytes, transactions=txn_list, header_signature=signature) batch_list.append(batch) return batch_list def _create_blocks(self, block_count, batch_count, missing_predecessor=False, missing_batch=False, find_batch=True): block_list = [] pred = 0 for i in range(0, block_count): batch_list = self._create_batches(batch_count, 2) batch_ids = [batch.header_signature for batch in batch_list] if missing_predecessor: predecessor = "Missing" else: predecessor = (block_list[i - 1].header_signature if i > 0 else NULL_BLOCK_IDENTIFIER) block_header = BlockHeader(signer_pubkey=self.public_key, batch_ids=batch_ids, block_num=i, previous_block_id=predecessor) header_bytes = block_header.SerializeToString() signature = signing.sign(header_bytes, self.private_key) if missing_batch: if find_batch: self.completer.add_batch(batch_list[-1]) batch_list = batch_list[:-1] block = Block(header=header_bytes, batches=batch_list, header_signature=signature) block_list.append(block) return block_list def test_good_block(self): """ Add completed block to completer. Block should be passed to on_block_recieved. """ block = self._create_blocks(1, 1)[0] self.completer.add_block(block) self.assertIn(block.header_signature, self.blocks) def test_duplicate_block(self): """ Submit same block twice. 
""" block = block = self._create_blocks(1, 1)[0] self.completer.add_block(block) self.completer.add_block(block) self.assertIn(block.header_signature, self.blocks) self.assertEquals(len(self.blocks), 1) def test_block_missing_predecessor(self): """ The block is completed but the predecessor is missing. """ block = self._create_blocks(1, 1, missing_predecessor=True)[0] self.completer.add_block(block) self.assertEquals(len(self.blocks), 0) self.assertIn("Missing", self.gossip.requested_blocks) header = BlockHeader(previous_block_id=NULL_BLOCK_IDENTIFIER) missing_block = Block(header_signature="Missing", header=header.SerializeToString()) self.completer.add_block(missing_block) self.assertIn(block.header_signature, self.blocks) self.assertEquals( block, self.completer.get_block(block.header_signature).get_block()) def test_block_with_extra_batch(self): """ The block has a batch that is not in the batch_id list. """ block = self._create_blocks(1, 1)[0] batches = self._create_batches(1, 1, True) block.batches.extend(batches) self.completer.add_block(block) self.assertEquals(len(self.blocks), 0) def test_block_missing_batch(self): """ The block is a missing batch and the batch is in the cache. The Block will be build and passed to on_block_recieved. This puts the block in the self.blocks list. """ block = self._create_blocks(1, 2, missing_batch=True)[0] self.completer.add_block(block) self.assertIn(block.header_signature, self.blocks) self.assertEquals( block, self.completer.get_block(block.header_signature).get_block()) def test_block_missing_batch_not_in_cache(self): """ The block is a missing batch and the batch is not in the cache. The batch will be requested and the block will not be passed to on_block_recieved. """ block = self._create_blocks(1, 3, missing_batch=True, find_batch=False)[0] self.completer.add_block(block) header = BlockHeader() header.ParseFromString(block.header) self.assertIn(header.batch_ids[-1], self.gossip.requested_batches) def test_block_batches_wrong_order(self): """ The block has all of its batches but they are in the wrong order. The batches will be reordered and the block will be passed to on_block_recieved. """ block = self._create_blocks(1, 6)[0] batches = list(block.batches) random.shuffle(batches) del block.batches[:] block.batches.extend(batches) self.completer.add_block(block) self.assertIn(block.header_signature, self.blocks) def test_block_batches_wrong_batch(self): """ The block has all the correct number of batches but one is not in the batch_id list. This block should be dropped. """ block = self._create_blocks(1, 6)[0] batch = Batch(header_signature="Extra") batches = list(block.batches) batches[-1] = batch block.batches.extend(batches) self.completer.add_block(block) self.assertEquals(len(self.blocks), 0) def test_good_batch(self): """ Add complete batch to completer. The batch should be passed to on_batch_received. """ batch = self._create_batches(1, 1)[0] self.completer.add_batch(batch) self.assertIn(batch.header_signature, self.batches) self.assertEquals(batch, self.completer.get_batch(batch.header_signature)) def test_batch_with_missing_dep(self): """ Add batch to completer that has a missing dependency. The missing transaction's batch should be requested add the missing batch is then added to the completer. The incomplete batch should be rechecked and passed to on_batch_received. 
""" batch = self._create_batches(1, 1, missing_dep=True)[0] self.completer.add_batch(batch) self.assertIn("Missing", self.gossip.requested_batches_by_transactin_id) missing = Transaction(header_signature="Missing") missing_batch = Batch(header_signature="Missing_batch", transactions=[missing]) self.completer.add_batch(missing_batch) self.assertIn(missing_batch.header_signature, self.batches) self.assertIn(batch.header_signature, self.batches) self.assertEquals(missing_batch, self.completer.get_batch_by_transaction("Missing"))
class TestCompleter(unittest.TestCase): def setUp(self): self.dir = tempfile.mkdtemp() self.block_db = NativeLmdbDatabase( os.path.join(self.dir, 'block.lmdb'), BlockStore.create_index_configuration()) self.block_store = BlockStore(self.block_db) self.block_manager = BlockManager() self.block_manager.add_commit_store(self.block_store) self.gossip = MockGossip() self.completer = Completer( block_manager=self.block_manager, transaction_committed=self.block_store.has_transaction, get_committed_batch_by_id=self.block_store.get_batch, get_committed_batch_by_txn_id=( self.block_store.get_batch_by_transaction), gossip=self.gossip) self.completer.set_get_chain_head(lambda: self.block_store.chain_head) self.completer.set_on_block_received(self._on_block_received) self.completer.set_on_batch_received(self._on_batch_received) self._has_block_value = True context = create_context('secp256k1') private_key = context.new_random_private_key() crypto_factory = CryptoFactory(context) self.signer = crypto_factory.new_signer(private_key) self.blocks = [] self.batches = [] def _on_block_received(self, block_id): return self.blocks.append(block_id) def _on_batch_received(self, batch): return self.batches.append(batch.header_signature) def _has_block(self, batch): return self._has_block_value def _create_transactions(self, count, missing_dep=False): txn_list = [] for _ in range(count): payload = { 'Verb': 'set', 'Name': 'name' + str(random.randint(0, 100)), 'Value': random.randint(0, 100) } intkey_prefix = \ hashlib.sha512('intkey'.encode('utf-8')).hexdigest()[0:6] addr = intkey_prefix + \ hashlib.sha512(payload["Name"].encode('utf-8')).hexdigest() payload_encode = hashlib.sha512(cbor.dumps(payload)).hexdigest() header = TransactionHeader( signer_public_key=self.signer.get_public_key().as_hex(), family_name='intkey', family_version='1.0', inputs=[addr], outputs=[addr], dependencies=[], batcher_public_key=self.signer.get_public_key().as_hex(), payload_sha512=payload_encode) if missing_dep: header.dependencies.extend(["Missing"]) header_bytes = header.SerializeToString() signature = self.signer.sign(header_bytes) transaction = Transaction(header=header_bytes, payload=cbor.dumps(payload), header_signature=signature) txn_list.append(transaction) return txn_list def _create_batches(self, batch_count, txn_count, missing_dep=False): batch_list = [] for _ in range(batch_count): txn_list = self._create_transactions(txn_count, missing_dep=missing_dep) txn_sig_list = [txn.header_signature for txn in txn_list] batch_header = BatchHeader( signer_public_key=self.signer.get_public_key().as_hex()) batch_header.transaction_ids.extend(txn_sig_list) header_bytes = batch_header.SerializeToString() signature = self.signer.sign(header_bytes) batch = Batch(header=header_bytes, transactions=txn_list, header_signature=signature) batch_list.append(batch) return batch_list def _create_blocks(self, block_count, batch_count, missing_predecessor=False, missing_batch=False, find_batch=True): block_list = [] for i in range(0, block_count): batch_list = self._create_batches(batch_count, 2) batch_ids = [batch.header_signature for batch in batch_list] if missing_predecessor: predecessor = "Missing" else: predecessor = (block_list[i - 1].header_signature if i > 0 else NULL_BLOCK_IDENTIFIER) block_header = BlockHeader( signer_public_key=self.signer.get_public_key().as_hex(), batch_ids=batch_ids, block_num=i, previous_block_id=predecessor) header_bytes = block_header.SerializeToString() signature = self.signer.sign(header_bytes) if missing_batch: if 
find_batch: self.completer.add_batch(batch_list[-1]) batch_list = batch_list[:-1] block = Block(header=header_bytes, batches=batch_list, header_signature=signature) block_list.append(block) return block_list def test_good_block(self): """ Add completed block to completer. Block should be passed to on_block_received. """ block = self._create_blocks(1, 1)[0] self.completer.add_block(block) self.assertIn(block.header_signature, self.blocks) def test_duplicate_block(self): """ Submit same block twice. """ block = self._create_blocks(1, 1)[0] self.completer.add_block(block) self.completer.add_block(block) self.assertIn(block.header_signature, self.blocks) self.assertEqual(len(self.blocks), 1) def test_block_missing_predecessor(self): """ The block is completed but the predecessor is missing. """ block = self._create_blocks(1, 1, missing_predecessor=True)[0] self._has_block_value = False self.completer.add_block(block) self.assertEqual(len(self.blocks), 0) self.assertIn("Missing", self.gossip.requested_blocks) header = BlockHeader(previous_block_id=NULL_BLOCK_IDENTIFIER) missing_block = Block(header_signature="Missing", header=header.SerializeToString()) self._has_block_value = True self.completer.add_block(missing_block) self.assertIn(block.header_signature, self.blocks) self.assertEqual(block, self.completer.get_block(block.header_signature)) def test_block_with_extra_batch(self): """ The block has a batch that is not in the batch_id list. """ block = self._create_blocks(1, 1)[0] batches = self._create_batches(1, 1, True) block.batches.extend(batches) self.completer.add_block(block) self.assertEqual(len(self.blocks), 0) def test_block_missing_batch(self): """ The block is missing a batch and the batch is in the cache. The Block will be built and passed to on_block_received. This puts the block in the self.blocks list. """ block = self._create_blocks(1, 2, missing_batch=True)[0] self.completer.add_block(block) self.assertIn(block.header_signature, self.blocks) self.assertEqual(block, self.completer.get_block(block.header_signature)) def test_block_missing_batch_not_in_cache(self): """ The block is missing a batch and the batch is not in the cache. The batch will be requested and the block will not be passed to on_block_received. """ block = self._create_blocks(1, 3, missing_batch=True, find_batch=False)[0] self.completer.add_block(block) header = BlockHeader() header.ParseFromString(block.header) self.assertIn(header.batch_ids[-1], self.gossip.requested_batches) def test_block_batches_wrong_order(self): """ The block has all of its batches but they are in the wrong order. The batches will be reordered and the block will be passed to on_block_received. """ block = self._create_blocks(1, 6)[0] batches = list(block.batches) random.shuffle(batches) del block.batches[:] block.batches.extend(batches) self.completer.add_block(block) self.assertIn(block.header_signature, self.blocks) def test_block_batches_wrong_batch(self): """ The block has the correct number of batches but one is not in the batch_id list. This block should be dropped. """ block = self._create_blocks(1, 6)[0] batch = Batch(header_signature="Extra") batches = list(block.batches) batches[-1] = batch block.batches.extend(batches) self.completer.add_block(block) self.assertEqual(len(self.blocks), 0) def test_good_batch(self): """ Add complete batch to completer. The batch should be passed to on_batch_received.
""" batch = self._create_batches(1, 1)[0] self.completer.add_batch(batch) self.assertIn(batch.header_signature, self.batches) self.assertEqual(batch, self.completer.get_batch(batch.header_signature)) def test_batch_with_missing_dep(self): """ Add batch to completer that has a missing dependency. The missing transaction's batch should be requested add the missing batch is then added to the completer. The incomplete batch should be rechecked and passed to on_batch_received. """ batch = self._create_batches(1, 1, missing_dep=True)[0] self.completer.add_batch(batch) self.assertIn("Missing", self.gossip.requested_batches_by_txn_id) missing = Transaction(header_signature="Missing") missing_batch = Batch(header_signature="Missing_batch", transactions=[missing]) self.completer.add_batch(missing_batch) self.assertIn(missing_batch.header_signature, self.batches) self.assertIn(batch.header_signature, self.batches) self.assertEqual(missing_batch, self.completer.get_batch_by_transaction("Missing"))
class Dispatcher(Thread): def __init__(self): super(Dispatcher, self).__init__() self.on_batch_received = None self.on_block_received = None self.on_block_requested = None self.incoming_msg_queue = None self.completer_queue = queue.Queue() self.completer_conditon = Condition() self.condition = None self._exit = False self.completer = None def create_completer(self): self.completer = Completer(self.on_block_received, self.completer_conditon, self.completer_queue) self.completer.start() def set_incoming_msg_queue(self, msg_queue): self.incoming_msg_queue = msg_queue def set_condition(self, condition): self.condition = condition def set_on_block_received(self, on_block_received_func): self.on_block_received = on_block_received_func def set_on_batch_received(self, on_batch_received_func): self.on_batch_received = on_batch_received_func def set_on_block_requested(self, on_block_requested_func): self.on_block_requested = on_block_requested_func def stop(self): self._exit = True self.completer.stop() with self.completer_conditon: self.completer_conditon.notify_all() def run(self): if self.incoming_msg_queue is None or self.condition is None: LOGGER.warning("Dispatcher can not be used if incoming_msg_queue " "or condition is not set.") return while True: if self._exit: return try: request = self.incoming_msg_queue.get(block=False) if request.content_type == "Block": try: block = Block() block.ParseFromString(request.content) LOGGER.debug("Block given to Completer %s", block.header_signature) self.completer_queue.put_nowait(block) with self.completer_conditon: self.completer_conditon.notify_all() except DecodeError as e: # what to do with a bad msg LOGGER.warning( "Problem decoding GossipMessage for " "Block, %s", e) elif request.content_type == "Batch": try: batch = Batch() batch.ParseFromString(request.content) self.completer.add_batch(batch) self.on_batch_received(batch) except DecodeError as e: LOGGER.warning( "Problem decoding GossipMessage for " "Batch, %s", e) elif request.content_type == "BlockRequest": block_id = str(request.content, "utf-8") self.on_block_requested(block_id) elif request.content_type == "Test": LOGGER.debug("Dispatch Handle Test") except queue.Empty: with self.condition: self.condition.wait()
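# A minimal wiring sketch for the threaded Dispatcher above, assuming a plain
# queue.Queue of incoming requests and caller-supplied callbacks; the helper
# name and the producer shown in the trailing comment are illustrative only.
import queue
from threading import Condition


def _wire_dispatcher(on_block, on_batch, on_block_requested):
    incoming = queue.Queue()
    condition = Condition()

    dispatcher = Dispatcher()
    dispatcher.set_incoming_msg_queue(incoming)
    dispatcher.set_condition(condition)
    # The block callback must be set before create_completer(), which hands
    # it to the threaded Completer.
    dispatcher.set_on_block_received(on_block)
    dispatcher.set_on_batch_received(on_batch)
    dispatcher.set_on_block_requested(on_block_requested)
    dispatcher.create_completer()
    dispatcher.start()
    return dispatcher, incoming, condition

# Producers enqueue a request and wake the run() loop:
#     incoming.put(request)
#     with condition:
#         condition.notify_all()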
def __init__(self, bind_network, bind_component, bind_consensus, endpoint, peering, seeds_list, peer_list, data_dir, config_dir, identity_signer, scheduler_type, permissions, minimum_peer_connectivity, maximum_peer_connectivity, state_pruning_block_depth, network_public_key=None, network_private_key=None, roles=None): """Constructs a validator instance. Args: bind_network (str): the network endpoint bind_component (str): the component endpoint endpoint (str): the zmq-style URI of this validator's publically reachable endpoint peering (str): The type of peering approach. Either 'static' or 'dynamic'. In 'static' mode, no attempted topology buildout occurs -- the validator only attempts to initiate peering connections with endpoints specified in the peer_list. In 'dynamic' mode, the validator will first attempt to initiate peering connections with endpoints specified in the peer_list and then attempt to do a topology buildout starting with peer lists obtained from endpoints in the seeds_list. In either mode, the validator will accept incoming peer requests up to max_peers. seeds_list (list of str): a list of addresses to connect to in order to perform the initial topology buildout peer_list (list of str): a list of peer addresses data_dir (str): path to the data directory config_dir (str): path to the config directory identity_signer (str): cryptographic signer the validator uses for signing """ # -- Setup Global State Database and Factory -- # global_state_db_filename = os.path.join( data_dir, 'merkle-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug('global state database file is %s', global_state_db_filename) global_state_db = NativeLmdbDatabase( global_state_db_filename, indexes=MerkleDatabase.create_index_configuration()) state_view_factory = StateViewFactory(global_state_db) # -- Setup Receipt Store -- # receipt_db_filename = os.path.join( data_dir, 'txn_receipts-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug('txn receipt store file is %s', receipt_db_filename) receipt_db = LMDBNoLockDatabase(receipt_db_filename, 'c') receipt_store = TransactionReceiptStore(receipt_db) # -- Setup Block Store -- # block_db_filename = os.path.join( data_dir, 'block-{}.lmdb'.format(bind_network[-2:])) LOGGER.debug('block store file is %s', block_db_filename) block_db = IndexedDatabase( block_db_filename, BlockStore.serialize_block, BlockStore.deserialize_block, flag='c', indexes=BlockStore.create_index_configuration()) block_store = BlockStore(block_db) # The cache keep time for the journal's block cache must be greater # than the cache keep time used by the completer. 
base_keep_time = 1200 block_cache = BlockCache(block_store, keep_time=int(base_keep_time * 9 / 8), purge_frequency=30) # -- Setup Thread Pools -- # component_thread_pool = InstrumentedThreadPoolExecutor( max_workers=10, name='Component') network_thread_pool = InstrumentedThreadPoolExecutor(max_workers=10, name='Network') client_thread_pool = InstrumentedThreadPoolExecutor(max_workers=5, name='Client') sig_pool = InstrumentedThreadPoolExecutor(max_workers=3, name='Signature') # -- Setup Dispatchers -- # component_dispatcher = Dispatcher() network_dispatcher = Dispatcher() # -- Setup Services -- # component_service = Interconnect(bind_component, component_dispatcher, secured=False, heartbeat=False, max_incoming_connections=20, monitor=True, max_future_callback_workers=10) zmq_identity = hashlib.sha512( time.time().hex().encode()).hexdigest()[:23] secure = False if network_public_key is not None and network_private_key is not None: secure = True network_service = Interconnect(bind_network, dispatcher=network_dispatcher, zmq_identity=zmq_identity, secured=secure, server_public_key=network_public_key, server_private_key=network_private_key, heartbeat=True, public_endpoint=endpoint, connection_timeout=120, max_incoming_connections=100, max_future_callback_workers=10, authorize=True, signer=identity_signer, roles=roles) # -- Setup Transaction Execution Platform -- # context_manager = ContextManager(global_state_db) batch_tracker = BatchTracker(block_store) settings_cache = SettingsCache( SettingsViewFactory(state_view_factory), ) transaction_executor = TransactionExecutor( service=component_service, context_manager=context_manager, settings_view_factory=SettingsViewFactory(state_view_factory), scheduler_type=scheduler_type, invalid_observers=[batch_tracker]) component_service.set_check_connections( transaction_executor.check_connections) event_broadcaster = EventBroadcaster(component_service, block_store, receipt_store) # -- Setup P2P Networking -- # gossip = Gossip(network_service, settings_cache, lambda: block_store.chain_head, block_store.chain_head_state_root, endpoint=endpoint, peering_mode=peering, initial_seed_endpoints=seeds_list, initial_peer_endpoints=peer_list, minimum_peer_connectivity=minimum_peer_connectivity, maximum_peer_connectivity=maximum_peer_connectivity, topology_check_frequency=1) completer = Completer(block_store, gossip, cache_keep_time=base_keep_time, cache_purge_frequency=30, requested_keep_time=300) block_sender = BroadcastBlockSender(completer, gossip) batch_sender = BroadcastBatchSender(completer, gossip) chain_id_manager = ChainIdManager(data_dir) identity_view_factory = IdentityViewFactory( StateViewFactory(global_state_db)) id_cache = IdentityCache(identity_view_factory) # -- Setup Permissioning -- # permission_verifier = PermissionVerifier( permissions, block_store.chain_head_state_root, id_cache) identity_observer = IdentityObserver(to_update=id_cache.invalidate, forked=id_cache.forked) settings_observer = SettingsObserver( to_update=settings_cache.invalidate, forked=settings_cache.forked) # -- Consensus Engine -- # consensus_thread_pool = InstrumentedThreadPoolExecutor( max_workers=3, name='Consensus') consensus_dispatcher = Dispatcher() consensus_service = Interconnect(bind_consensus, consensus_dispatcher, secured=False, heartbeat=False, max_incoming_connections=20, monitor=True, max_future_callback_workers=10) consensus_notifier = ConsensusNotifier(consensus_service) # -- Setup Journal -- # batch_injector_factory = DefaultBatchInjectorFactory( 
block_cache=block_cache, state_view_factory=state_view_factory, signer=identity_signer) block_publisher = BlockPublisher( transaction_executor=transaction_executor, block_cache=block_cache, state_view_factory=state_view_factory, settings_cache=settings_cache, block_sender=block_sender, batch_sender=batch_sender, chain_head=block_store.chain_head, identity_signer=identity_signer, data_dir=data_dir, config_dir=config_dir, permission_verifier=permission_verifier, check_publish_block_frequency=0.1, batch_observers=[batch_tracker], batch_injector_factory=batch_injector_factory) block_publisher_batch_sender = block_publisher.batch_sender() block_validator = BlockValidator( block_cache=block_cache, state_view_factory=state_view_factory, transaction_executor=transaction_executor, identity_signer=identity_signer, data_dir=data_dir, config_dir=config_dir, permission_verifier=permission_verifier) chain_controller = ChainController( block_store=block_store, block_cache=block_cache, block_validator=block_validator, state_database=global_state_db, chain_head_lock=block_publisher.chain_head_lock, state_pruning_block_depth=state_pruning_block_depth, data_dir=data_dir, observers=[ event_broadcaster, receipt_store, batch_tracker, identity_observer, settings_observer ]) genesis_controller = GenesisController( context_manager=context_manager, transaction_executor=transaction_executor, completer=completer, block_store=block_store, state_view_factory=state_view_factory, identity_signer=identity_signer, data_dir=data_dir, config_dir=config_dir, chain_id_manager=chain_id_manager, batch_sender=batch_sender) responder = Responder(completer) completer.set_on_batch_received(block_publisher_batch_sender.send) completer.set_on_block_received(chain_controller.queue_block) completer.set_chain_has_block(chain_controller.has_block) # -- Register Message Handler -- # network_handlers.add(network_dispatcher, network_service, gossip, completer, responder, network_thread_pool, sig_pool, chain_controller.has_block, block_publisher.has_batch, permission_verifier, block_publisher, consensus_notifier) component_handlers.add(component_dispatcher, gossip, context_manager, transaction_executor, completer, block_store, batch_tracker, global_state_db, self.get_chain_head_state_root_hash, receipt_store, event_broadcaster, permission_verifier, component_thread_pool, client_thread_pool, sig_pool, block_publisher) # -- Store Object References -- # self._component_dispatcher = component_dispatcher self._component_service = component_service self._component_thread_pool = component_thread_pool self._network_dispatcher = network_dispatcher self._network_service = network_service self._network_thread_pool = network_thread_pool consensus_proxy = ConsensusProxy( block_cache=block_cache, chain_controller=chain_controller, block_publisher=block_publisher, gossip=gossip, identity_signer=identity_signer, settings_view_factory=SettingsViewFactory(state_view_factory), state_view_factory=state_view_factory) consensus_handlers.add(consensus_dispatcher, consensus_thread_pool, consensus_proxy) self._consensus_dispatcher = consensus_dispatcher self._consensus_service = consensus_service self._consensus_thread_pool = consensus_thread_pool self._client_thread_pool = client_thread_pool self._sig_pool = sig_pool self._context_manager = context_manager self._transaction_executor = transaction_executor self._genesis_controller = genesis_controller self._gossip = gossip self._block_publisher = block_publisher self._chain_controller = chain_controller 
self._block_validator = block_validator
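# A quick check of the keep-time relationship configured earlier in this
# constructor: the journal's block cache must retain entries longer than the
# completer's cache does (values taken from the code above).
base_keep_time = 1200                                # completer keep time, seconds
block_cache_keep_time = int(base_keep_time * 9 / 8)  # 1350 seconds
assert block_cache_keep_time > base_keep_time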
def create_completer(self): self.completer = Completer(self.on_block_received, self.completer_conditon, self.completer_queue) self.completer.start()