def _read_signing_keys(key_filename): """Reads the given file as a WIF formatted key. Args: key_filename: The filename where the key is stored. If None, defaults to the default key for the current user. Returns: tuple (str, str): the public and private key pair Raises: CliException: If unable to read the file. """ filename = key_filename if filename is None: filename = os.path.join(os.path.expanduser('~'), '.sawtooth', 'keys', getpass.getuser() + '.priv') try: with open(filename, 'r') as key_file: signing_key = key_file.read().strip() public_key = signing.generate_public_key(signing_key) return public_key, signing_key except IOError as e: raise CliException('Unable to read key file: {}'.format(str(e)))
def test_authorization_challenge_submit_bad_signature(self):
    """
    Test the AuthorizationChallengeSubmitHandler returns an
    AuthorizationViolation and closes the connection if the signature
    is not verified.
    """
    private_key = signing.generate_private_key()
    public_key = signing.generate_public_key(private_key)
    payload = os.urandom(10)

    signature = signing.sign(payload, private_key)

    auth_challenge_submit = AuthorizationChallengeSubmit(
        public_key="other",
        payload=payload,
        signature=signature,
        roles=[RoleType.Value("NETWORK")])

    roles = {"network": AuthorizationType.TRUST}

    network = MockNetwork(
        roles,
        connection_status={
            "connection_id": ConnectionStatus.AUTH_CHALLENGE_REQUEST
        })
    permission_verifier = MockPermissionVerifier()
    gossip = MockGossip()
    handler = AuthorizationChallengeSubmitHandler(
        network, permission_verifier, gossip)
    handler_status = handler.handle(
        "connection_id",
        auth_challenge_submit.SerializeToString())
    self.assertEqual(handler_status.status, HandlerStatus.RETURN_AND_CLOSE)
    self.assertEqual(
        handler_status.message_type,
        validator_pb2.Message.AUTHORIZATION_VIOLATION)
def __init__(self,
             context_manager,
             transaction_executor,
             completer,
             block_store,
             state_view_factory,
             identity_key,
             data_dir,
             config_dir,
             chain_id_manager,
             batch_sender):
    """Creates a GenesisController.

    Args:
        context_manager (:obj:`ContextManager`): A `ContextManager`
            instance.
        transaction_executor (:obj:`TransactionExecutor`): A
            TransactionExecutor instance.
        completer (:obj:`Completer`): A Completer instance.
        block_store (:obj:`BlockStore`): The block store, with dict-like
            access.
        state_view_factory (:obj:`StateViewFactory`): The state view
            factory for creating state views during processing.
        identity_key (str): A private key used for signing blocks, in hex.
        data_dir (str): The directory for data files.
        config_dir (str): The directory for config files.
        chain_id_manager (ChainIdManager): utility class to manage the
            chain id file.
        batch_sender: interface to broadcast batches to the network.
    """
    self._context_manager = context_manager
    self._transaction_executor = transaction_executor
    self._completer = completer
    self._block_store = block_store
    self._state_view_factory = state_view_factory
    self._identity_priv_key = identity_key
    self._identity_public_key = \
        signing.generate_public_key(self._identity_priv_key)
    self._data_dir = data_dir
    self._config_dir = config_dir
    self._chain_id_manager = chain_id_manager
    self._batch_sender = batch_sender
def _read_signing_keys(key_filename): """Reads the given file as a default-encoded private key Args: key_filename: The filename where the key is stored. If None, defaults to the default key for the validator Returns: tuple (str, str): the public and private key pair Raises: CliException: If unable to read the file. """ filename = key_filename if key_filename is None: filename = os.path.join(config.get_key_dir(), 'validator.priv') try: with open(filename, 'r') as key_file: signing_key = key_file.read().strip() public_key = signing.generate_public_key(signing_key) return public_key, signing_key except IOError as e: raise CliException('Unable to read key file: {}'.format(str(e)))
def create_chain(num=10):
    priv_key = signer.generate_private_key()
    pub_key = signer.generate_public_key(priv_key)

    counter = 1
    previous_block_id = "0000000000000000"
    blocks = []
    while counter <= num:
        current_block_id = uuid4().hex
        txns = [
            t[0]
            for t in [
                create_transaction(
                    payload=uuid4().hex.encode(),
                    private_key=priv_key,
                    public_key=pub_key)
                for _ in range(20)
            ]
        ]

        txn_ids = [t.header_signature for t in txns]
        batch = create_batch(
            transactions=txns,
            public_key=pub_key,
            private_key=priv_key)

        blk_w = create_block(
            counter,
            previous_block_id,
            current_block_id,
            batches=[batch])
        blocks.append((current_block_id, blk_w, txn_ids))

        counter += 1
        previous_block_id = current_block_id

    return blocks
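# Hedged usage sketch (assumed, not from the source): each entry of the
# returned list is a (block_id, block_wrapper, txn_ids) tuple, and blocks
# are chained via previous_block_id with one 20-transaction batch each.
#
#     for block_id, blk_w, txn_ids in create_chain(num=3):
#         assert len(txn_ids) == 20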
def __init__(self, delegate, args):
    super(NoopWorkload, self).__init__(delegate, args)
    self._urls = []
    self._lock = threading.Lock()
    self._delegate = delegate
    self._private_key = signing.generate_private_key()
    self._public_key = signing.generate_public_key(self._private_key)
def _create_key(self, key_name='validator.priv'):
    private_key = signing.generate_private_key()
    priv_file = os.path.join(self._temp_dir, key_name)
    with open(priv_file, 'w') as priv_fd:
        priv_fd.write(private_key)

    return signing.generate_public_key(private_key)
def do_keygen(args):
    if args.key_name is not None:
        key_name = args.key_name
    else:
        key_name = getpass.getuser()

    if args.key_dir is not None:
        key_dir = args.key_dir
        if not os.path.exists(key_dir):
            raise CliException('no such directory: {}'.format(key_dir))
    else:
        key_dir = os.path.join(os.path.expanduser('~'), '.sawtooth', 'keys')
        if not os.path.exists(key_dir):
            if not args.quiet:
                print('creating key directory: {}'.format(key_dir))
            try:
                os.makedirs(key_dir)
            except IOError as e:
                raise CliException('IOError: {}'.format(str(e)))

    priv_filename = os.path.join(key_dir, key_name + '.priv')
    pub_filename = os.path.join(key_dir, key_name + '.pub')

    if not args.force:
        file_exists = False
        for filename in [priv_filename, pub_filename]:
            if os.path.exists(filename):
                file_exists = True
                print('file exists: {}'.format(filename), file=sys.stderr)
        if file_exists:
            raise CliException(
                'files exist, rerun with --force to overwrite existing files')

    private_key = signing.generate_private_key()
    public_key = signing.generate_public_key(private_key)

    try:
        priv_exists = os.path.exists(priv_filename)
        with open(priv_filename, 'w') as priv_fd:
            if not args.quiet:
                if priv_exists:
                    print('overwriting file: {}'.format(priv_filename))
                else:
                    print('writing file: {}'.format(priv_filename))
            priv_fd.write(private_key)
            priv_fd.write('\n')

        pub_exists = os.path.exists(pub_filename)
        with open(pub_filename, 'w') as pub_fd:
            if not args.quiet:
                if pub_exists:
                    print('overwriting file: {}'.format(pub_filename))
                else:
                    print('writing file: {}'.format(pub_filename))
            pub_fd.write(public_key)
            pub_fd.write('\n')
    except IOError as ioe:
        raise CliException('IOError: {}'.format(str(ioe)))
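# Hedged example (assumed harness, not from the source): do_keygen reads
# plain attributes off its args object, so it can be driven directly with
# an argparse Namespace. An explicit key_dir must already exist; pass
# key_dir=None to use (and create) ~/.sawtooth/keys.
#
#     from argparse import Namespace
#     do_keygen(Namespace(key_name='alice', key_dir=None,
#                         force=True, quiet=True))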
def __init__(self,
             consensus_module,
             block_cache,
             new_block,
             state_view_factory,
             done_cb,
             executor,
             squash_handler,
             identity_signing_key,
             data_dir,
             config_dir,
             permission_verifier):
    """Initialize the BlockValidator

    Args:
        consensus_module: The consensus module that contains the
            implementation of the consensus algorithm to use for block
            validation.
        block_cache: The cache of all recent blocks and the processing
            state associated with them.
        new_block: The block to validate.
        state_view_factory: The factory object used to create state views.
        done_cb: The method to call when block validation is complete.
        executor: The thread pool to process block validations.
        squash_handler: A parameter passed when creating transaction
            schedulers.
        identity_signing_key: Private key for signing blocks.
        data_dir: Path to location where persistent data for the
            consensus module can be stored.
        config_dir: Path to location where config data for the
            consensus module can be found.
        permission_verifier: The object used for permission checks on
            batches and transactions.
    Returns:
        None
    """
    self._consensus_module = consensus_module
    self._block_cache = block_cache
    self._chain_commit_state = ChainCommitState(
        self._block_cache.block_store, [])
    self._new_block = new_block

    # Set during execution of the BlockValidation to the current
    # chain_head at that time.
    self._chain_head = None

    self._state_view_factory = state_view_factory
    self._done_cb = done_cb
    self._executor = executor
    self._squash_handler = squash_handler
    self._identity_signing_key = identity_signing_key
    self._identity_public_key = \
        signing.generate_public_key(self._identity_signing_key)
    self._data_dir = data_dir
    self._config_dir = config_dir
    self._result = {
        'new_block': new_block,
        'chain_head': None,
        'new_chain': [],
        'cur_chain': [],
        'committed_batches': [],
        'uncommitted_batches': [],
        'execution_results': [],
        'num_transactions': 0
    }
    self._permission_verifier = permission_verifier

    self._validation_rule_enforcer = \
        ValidationRuleEnforcer(SettingsViewFactory(state_view_factory))
def __init__(self, rest_endpoint):
    self.priv_key = signer.generate_private_key()
    self.pub_key = signer.generate_public_key(self.priv_key)

    self._priv_key_file = os.path.join("/tmp", uuid4().hex[:20])
    with open(self._priv_key_file, mode='w') as out:
        out.write(self.priv_key)

    self._rest_endpoint = rest_endpoint
def __init__(self, rest_endpoint):
    self.priv_key = signer.generate_private_key()
    self.pub_key = signer.generate_public_key(self.priv_key)

    self._namespace = hashlib.sha512('intkey'.encode()).hexdigest()[:6]
    self._factory = MessageFactory(
        'intkey',
        '1.0',
        self._namespace)
    self._rest = RestClient(rest_endpoint)
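# For reference, the derived prefix above evaluates to '1cf126', the
# conventional intkey namespace:
#
#     >>> hashlib.sha512('intkey'.encode()).hexdigest()[:6]
#     '1cf126'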
def test_set_status(self): """Tests that set_status() has the correct behavior. Basically: 1. Adds a batch which has two transactions. 2. Calls next_transaction() to get the first Transaction. 3. Calls next_transaction() to verify that it returns None. 4. Calls set_status() to mark the first transaction applied. 5. Calls next_transaction() to get the second Transaction. Step 3 returns None because the first transaction hasn't been marked as applied, and the SerialScheduler will only return one not-applied Transaction at a time. Step 5 is expected to return the second Transaction, not None, since the first Transaction was marked as applied in the previous step. """ private_key = signing.generate_private_key() public_key = signing.generate_public_key(private_key) txns = [] for name in ['a', 'b']: txn, _ = create_transaction( payload=name.encode(), private_key=private_key, public_key=public_key) txns.append(txn) batch = create_batch( transactions=txns, private_key=private_key, public_key=public_key) self.scheduler.add_batch(batch) scheduled_txn_info = self.scheduler.next_transaction() self.assertIsNotNone(scheduled_txn_info) self.assertEqual('a', scheduled_txn_info.txn.payload.decode()) self.assertIsNone(self.scheduler.next_transaction()) c_id = self.context_manager.create_context( self.first_state_root, base_contexts=scheduled_txn_info.base_context_ids, inputs=[], outputs=[]) self.scheduler.set_transaction_execution_result( scheduled_txn_info.txn.header_signature, is_valid=True, context_id=c_id) scheduled_txn_info = self.scheduler.next_transaction() self.assertIsNotNone(scheduled_txn_info) self.assertEqual('b', scheduled_txn_info.txn.payload.decode())
def setUp(self):
    self.block_store = BlockStore({})
    self.gossip = MockGossip()
    self.completer = Completer(self.block_store, self.gossip)
    self.completer._on_block_received = self._on_block_received
    self.completer._on_batch_received = self._on_batch_received
    self.private_key = signing.generate_private_key()
    self.public_key = signing.generate_public_key(self.private_key)
    self.blocks = []
    self.batches = []
def __init__(self, delegate, args):
    super(IntKeyWorkload, self).__init__(delegate, args)
    self._auth_info = args.auth_info
    self._urls = []
    self._pending_batches = {}
    self._lock = threading.Lock()
    self._delegate = delegate
    self._deps = {}
    self._private_key = signing.generate_private_key()
    self._public_key = signing.generate_public_key(self._private_key)
def do_keygen(args): """Executes the key generation operation, given the parsed arguments. Args: args (:obj:`Namespace`): The parsed args. """ if args.key_name is not None: key_name = args.key_name else: key_name = 'validator' key_dir = get_key_dir() if not os.path.exists(key_dir): raise CliException("Key directory does not exist: {}".format(key_dir)) priv_filename = os.path.join(key_dir, key_name + '.priv') pub_filename = os.path.join(key_dir, key_name + '.pub') if not args.force: file_exists = False for filename in [priv_filename, pub_filename]: if os.path.exists(filename): file_exists = True print('file exists: {}'.format(filename), file=sys.stderr) if file_exists: raise CliException( 'files exist, rerun with --force to overwrite existing files') private_key = signing.generate_private_key() public_key = signing.generate_public_key(private_key) try: priv_exists = os.path.exists(priv_filename) with open(priv_filename, 'w') as priv_fd: if not args.quiet: if priv_exists: print('overwriting file: {}'.format(priv_filename)) else: print('writing file: {}'.format(priv_filename)) priv_fd.write(private_key) priv_fd.write('\n') pub_exists = os.path.exists(pub_filename) with open(pub_filename, 'w') as pub_fd: if not args.quiet: if pub_exists: print('overwriting file: {}'.format(pub_filename)) else: print('writing file: {}'.format(pub_filename)) pub_fd.write(public_key) pub_fd.write('\n') except IOError as ioe: raise CliException('IOError: {}'.format(str(ioe)))
def setUp(self):
    self.block_store = BlockStore(DictDatabase(
        indexes=BlockStore.create_index_configuration()))
    self.gossip = MockGossip()
    self.completer = Completer(self.block_store, self.gossip)
    self.completer._on_block_received = self._on_block_received
    self.completer._on_batch_received = self._on_batch_received
    self.private_key = signing.generate_private_key()
    self.public_key = signing.generate_public_key(self.private_key)
    self.blocks = []
    self.batches = []
def __init__(self, identity_signing_key, batch_sender):
    """Initialize the BatchPublisher.

    :param identity_signing_key: the validator's signing key.
    :param batch_sender: interface to an object that will post the
        built batch to the network.
    """
    self.identity_signing_key = identity_signing_key
    self._batch_sender = batch_sender
    self._identity_signing_key = identity_signing_key
    self._identity_public_key = \
        signing.generate_public_key(self._identity_signing_key)
def _create_batches(self):
    test_yaml = self._yaml_from_file()
    priv_key = signing.generate_private_key()
    pub_key = signing.generate_public_key(priv_key)

    batches, batch_results = self._process_batches(
        yaml_batches=test_yaml,
        priv_key=priv_key,
        pub_key=pub_key)

    self._batch_results = batch_results
    self._batches = batches
def test_transaction_order(self): """Tests the that transactions are returned in order added. Adds three batches with varying number of transactions, then tests that they are returned in the appropriate order when using an iterator. This test also creates a second iterator and verifies that both iterators return the same transactions. This test also finalizes the scheduler and verifies that StopIteration is thrown by the iterator. """ private_key = signing.generate_private_key() public_key = signing.generate_public_key(private_key) txns = [] for names in [['a', 'b', 'c'], ['d', 'e'], ['f', 'g', 'h', 'i']]: batch_txns = [] for name in names: txn, _ = create_transaction( payload=name.encode(), private_key=private_key, public_key=public_key) batch_txns.append(txn) txns.append(txn) batch = create_batch( transactions=batch_txns, private_key=private_key, public_key=public_key) self.scheduler.add_batch(batch) self.scheduler.finalize() iterable1 = iter(self.scheduler) iterable2 = iter(self.scheduler) for txn in txns: scheduled_txn_info = next(iterable1) self.assertEqual(scheduled_txn_info, next(iterable2)) self.assertIsNotNone(scheduled_txn_info) self.assertEqual(txn.payload, scheduled_txn_info.txn.payload) c_id = self.context_manager.create_context( self.first_state_root, base_contexts=scheduled_txn_info.base_context_ids, inputs=[], outputs=[]) self.scheduler.set_transaction_execution_result( txn.header_signature, True, c_id) with self.assertRaises(StopIteration): next(iterable1)
def setUp(self):
    self.private_key = signing.generate_private_key()
    self.public_key = signing.generate_public_key(self.private_key)
    self._identity_view_factory = MockIdentityViewFactory()
    self.permissions = {}
    self._identity_cache = IdentityCache(
        self._identity_view_factory,
        self._current_root_func)
    self.permission_verifier = \
        PermissionVerifier(
            permissions=self.permissions,
            current_root_func=self._current_root_func,
            identity_cache=self._identity_cache)
def test_completion_on_last_result(self): """Tests the that the schedule is not marked complete until the last result is set. Adds three batches with varying number of transactions, then tests that they are returned in the appropriate order when using an iterator. Test that the value of `complete` is false until the last value. This test also finalizes the scheduler and verifies that StopIteration is thrown by the iterator, and the complete is true in the at the end. """ private_key = signing.generate_private_key() public_key = signing.generate_public_key(private_key) txns = [] for names in [['a', 'b', 'c'], ['d', 'e'], ['f', 'g', 'h', 'i']]: batch_txns = [] for name in names: txn, _ = create_transaction( payload=name.encode(), private_key=private_key, public_key=public_key) batch_txns.append(txn) txns.append(txn) batch = create_batch( transactions=batch_txns, private_key=private_key, public_key=public_key) self.scheduler.add_batch(batch) self.scheduler.finalize() iterable1 = iter(self.scheduler) for txn in txns: scheduled_txn_info = next(iterable1) self.assertFalse(self.scheduler.complete(block=False)) c_id = self.context_manager.create_context( self.first_state_root, base_contexts=scheduled_txn_info.base_context_ids, inputs=[], outputs=[]) self.scheduler.set_transaction_execution_result( txn.header_signature, True, c_id) self.assertTrue(self.scheduler.complete(block=False)) with self.assertRaises(StopIteration): next(iterable1)
def __init__(self, block_store, state_view_factory, signing_key):
    """
    Args:
        block_store (:obj:`BlockStore`): The block store, for passing to
            batch injectors that require it.
        state_view_factory (:obj:`StateViewFactory`): The state view
            factory, for passing to injectors that require it.
        signing_key (str): The signing key of the validator; the
            validator's public key is derived from it.
    """
    self._block_store = block_store
    self._state_view_factory = state_view_factory
    self._signing_key = signing_key
    self._public_key = signing.generate_public_key(signing_key)
def do_generate(args):
    private_key = signing.generate_private_key()
    public_key = signing.generate_public_key(private_key)

    words = generate_word_list(args.pool_size)

    batches = []
    start = time.time()
    total_txn_count = 0
    for i in range(0, args.count):
        txns = []
        for _ in range(0, random.randint(1, args.batch_max_size)):
            txn = create_intkey_transaction(
                verb=random.choice(['inc', 'dec']),
                name=random.choice(words),
                value=1,
                private_key=private_key,
                public_key=public_key)
            total_txn_count += 1
            txns.append(txn)

        batch = create_batch(
            transactions=txns,
            private_key=private_key,
            public_key=public_key)

        batches.append(batch)

        if i % 100 == 0 and i != 0:
            stop = time.time()

            txn_count = 0
            for batch in batches[-100:]:
                txn_count += len(batch.transactions)

            fmt = 'batches {}, batch/sec: {:.2f}, txns: {}, txns/sec: {:.2f}'
            print(fmt.format(
                str(i),
                100 / (stop - start),
                str(total_txn_count),
                txn_count / (stop - start)))
            start = stop

    batch_list = batch_pb2.BatchList(batches=batches)

    print("Writing to {}...".format(args.output))
    with open(args.output, "wb") as fd:
        fd.write(batch_list.SerializeToString())
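# Hedged round-trip sketch (assumed, not from the source): the output file
# is a serialized BatchList protobuf, so it can be read back with
# ParseFromString.
#
#     batch_list = batch_pb2.BatchList()
#     with open('batches.intkey', 'rb') as fd:  # hypothetical filename
#         batch_list.ParseFromString(fd.read())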
def _completion_on_finalize_only_when_done(self, scheduler):
    """Tests that complete will only be true when the scheduler
    has had finalize called and all txns have an execution result set.

    Notes:
        Adds one batch and transaction, then verifies the iterable
        returns that transaction. Finalizes, then sets the execution
        result. The schedule should not be marked as complete until
        after the execution result is set.

        This check is useful in making sure the finalize() can occur
        after all set_transaction_execution_result()s have been
        performed, because in a normal situation, finalize will probably
        occur prior to those calls.

    This test should work for both a serial and parallel scheduler.
    """
    private_key = signing.generate_private_key()
    public_key = signing.generate_public_key(private_key)

    txn, _ = create_transaction(
        payload='a'.encode(),
        private_key=private_key,
        public_key=public_key)
    batch = create_batch(
        transactions=[txn],
        private_key=private_key,
        public_key=public_key)
    iterable = iter(scheduler)
    scheduler.add_batch(batch)

    scheduled_txn_info = next(iterable)
    self.assertIsNotNone(scheduled_txn_info)
    self.assertEqual(txn.payload, scheduled_txn_info.txn.payload)
    scheduler.finalize()
    self.assertFalse(scheduler.complete(block=False))
    scheduler.set_transaction_execution_result(
        txn.header_signature, False, None)
    self.assertTrue(scheduler.complete(block=False))

    with self.assertRaises(StopIteration):
        next(iterable)
def _completion_on_finalize(self, scheduler):
    """Tests that iteration will stop when finalized is called on an
    otherwise complete scheduler.

    Notes:
        Adds one batch and transaction, then verifies the iterable
        returns that transaction. Sets the execution result and then
        calls finalize. Since the scheduler is complete (all
        transactions have had results set, and it's been finalized),
        we should get a StopIteration.

        This check is useful in making sure the finalize() can occur
        after all set_transaction_execution_result()s have been
        performed, because in a normal situation, finalize will probably
        occur prior to those calls.

    This test should work for both a serial and parallel scheduler.
    """
    private_key = signing.generate_private_key()
    public_key = signing.generate_public_key(private_key)

    txn, _ = create_transaction(
        payload='a'.encode(),
        private_key=private_key,
        public_key=public_key)
    batch = create_batch(
        transactions=[txn],
        private_key=private_key,
        public_key=public_key)
    iterable = iter(scheduler)
    scheduler.add_batch(batch)

    scheduled_txn_info = next(iterable)
    self.assertIsNotNone(scheduled_txn_info)
    self.assertEqual(txn.payload, scheduled_txn_info.txn.payload)
    scheduler.set_transaction_execution_result(
        txn.header_signature, False, None)
    scheduler.finalize()

    with self.assertRaises(StopIteration):
        next(iterable)
def do_generate(args, batches, keys):
    private_key = signing.generate_private_key()
    public_key = signing.generate_public_key(private_key)

    start = time.time()
    total_txn_count = 0
    for i in range(0, args.count):
        txns = []
        for _ in range(0, random.randint(1, args.max_batch_size)):
            name = random.choice(list(keys))
            txn = create_intkey_transaction(
                verb=random.choice(['inc', 'dec']),
                name=name,
                value=random.randint(1, 10),
                deps=[keys[name]],
                private_key=private_key,
                public_key=public_key)
            total_txn_count += 1
            txns.append(txn)

        batch = create_batch(
            transactions=txns,
            private_key=private_key,
            public_key=public_key)

        batches.append(batch)

        if i % 100 == 0 and i != 0:
            stop = time.time()

            txn_count = 0
            for batch in batches[-100:]:
                txn_count += len(batch.transactions)

            fmt = 'batches {}, batch/sec: {:.2f}, txns: {}, txns/sec: {:.2f}'
            print(fmt.format(
                str(i),
                100 / (stop - start),
                str(total_txn_count),
                txn_count / (stop - start)))
            start = stop
def __init__(self, private=None, public=None):
    self._factory = MessageFactory(
        family_name="sawtooth_validator_registry",
        family_version="1.0",
        namespace="6a4372",
        private=private,
        public=public
    )

    self.public_key_hash = hashlib.sha256(public.encode()).hexdigest()

    self._report_private_key = \
        serialization.load_pem_private_key(
            self.__REPORT_PRIVATE_KEY_PEM__.encode(),
            password=None,
            backend=backends.default_backend())

    # First we need to create a public/private key pair for the PoET
    # enclave to use.
    self._poet_private_key = \
        "1f70fa2518077ad18483f48e77882d11983b537fa5f7cf158684d2c670fe4f1f"
    self.poet_public_key = \
        signing.generate_public_key(self._poet_private_key)
def __init__(self, with_genesis=True):
    self.block_sender = MockBlockSender()
    self.batch_sender = MockBatchSender()
    self.block_store = BlockStore(
        DictDatabase(indexes=BlockStore.create_index_configuration()))
    self.block_cache = BlockCache(self.block_store)
    self.state_db = {}

    # add the mock reference to the consensus
    consensus_setting_addr = SettingsView.setting_address(
        'sawtooth.consensus.algorithm')
    self.state_db[consensus_setting_addr] = _setting_entry(
        'sawtooth.consensus.algorithm', 'test_journal.mock_consensus')

    self.state_view_factory = MockStateViewFactory(self.state_db)
    self.signing_key = signing.generate_private_key()
    self.public_key = signing.generate_public_key(self.signing_key)

    self.identity_signing_key = signing.generate_private_key()
    chain_head = None
    if with_genesis:
        self.genesis_block = self.generate_genesis_block()
        self.set_chain_head(self.genesis_block)
        chain_head = self.genesis_block

    self.block_publisher = BlockPublisher(
        transaction_executor=MockTransactionExecutor(),
        block_cache=self.block_cache,
        state_view_factory=self.state_view_factory,
        block_sender=self.block_sender,
        batch_sender=self.batch_sender,
        squash_handler=None,
        chain_head=chain_head,
        identity_signing_key=self.identity_signing_key,
        data_dir=None,
        config_dir=None,
        permission_verifier=MockPermissionVerifier(),
        check_publish_block_frequency=0.1,
        batch_observers=[])
def _read_signing_keys(key_filename): """Reads the given file as a WIF formatted key. Args: key_filename: The filename where the key is stored. Returns: tuple (str, str): the public and private key pair Raises: CliException: If unable to read the file. """ filename = key_filename try: with open(filename, 'r') as key_file: signing_key = key_file.read().strip() public_key = signing.generate_public_key(signing_key) return public_key, signing_key except IOError as e: raise CliException('Unable to read key file: {}'.format(str(e)))
def __init__(self,
             block_cache,
             block_sender,
             state_view_factory,
             transaction_executor,
             chain_head_lock,
             on_chain_updated,
             squash_handler,
             chain_id_manager,
             identity_signing_key,
             data_dir,
             config_dir,
             permission_verifier,
             chain_observers,
             thread_pool=None,
             metrics_registry=None):
    """Initialize the ChainController

    Args:
        block_cache: The cache of all recent blocks and the processing
            state associated with them.
        block_sender: an interface object used to send blocks to the
            network.
        state_view_factory: The factory object used to create state views.
        transaction_executor: The TransactionExecutor used to produce
            schedulers for batch validation.
        chain_head_lock: Lock to hold while the chain head is being
            updated; this prevents other components that depend on the
            chain head and the BlockStore from having the BlockStore
            change under them. This lock is only for core Journal
            components (BlockPublisher and ChainController); other
            components should handle block not found errors from the
            BlockStore explicitly.
        on_chain_updated: The callback to call to notify the rest of the
            system that the head block in the chain has been changed.
        squash_handler: a parameter passed when creating transaction
            schedulers.
        chain_id_manager: The ChainIdManager instance.
        identity_signing_key: Private key for signing blocks.
        data_dir: path to location where persistent data for the
            consensus module can be stored.
        config_dir: path to location where config data for the
            consensus module can be found.
        chain_observers (list of :obj:`ChainObserver`): A list of chain
            observers.
    Returns:
        None
    """
    self._lock = RLock()
    self._chain_head_lock = chain_head_lock
    self._block_cache = block_cache
    self._block_store = block_cache.block_store
    self._state_view_factory = state_view_factory
    self._block_sender = block_sender
    self._transaction_executor = transaction_executor
    self._notify_on_chain_updated = on_chain_updated
    self._squash_handler = squash_handler
    self._identity_signing_key = identity_signing_key
    self._identity_public_key = \
        signing.generate_public_key(self._identity_signing_key)
    self._data_dir = data_dir
    self._config_dir = config_dir

    # blocks that are currently being processed
    self._blocks_processing = {}
    # blocks whose previous block is still being processed; once that
    # processing completes, the block will be scheduled for validation
    self._blocks_pending = {}

    self._chain_id_manager = chain_id_manager

    self._chain_head = None

    self._permission_verifier = permission_verifier
    self._chain_observers = chain_observers

    if metrics_registry:
        self._chain_head_gauge = GaugeWrapper(
            metrics_registry.gauge('chain_head', default='no chain head'))
        self._committed_transactions_count = CounterWrapper(
            metrics_registry.counter('committed_transactions_count'))
        self._block_num_gauge = GaugeWrapper(
            metrics_registry.gauge('block_num'))
    else:
        self._chain_head_gauge = GaugeWrapper()
        self._committed_transactions_count = CounterWrapper()
        self._block_num_gauge = GaugeWrapper()

    self._block_queue = queue.Queue()
    self._thread_pool = \
        InstrumentedThreadPoolExecutor(1) \
        if thread_pool is None else thread_pool
    self._chain_thread = None

    # Only run this after all member variables have been bound
    self._set_chain_head_from_block_store()