def do_populate(args, batches, keys):
    """Append one batch of intkey 'set' transactions to `batches`.

    Args:
        args: Parsed CLI arguments (unused here; kept for interface parity).
        batches (list): Output list; the created batch is appended to it.
        keys (dict): Maps each intkey name to the header signature of the
            transaction that set it; updated in place so callers can build
            dependency lists later.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    txns = []
    # Iterate the key names directly instead of rebuilding list(keys) on
    # every pass (the original indexed list(keys)[i], which is O(n^2)).
    for name in list(keys):
        txn = create_intkey_transaction(
            verb='set',
            name=name,
            value=random.randint(9000, 100000),
            deps=[],
            private_key=private_key,
            public_key=public_key)
        txns.append(txn)

        # Establish the signature of the txn associated with the word
        # so we can create good dependencies later
        keys[name] = txn.header_signature

    batch = create_batch(
        transactions=txns,
        private_key=private_key,
        public_key=public_key)

    batches.append(batch)
def do_populate(args):
    """Generate intkey 'set' transactions and write a BatchList to a file.

    Args:
        args: Parsed CLI arguments; uses `args.pool_size` for the number of
            generated words and `args.output` as the output file path.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    words = generate_word_list(args.pool_size)

    txns = []
    # Iterate the words directly instead of indexing via range(len(words)).
    for word in words:
        txn = create_intkey_transaction(
            verb='set',
            name=word,
            value=random.randint(9000, 100000),
            private_key=private_key,
            public_key=public_key)
        txns.append(txn)

    batch = create_batch(
        transactions=txns,
        private_key=private_key,
        public_key=public_key)

    batch_list = batch_pb2.BatchList(batches=[batch])

    print("Writing to {}...".format(args.output))
    with open(args.output, "wb") as fd:
        fd.write(batch_list.SerializeToString())
def test_authorization_challenge_submit(self):
    """Verify the handler rejects a key the permission verifier denies.

    The AuthorizationChallengeSubmitHandler must answer with an
    AuthorizationViolation and close the connection when the permission
    verifier does not permit the public key.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)
    payload = os.urandom(10)

    auth_challenge_submit = AuthorizationChallengeSubmit(
        public_key=public_key,
        payload=payload,
        signature=signing.sign(payload, private_key),
        roles=[RoleType.Value("NETWORK")])

    # Connection is mid-challenge; verifier is configured to deny.
    network = MockNetwork(
        {"network": AuthorizationType.TRUST},
        connection_status={
            "connection_id": ConnectionStatus.AUTH_CHALLENGE_REQUEST})
    permission_verifer = MockPermissionVerifier(allow=False)
    handler = AuthorizationChallengeSubmitHandler(
        network, permission_verifer, MockGossip())

    handler_status = handler.handle(
        "connection_id", auth_challenge_submit.SerializeToString())

    self.assertEqual(handler_status.status, HandlerStatus.RETURN_AND_CLOSE)
    self.assertEqual(
        handler_status.message_type,
        validator_pb2.Message.AUTHORIZATION_VIOLATION)
def test_add_to_finalized_scheduler(self):
    """A finalized scheduler must raise SchedulerError on add_batch().

    Finalizes the scheduler first, then attempts to add a single-txn
    batch; the add is expected to fail with SchedulerError.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    # Finalize before adding anything, making the add invalid.
    self.scheduler.finalize()

    txn, _ = create_transaction(
        payload=b'a',
        private_key=private_key,
        public_key=public_key)
    batch = create_batch(
        transactions=[txn],
        private_key=private_key,
        public_key=public_key)

    with self.assertRaises(SchedulerError):
        self.scheduler.add_batch(batch)
def test_set_result_on_unscheduled_txn(self):
    """Setting a result for a never-scheduled txn raises SchedulerError.

    Adds a single-transaction batch, then immediately tries to set the
    transaction's execution result without first pulling it via an
    iterator or next_transaction(); the scheduler must reject it.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    txn, _ = create_transaction(
        payload=b'a',
        private_key=private_key,
        public_key=public_key)
    batch = create_batch(
        transactions=[txn],
        private_key=private_key,
        public_key=public_key)

    self.scheduler.add_batch(batch)

    with self.assertRaises(SchedulerError):
        self.scheduler.set_transaction_execution_result(
            txn.header_signature, False, None)
def setUp(self):
    """Create a key pair and a PermissionVerifier backed by mocks."""
    # Key pair available to the individual test cases.
    self.private_key = signing.generate_privkey()
    self.public_key = signing.generate_pubkey(self.private_key)

    self._identity_view_factory = MockIdentityViewFactory()
    self.permission_verifier = PermissionVerifier(
        self._identity_view_factory, self._current_root_func)
def _read_signing_keys(key_filename):
    """Read a private key file and derive the matching public key.

    Args:
        key_filename: The filename where the key is stored. If None,
            defaults to the validator's default key file.

    Returns:
        tuple (str, str): the public and private key pair

    Raises:
        CliException: If unable to read the file.
    """
    if key_filename is None:
        filename = os.path.join(config.get_key_dir(), 'validator.priv')
    else:
        filename = key_filename

    try:
        with open(filename, 'r') as key_file:
            signing_key = key_file.read().strip()
            return signing.generate_pubkey(signing_key), signing_key
    except IOError as e:
        raise CliException('Unable to read key file: {}'.format(str(e)))
def __init__(self, context_manager, transaction_executor, completer,
             block_store, state_view_factory, identity_key, data_dir,
             config_dir, chain_id_manager, batch_sender):
    """Creates a GenesisController.

    Args:
        context_manager (:obj:`ContextManager`): A `ContextManager`
            instance.
        transaction_executor (:obj:`TransactionExecutor`): A
            TransactionExecutor instance.
        completer (:obj:`Completer`): A Completer instance.
        block_store (:obj:): The block store, with dict-like access.
        state_view_factory (:obj:`StateViewFactory`): The state view
            factory for creating state views during processing.
        identity_key (str): A private key used for signing blocks, in hex.
        data_dir (str): The directory for data files.
        config_dir (str): The directory for config files.
        chain_id_manager (ChainIdManager): utility class to manage the
            chain id file.
        batch_sender: interface to broadcast batches to the network.
    """
    self._context_manager = context_manager
    self._transaction_executor = transaction_executor
    self._completer = completer
    self._block_store = block_store
    self._state_view_factory = state_view_factory
    self._identity_priv_key = identity_key
    # Derive and cache the public key once so signing-related code does
    # not recompute it from the private key.
    self._identity_public_key = \
        signing.generate_pubkey(self._identity_priv_key)
    self._data_dir = data_dir
    self._config_dir = config_dir
    self._chain_id_manager = chain_id_manager
    self._batch_sender = batch_sender
def do_populate(args, batches, words):
    """Append one batch of intkey 'set' transactions to `batches`.

    Args:
        args: Parsed CLI arguments (unused here; kept for interface parity).
        batches (list): Output list; the created batch is appended to it.
        words (dict): Maps each word to the header signature of the
            transaction that set it; updated in place so callers can build
            dependency lists later.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    txns = []
    # Iterate the word names directly instead of rebuilding list(words) on
    # every pass (the original indexed list(words)[i], which is O(n^2)).
    for name in list(words):
        txn = create_intkey_transaction(verb='set',
                                        name=name,
                                        value=random.randint(9000, 100000),
                                        deps=[],
                                        private_key=private_key,
                                        public_key=public_key)
        txns.append(txn)

        # Establish the signature of the txn associated with the word
        # so we can create good dependencies later
        words[name] = txn.header_signature

    batch = create_batch(transactions=txns,
                         private_key=private_key,
                         public_key=public_key)

    batches.append(batch)
def _create_key(self, key_name='validator.priv'):
    """Generate a private key, write it under the temp dir, and return
    the corresponding public key."""
    privkey = signing.generate_privkey()
    key_path = os.path.join(self._temp_dir, key_name)
    with open(key_path, 'w') as key_fd:
        key_fd.write(privkey)
    return signing.generate_pubkey(privkey)
def __init__(self, delegate, args):
    """Initialize the no-op workload with its own signing key pair.

    Args:
        delegate: Object the workload reports to (also passed to the
            superclass).
        args: Parsed workload arguments (passed to the superclass).
    """
    super(NoopWorkload, self).__init__(delegate, args)
    self._urls = []  # endpoints this workload submits to
    # Guards shared state accessed from multiple workload threads.
    self._lock = threading.Lock()
    self._delegate = delegate
    # One key pair generated per workload instance for signing.
    self._private_key = signing.generate_privkey()
    self._public_key = signing.generate_pubkey(self._private_key)
def _read_signing_keys(key_filename):
    """Read a private key file and derive the matching public key.

    Args:
        key_filename: The filename where the key is stored. If None,
            defaults to the current user's key under ~/.sawtooth/keys.

    Returns:
        tuple (str, str): the public and private key pair

    Raises:
        CliException: If unable to read the file.
    """
    if key_filename is None:
        filename = os.path.join(os.path.expanduser('~'),
                                '.sawtooth',
                                'keys',
                                getpass.getuser() + '.priv')
    else:
        filename = key_filename

    try:
        with open(filename, 'r') as key_file:
            signing_key = key_file.read().strip()
            return signing.generate_pubkey(signing_key), signing_key
    except IOError as e:
        raise CliException('Unable to read key file: {}'.format(str(e)))
def _create_key(self, key_name='validator.wif'):
    """Generate a private key, write it under the temp dir, and return
    the corresponding public key."""
    privkey = signing.generate_privkey()
    key_path = os.path.join(self._temp_dir, key_name)
    with open(key_path, 'w') as key_fd:
        key_fd.write(privkey)
    return signing.generate_pubkey(privkey)
def _read_signing_keys(key_filename):
    """Read a private key file and derive the matching public key.

    Args:
        key_filename: The filename where the key is stored. If None,
            defaults to the validator's default key file.

    Returns:
        tuple (str, str): the public and private key pair

    Raises:
        CliException: If unable to read the file.
    """
    if key_filename is None:
        filename = os.path.join(config.get_key_dir(), 'validator.priv')
    else:
        filename = key_filename

    try:
        with open(filename, 'r') as key_file:
            signing_key = key_file.read().strip()
            return signing.generate_pubkey(signing_key), signing_key
    except IOError as e:
        raise CliException('Unable to read key file: {}'.format(str(e)))
def __init__(self, with_genesis=True):
    """Build a mock journal environment for BlockPublisher tests.

    Args:
        with_genesis (bool): When True, generate a genesis block and make
            it the chain head before constructing the publisher.
    """
    self.block_sender = MockBlockSender()
    self.batch_sender = MockBatchSender()
    self.block_store = BlockStore(DictDatabase())
    self.block_cache = BlockCache(self.block_store)
    self.state_db = {}

    # add the mock reference to the consensus
    consensus_setting_addr = SettingsView.setting_address(
        'sawtooth.consensus.algorithm')
    self.state_db[consensus_setting_addr] = _setting_entry(
        'sawtooth.consensus.algorithm', 'test_journal.mock_consensus')

    self.state_view_factory = MockStateViewFactory(self.state_db)
    self.signing_key = signing.generate_privkey()
    self.public_key = signing.generate_pubkey(self.signing_key)

    # Separate key pair used by the publisher for signing blocks.
    self.identity_signing_key = signing.generate_privkey()
    chain_head = None
    if with_genesis:
        self.genesis_block = self.generate_genesis_block()
        self.set_chain_head(self.genesis_block)
        chain_head = self.genesis_block

    self.block_publisher = BlockPublisher(
        transaction_executor=MockTransactionExecutor(),
        block_cache=self.block_cache,
        state_view_factory=self.state_view_factory,
        block_sender=self.block_sender,
        # NOTE(review): batch_sender is wired to block_sender even though
        # a MockBatchSender was constructed above — confirm intentional.
        batch_sender=self.block_sender,
        squash_handler=None,
        chain_head=chain_head,
        identity_signing_key=self.identity_signing_key,
        data_dir=None,
        config_dir=None)
async def create_new_user(request):
    """Create a new user: generate a key pair, submit a create_user
    transaction to the validator, and persist an auth entry.

    Args:
        request: HTTP request whose JSON body must contain 'name' and
            'password'; may also carry 'metadata', 'manager', and 'email'.

    Returns:
        A create_user_response carrying the new user's public key.
    """
    required_fields = ['name', 'password']
    utils.validate_fields(required_fields, request.json)

    # Generate keys
    private_key = signing.generate_privkey(privkey_format='bytes')
    public_key = signing.generate_pubkey(private_key,
                                         privkey_format='bytes')
    txn_key = Key(public_key, private_key)
    # Private key is stored only in encrypted form, keyed by the app's
    # AES key.
    encrypted_private_key = utils.encrypt_private_key(
        request.app.config.AES_KEY, public_key, private_key)

    # Build create user transaction
    batch_list = create_user(txn_key,
                             request.app.config.BATCHER_KEY_PAIR,
                             request.json.get('name'),
                             public_key,
                             request.json.get('metadata'),
                             request.json.get('manager'))

    # Submit transaction and wait for complete
    await utils.send(request.app.config.VAL_CONN,
                     batch_list[0],
                     request.app.config.TIMEOUT)

    # Save new user in auth table
    # Only the SHA-256 digest of the password is persisted, never the
    # plaintext.
    hashed_password = hashlib.sha256(
        request.json.get('password').encode('utf-8')).hexdigest()
    auth_entry = {
        'user_id': public_key,
        'hashed_password': hashed_password,
        'encrypted_private_key': encrypted_private_key,
        'email': request.json.get('email')
    }
    await auth_query.create_auth_entry(request.app.config.DB_CONN,
                                       auth_entry)

    # Send back success response
    return create_user_response(request, public_key)
def __init__(self, with_genesis=True):
    """Build a mock journal environment for BlockPublisher tests.

    Args:
        with_genesis (bool): When True, generate a genesis block and make
            it the chain head before constructing the publisher.
    """
    self.block_sender = MockBlockSender()
    self.batch_sender = MockBatchSender()
    self.block_store = BlockStore(DictDatabase())
    self.block_cache = BlockCache(self.block_store)
    self.state_db = {}

    # add the mock reference to the consensus
    consensus_setting_addr = ConfigView.setting_address(
        'sawtooth.consensus.algorithm')
    self.state_db[consensus_setting_addr] = _setting_entry(
        'sawtooth.consensus.algorithm', 'test_journal.mock_consensus')

    self.state_view_factory = MockStateViewFactory(self.state_db)
    self.signing_key = signing.generate_privkey()
    self.public_key = signing.generate_pubkey(self.signing_key)

    # Separate key pair used by the publisher for signing blocks.
    self.identity_signing_key = signing.generate_privkey()
    chain_head = None
    if with_genesis:
        self.genesis_block = self.generate_genesis_block()
        self.set_chain_head(self.genesis_block)
        chain_head = self.genesis_block

    self.block_publisher = BlockPublisher(
        transaction_executor=MockTransactionExecutor(),
        block_cache=self.block_cache,
        state_view_factory=self.state_view_factory,
        block_sender=self.block_sender,
        # NOTE(review): batch_sender is wired to block_sender even though
        # a MockBatchSender was constructed above — confirm intentional.
        batch_sender=self.block_sender,
        squash_handler=None,
        chain_head=chain_head,
        identity_signing_key=self.identity_signing_key,
        data_dir=None,
        config_dir=None)
def create_chain(num=10):
    """Build a linked chain of `num` blocks, 20 transactions per block.

    Args:
        num (int): Number of blocks to create.

    Returns:
        list of (block_id, block_wrapper, txn_ids) tuples, in chain order.
    """
    priv_key = signer.generate_privkey()
    pub_key = signer.generate_pubkey(priv_key)

    blocks = []
    previous_block_id = "0000000000000000"
    # Block numbers run 1..num; each block links to its predecessor.
    for block_num in range(1, num + 1):
        current_block_id = uuid4().hex

        txns = []
        for _ in range(20):
            txn, _sig = create_transaction(payload=uuid4().hex.encode(),
                                           private_key=priv_key,
                                           public_key=pub_key)
            txns.append(txn)
        txn_ids = [t.header_signature for t in txns]

        batch = create_batch(transactions=txns,
                             public_key=pub_key,
                             private_key=priv_key)
        blk_w = create_block(block_num,
                             previous_block_id,
                             current_block_id,
                             batches=[batch])
        blocks.append((current_block_id, blk_w, txn_ids))

        previous_block_id = current_block_id

    return blocks
def do_populate(args):
    """Generate intkey 'set' transactions and write a BatchList to a file.

    Args:
        args: Parsed CLI arguments; uses `args.pool_size` for the number of
            generated words and `args.output` as the output file path.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    words = generate_word_list(args.pool_size)

    txns = []
    # Iterate the words directly instead of indexing via range(len(words)).
    for word in words:
        txn = create_intkey_transaction(verb='set',
                                        name=word,
                                        value=random.randint(9000, 100000),
                                        private_key=private_key,
                                        public_key=public_key)
        txns.append(txn)

    batch = create_batch(transactions=txns,
                         private_key=private_key,
                         public_key=public_key)

    batch_list = batch_pb2.BatchList(batches=[batch])

    print("Writing to {}...".format(args.output))
    with open(args.output, "wb") as fd:
        fd.write(batch_list.SerializeToString())
def do_keygen(args):
    """Generate a user key pair and write <name>.priv / <name>.pub files.

    Args:
        args (:obj:`Namespace`): Parsed args; uses key_name, key_dir,
            force, and quiet.

    Raises:
        CliException: If the key directory is missing or cannot be
            created, if the files exist without --force, or on I/O errors.
    """
    if args.key_name is not None:
        key_name = args.key_name
    else:
        key_name = getpass.getuser()

    if args.key_dir is not None:
        key_dir = args.key_dir
        if not os.path.exists(key_dir):
            raise CliException('no such directory: {}'.format(key_dir))
    else:
        key_dir = os.path.join(os.path.expanduser('~'), '.sawtooth', 'keys')
        # Auto-create the default directory only (explicit dirs must exist).
        if not os.path.exists(key_dir):
            if not args.quiet:
                print('creating key directory: {}'.format(key_dir))
            try:
                os.makedirs(key_dir)
            except IOError as e:
                raise CliException('IOError: {}'.format(str(e)))

    priv_filename = os.path.join(key_dir, key_name + '.priv')
    pub_filename = os.path.join(key_dir, key_name + '.pub')

    # Refuse to clobber existing key files unless --force was given.
    if not args.force:
        file_exists = False
        for filename in [priv_filename, pub_filename]:
            if os.path.exists(filename):
                file_exists = True
                print('file exists: {}'.format(filename), file=sys.stderr)
        if file_exists:
            raise CliException(
                'files exist, rerun with --force to overwrite existing files')

    privkey = signing.generate_privkey()
    pubkey = signing.generate_pubkey(privkey)

    try:
        priv_exists = os.path.exists(priv_filename)
        with open(priv_filename, 'w') as priv_fd:
            if not args.quiet:
                if priv_exists:
                    print('overwriting file: {}'.format(priv_filename))
                else:
                    print('writing file: {}'.format(priv_filename))
            priv_fd.write(privkey)
            priv_fd.write('\n')

        pub_exists = os.path.exists(pub_filename)
        with open(pub_filename, 'w') as pub_fd:
            if not args.quiet:
                if pub_exists:
                    print('overwriting file: {}'.format(pub_filename))
                else:
                    print('writing file: {}'.format(pub_filename))
            pub_fd.write(pubkey)
            pub_fd.write('\n')
    except IOError as ioe:
        raise CliException('IOError: {}'.format(str(ioe)))
def do_keygen(args):
    """Generate a key pair and write <name>.wif / <name>.addr files.

    Args:
        args (:obj:`Namespace`): Parsed args; uses key_name, key_dir,
            force, and quiet.

    Raises:
        CliException: If the key directory is missing or cannot be
            created, if the files exist without --force, or on I/O errors.
    """
    if args.key_name is not None:
        key_name = args.key_name
    else:
        key_name = getpass.getuser()

    if args.key_dir is not None:
        key_dir = args.key_dir
        if not os.path.exists(key_dir):
            raise CliException('no such directory: {}'.format(key_dir))
    else:
        key_dir = os.path.join(os.path.expanduser('~'), '.sawtooth', 'keys')
        # Auto-create the default directory only (explicit dirs must exist).
        if not os.path.exists(key_dir):
            if not args.quiet:
                print('creating key directory: {}'.format(key_dir))
            try:
                os.makedirs(key_dir)
            except IOError as e:
                raise CliException('IOError: {}'.format(str(e)))

    wif_filename = os.path.join(key_dir, key_name + '.wif')
    addr_filename = os.path.join(key_dir, key_name + '.addr')

    # Refuse to clobber existing key files unless --force was given.
    if not args.force:
        file_exists = False
        for filename in [wif_filename, addr_filename]:
            if os.path.exists(filename):
                file_exists = True
                print('file exists: {}'.format(filename), file=sys.stderr)
        if file_exists:
            raise CliException(
                'files exist, rerun with --force to overwrite existing files')

    privkey = signing.generate_privkey()
    pubkey = signing.generate_pubkey(privkey)
    # Derived address written alongside the private key.
    addr = signing.generate_identifier(pubkey)

    try:
        wif_exists = os.path.exists(wif_filename)
        with open(wif_filename, 'w') as wif_fd:
            if not args.quiet:
                if wif_exists:
                    print('overwriting file: {}'.format(wif_filename))
                else:
                    print('writing file: {}'.format(wif_filename))
            wif_fd.write(privkey)
            wif_fd.write('\n')

        addr_exists = os.path.exists(addr_filename)
        with open(addr_filename, 'w') as addr_fd:
            if not args.quiet:
                if addr_exists:
                    print('overwriting file: {}'.format(addr_filename))
                else:
                    print('writing file: {}'.format(addr_filename))
            addr_fd.write(addr)
            addr_fd.write('\n')
    except IOError as ioe:
        raise CliException('IOError: {}'.format(str(ioe)))
def __init__(self, consensus_module, block_cache, new_block,
             state_view_factory, done_cb, executor, squash_handler,
             identity_signing_key, data_dir, config_dir,
             permission_verifier):
    """Initialize the BlockValidator
    Args:
        consensus_module: The consensus module that contains
            implementation of the consensus algorithm to use for block
            validation.
        block_cache: The cache of all recent blocks and the processing
            state associated with them.
        new_block: The block to validate.
        state_view_factory: The factory object to create.
        done_cb: The method to call when block validation completed
        executor: The thread pool to process block validations.
        squash_handler: A parameter passed when creating transaction
            schedulers.
        identity_signing_key: Private key for signing blocks.
        data_dir: Path to location where persistent data for the
            consensus module can be stored.
        config_dir: Path to location where config data for the
            consensus module can be found.
        permission_verifier: Verifier used to check transaction
            signing permissions during validation.
    Returns:
        None
    """
    self._consensus_module = consensus_module
    self._block_cache = block_cache
    self._chain_commit_state = ChainCommitState(
        self._block_cache.block_store, [])
    self._new_block = new_block

    # Set during execution of the of the BlockValidation to the current
    # chain_head at that time.
    self._chain_head = None

    self._state_view_factory = state_view_factory
    self._done_cb = done_cb
    self._executor = executor
    self._squash_handler = squash_handler
    self._identity_signing_key = identity_signing_key
    # Public key derived once from the signing key.
    self._identity_public_key = \
        signing.generate_pubkey(self._identity_signing_key)
    self._data_dir = data_dir
    self._config_dir = config_dir
    # Accumulates the outcome of validation, handed to done_cb.
    self._result = {
        'new_block': new_block,
        'chain_head': None,
        'new_chain': [],
        'cur_chain': [],
        'committed_batches': [],
        'uncommitted_batches': [],
        'execution_results': [],
        'num_transactions': 0
    }
    self._permission_verifier = permission_verifier

    self._validation_rule_enforcer = \
        ValidationRuleEnforcer(SettingsViewFactory(state_view_factory))
def __init__(self, delegate, args):
    """Initialize the intkey workload with its own signing key pair.

    Args:
        delegate: Object the workload reports to (also passed to the
            superclass).
        args: Parsed workload arguments (passed to the superclass).
    """
    super(IntKeyWorkload, self).__init__(delegate, args)
    self._streams = []  # validator connections used for submission
    self._pending_batches = {}  # batches submitted but not yet resolved
    # Guards shared state accessed from multiple workload threads.
    self._lock = threading.Lock()
    self._delegate = delegate
    self._deps = {}  # txn dependencies keyed by intkey name
    # One key pair generated per workload instance for signing.
    self._private_key = signing.generate_privkey()
    self._public_key = signing.generate_pubkey(self._private_key)
def __init__(self, rest_endpoint):
    """Generate a key pair, persist the private key to a temp file, and
    remember the REST endpoint."""
    self.priv_key = signer.generate_privkey()
    self.pub_key = signer.generate_pubkey(self.priv_key)

    # Private key is written to a uniquely named file under /tmp so
    # external tools can pick it up.
    self._priv_key_file = os.path.join("/tmp", uuid4().hex[:20])
    with open(self._priv_key_file, mode='w') as key_out:
        key_out.write(self.priv_key)

    self._rest_endpoint = rest_endpoint
def test_set_status(self):
    """Tests that set_status() has the correct behavior.

    Basically:
        1. Adds a batch which has two transactions.
        2. Calls next_transaction() to get the first Transaction.
        3. Calls next_transaction() to verify that it returns None.
        4. Calls set_status() to mark the first transaction applied.
        5. Calls next_transaction() to get the second Transaction.

    Step 3 returns None because the first transaction hasn't been marked
    as applied, and the SerialScheduler will only return one
    not-applied Transaction at a time.

    Step 5 is expected to return the second Transaction, not None,
    since the first Transaction was marked as applied in the previous
    step.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    # Two transactions with distinguishable payloads 'a' and 'b'.
    txns = []
    for name in ['a', 'b']:
        txn, _ = create_transaction(
            payload=name.encode(),
            private_key=private_key,
            public_key=public_key)
        txns.append(txn)

    batch = create_batch(
        transactions=txns,
        private_key=private_key,
        public_key=public_key)

    self.scheduler.add_batch(batch)

    scheduled_txn_info = self.scheduler.next_transaction()
    self.assertIsNotNone(scheduled_txn_info)
    self.assertEqual('a', scheduled_txn_info.txn.payload.decode())

    # Serial scheduler: second txn is withheld until the first resolves.
    self.assertIsNone(self.scheduler.next_transaction())
    c_id = self.context_manager.create_context(
        self.first_state_root,
        base_contexts=scheduled_txn_info.base_context_ids,
        inputs=[],
        outputs=[])
    self.scheduler.set_transaction_execution_result(
        scheduled_txn_info.txn.header_signature,
        is_valid=True,
        context_id=c_id)

    scheduled_txn_info = self.scheduler.next_transaction()
    self.assertIsNotNone(scheduled_txn_info)
    self.assertEqual('b', scheduled_txn_info.txn.payload.decode())
def do_keygen(args):
    """Executes the key generation operation, given the parsed arguments.

    Args:
        args (:obj:`Namespace`): The parsed args.

    Raises:
        CliException: If the key directory does not exist, if the files
            exist without --force, or on I/O errors.
    """
    if args.key_name is not None:
        key_name = args.key_name
    else:
        key_name = 'validator'

    key_dir = get_key_dir()

    # Unlike the user-key variant, the validator key dir must pre-exist.
    if not os.path.exists(key_dir):
        raise CliException(
            "Key directory does not exist: {}".format(key_dir))

    priv_filename = os.path.join(key_dir, key_name + '.priv')
    pub_filename = os.path.join(key_dir, key_name + '.pub')

    # Refuse to clobber existing key files unless --force was given.
    if not args.force:
        file_exists = False
        for filename in [priv_filename, pub_filename]:
            if os.path.exists(filename):
                file_exists = True
                print('file exists: {}'.format(filename), file=sys.stderr)
        if file_exists:
            raise CliException(
                'files exist, rerun with --force to overwrite existing files')

    privkey = signing.generate_privkey()
    pubkey = signing.generate_pubkey(privkey)

    try:
        priv_exists = os.path.exists(priv_filename)
        with open(priv_filename, 'w') as priv_fd:
            if not args.quiet:
                if priv_exists:
                    print('overwriting file: {}'.format(priv_filename))
                else:
                    print('writing file: {}'.format(priv_filename))
            priv_fd.write(privkey)
            priv_fd.write('\n')

        pub_exists = os.path.exists(pub_filename)
        with open(pub_filename, 'w') as pub_fd:
            if not args.quiet:
                if pub_exists:
                    print('overwriting file: {}'.format(pub_filename))
                else:
                    print('writing file: {}'.format(pub_filename))
            pub_fd.write(pubkey)
            pub_fd.write('\n')
    except IOError as ioe:
        raise CliException('IOError: {}'.format(str(ioe)))
def load_config(app):  # pylint: disable=too-many-branches
    """Load app configuration: defaults, then config.py, then CLI options.

    Precedence (lowest to highest): DEFAULT_CONFIG, config.py next to the
    package, command-line options. Exits the process if any of the
    required secrets (SECRET_KEY, AES_KEY, BATCHER_PRIVATE_KEY) is
    missing after all sources are applied.

    Args:
        app: The application whose `config` is populated in place.
    """
    app.config.update(DEFAULT_CONFIG)
    config_file_path = os.path.join(
        os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
        'config.py')
    try:
        app.config.from_pyfile(config_file_path)
    except FileNotFoundError:
        LOGGER.warning("No config file provided")

    # CLI Options will override config file options
    opts = parse_args(sys.argv[1:])

    if opts.host is not None:
        app.config.HOST = opts.host
    if opts.port is not None:
        app.config.PORT = opts.port
    if opts.validator_host is not None:
        app.config.VALIDATOR_HOST = opts.validator_host
    if opts.validator_port is not None:
        app.config.VALIDATOR_PORT = opts.validator_port
    if opts.timeout is not None:
        app.config.TIMEOUT = opts.timeout

    if opts.db_host is not None:
        app.config.DB_HOST = opts.db_host
    if opts.db_port is not None:
        app.config.DB_PORT = opts.db_port
    if opts.db_name is not None:
        app.config.DB_NAME = opts.db_name

    if opts.debug is not None:
        app.config.DEBUG = opts.debug

    # Required secrets: abort startup if any is still unset.
    if opts.secret_key is not None:
        app.config.SECRET_KEY = opts.secret_key
    if app.config.SECRET_KEY is None:
        LOGGER.exception("API secret key was not provided")
        sys.exit(1)

    if opts.aes_key is not None:
        app.config.AES_KEY = opts.aes_key
    if app.config.AES_KEY is None:
        LOGGER.exception("AES key was not provided")
        sys.exit(1)

    if opts.batcher_private_key is not None:
        app.config.BATCHER_PRIVATE_KEY = opts.batcher_private_key
    if app.config.BATCHER_PRIVATE_KEY is None:
        LOGGER.exception("Batcher private key was not provided")
        sys.exit(1)

    # Derive the batcher public key and cache the full key pair.
    batcher_public_key = signing.generate_pubkey(
        app.config.BATCHER_PRIVATE_KEY, privkey_format='hex')
    app.config.BATCHER_KEY_PAIR = Key(batcher_public_key,
                                      app.config.BATCHER_PRIVATE_KEY)
def __init__(self, delegate, args):
    """Initialize the intkey workload with its own signing key pair.

    Args:
        delegate: Object the workload reports to (also passed to the
            superclass).
        args: Parsed workload arguments; `args.auth_info` is kept for
            authenticated REST requests.
    """
    super(IntKeyWorkload, self).__init__(delegate, args)
    self._auth_info = args.auth_info
    self._urls = []  # REST endpoints this workload submits to
    self._pending_batches = {}  # batches submitted but not yet resolved
    # Guards shared state accessed from multiple workload threads.
    self._lock = threading.Lock()
    self._delegate = delegate
    self._deps = {}  # txn dependencies keyed by intkey name
    # One key pair generated per workload instance for signing.
    self._private_key = signing.generate_privkey()
    self._public_key = signing.generate_pubkey(self._private_key)
def setUp(self):
    """Build a Completer over a fresh block store and mock gossip, with
    its callbacks routed into this test case."""
    self.block_store = BlockStore({})
    self.gossip = MockGossip()
    self.completer = Completer(self.block_store, self.gossip)
    # Redirect completer callbacks to test hooks that record results.
    self.completer._on_block_received = self._on_block_received
    self.completer._on_batch_received = self._on_batch_received
    self.private_key = signing.generate_privkey()
    self.public_key = signing.generate_pubkey(self.private_key)
    self.blocks = []   # blocks delivered via _on_block_received
    self.batches = []  # batches delivered via _on_batch_received
def __init__(self, rest_endpoint):
    """Set up a key pair, intkey message factory, and REST client."""
    self.priv_key = signer.generate_privkey()
    self.pub_key = signer.generate_pubkey(self.priv_key)

    # intkey state addresses are prefixed by the first 6 hex chars of
    # sha512('intkey').
    self._namespace = hashlib.sha512(b'intkey').hexdigest()[:6]
    self._factory = MessageFactory(
        'application/cbor',
        'intkey',
        '1.0',
        self._namespace)

    self._rest = RestClient(rest_endpoint)
def _generate_genesis_block(self):
    """Return a BlockBuilder seeded with a genesis block header.

    The header has block number 0, the null previous-block id, and the
    signer public key derived from this controller's private key.
    """
    header = block_pb2.BlockHeader(
        block_num=0,
        previous_block_id=NULL_BLOCK_IDENTIFIER,
        signer_pubkey=signing.generate_pubkey(self._identity_priv_key))
    return BlockBuilder(header)
def __init__(self, transaction_executor, block_cache, state_view_factory,
             block_sender, batch_sender, squash_handler, chain_head,
             identity_signing_key, data_dir, config_dir,
             permission_verifier, batch_injector_factory=None):
    """Initialize the BlockPublisher object

    Args:
        transaction_executor (:obj:`TransactionExecutor`): A
            TransactionExecutor instance.
        block_cache (:obj:`BlockCache`): A BlockCache instance.
        state_view_factory (:obj:`StateViewFactory`): StateViewFactory
            for read-only state views.
        block_sender (:obj:`BlockSender`): The BlockSender instance.
        batch_sender (:obj:`BatchSender`): The BatchSender instance.
        squash_handler (function): Squash handler function for merging
            contexts.
        chain_head (:obj:`BlockWrapper`): The initial chain head.
        identity_signing_key (str): Private key for signing blocks
        data_dir (str): path to location where persistent data for the
            consensus module can be stored.
        config_dir (str): path to location where configuration can be
            found.
        permission_verifier: Verifier used to check transaction signing
            permissions.
        batch_injector_factory (:obj:`BatchInjectorFatctory`): A factory
            for creating BatchInjectors.
    """
    self._lock = RLock()
    self._candidate_block = None  # _CandidateBlock helper,
    # the next block in potential chain
    self._block_cache = block_cache
    self._state_view_factory = state_view_factory
    self._transaction_executor = transaction_executor
    self._block_sender = block_sender
    self._batch_publisher = BatchPublisher(identity_signing_key,
                                           batch_sender)
    self._pending_batches = []  # batches we are waiting for validation,
    # arranged in the order of batches received.
    self._chain_head = chain_head  # block (BlockWrapper)
    self._squash_handler = squash_handler
    self._identity_signing_key = identity_signing_key
    # Public key derived once from the signing key.
    self._identity_public_key = \
        signing.generate_pubkey(self._identity_signing_key)
    self._data_dir = data_dir
    self._config_dir = config_dir
    self._permission_verifier = permission_verifier
    self._batch_injector_factory = batch_injector_factory
def __init__(self, consensus_module, block_cache, new_block, chain_head,
             state_view_factory, done_cb, executor, squash_handler,
             identity_signing_key, data_dir, config_dir):
    """Initialize the BlockValidator
    Args:
        consensus_module: The consensus module that contains
            implementation of the consensus algorithm to use for block
            validation.
        block_cache: The cache of all recent blocks and the processing
            state associated with them.
        new_block: The block to validate.
        chain_head: The block at the current chain head.
        state_view_factory: The factory object to create.
        done_cb: The method to call when block validation completed
        executor: The thread pool to process block validations.
        squash_handler: A parameter passed when creating transaction
            schedulers.
        identity_signing_key: Private key for signing blocks.
        data_dir: Path to location where persistent data for the
            consensus module can be stored.
        config_dir: Path to location where config data for the
            consensus module can be found.
    Returns:
        None
    """
    self._consensus_module = consensus_module
    self._block_cache = block_cache
    self._new_block = new_block
    self._chain_head = chain_head
    self._state_view_factory = state_view_factory
    self._done_cb = done_cb
    self._executor = executor
    self._squash_handler = squash_handler
    self._identity_signing_key = identity_signing_key
    # Public key derived once from the signing key.
    self._identity_public_key = \
        signing.generate_pubkey(self._identity_signing_key)
    self._data_dir = data_dir
    self._config_dir = config_dir
    # Accumulates the outcome of validation, handed to done_cb.
    self._result = {
        'new_block': new_block,
        'chain_head': chain_head,
        'new_chain': [],
        'cur_chain': [],
        'committed_batches': [],
        'uncommitted_batches': [],
    }
def do_keygen(args):
    """Executes the key generation operation, given the parsed arguments.

    Args:
        args (:obj:`Namespace`): The parsed args.

    Raises:
        CliException: If the key directory does not exist, if the files
            exist without --force, or on I/O errors.
    """
    if args.key_name is not None:
        key_name = args.key_name
    else:
        key_name = 'validator'

    key_dir = get_key_dir()

    # Unlike the user-key variant, the validator key dir must pre-exist.
    if not os.path.exists(key_dir):
        raise CliException("Key directory does not exist: {}".format(key_dir))

    priv_filename = os.path.join(key_dir, key_name + '.priv')
    pub_filename = os.path.join(key_dir, key_name + '.pub')

    # Refuse to clobber existing key files unless --force was given.
    if not args.force:
        file_exists = False
        for filename in [priv_filename, pub_filename]:
            if os.path.exists(filename):
                file_exists = True
                print('file exists: {}'.format(filename), file=sys.stderr)
        if file_exists:
            raise CliException(
                'files exist, rerun with --force to overwrite existing files')

    privkey = signing.generate_privkey()
    pubkey = signing.generate_pubkey(privkey)

    try:
        priv_exists = os.path.exists(priv_filename)
        with open(priv_filename, 'w') as priv_fd:
            if not args.quiet:
                if priv_exists:
                    print('overwriting file: {}'.format(priv_filename))
                else:
                    print('writing file: {}'.format(priv_filename))
            priv_fd.write(privkey)
            priv_fd.write('\n')

        pub_exists = os.path.exists(pub_filename)
        with open(pub_filename, 'w') as pub_fd:
            if not args.quiet:
                if pub_exists:
                    print('overwriting file: {}'.format(pub_filename))
                else:
                    print('writing file: {}'.format(pub_filename))
            pub_fd.write(pubkey)
            pub_fd.write('\n')
    except IOError as ioe:
        raise CliException('IOError: {}'.format(str(ioe)))
def __init__(self, block_cache, block_sender, state_view_factory, executor,
             transaction_executor, on_chain_updated, squash_handler,
             chain_id_manager, identity_signing_key, data_dir):
    """Create a ChainController.

    Args:
        block_cache: Cache of recent blocks and their processing state.
        block_sender: Interface used to send blocks out to the network.
        state_view_factory: Factory used to create state views.
        executor: Thread pool that runs block validations.
        transaction_executor: TransactionExecutor used to produce
            schedulers for batch validation.
        on_chain_updated: Callback that notifies the rest of the system
            that the head block of the chain has changed.
        squash_handler: Parameter passed when creating transaction
            schedulers.
        chain_id_manager: Manager for the network's chain id.
        identity_signing_key: Private key used for signing blocks.
        data_dir: Directory for the consensus module's persistent data.

    Returns:
        None
    """
    self._lock = RLock()
    self._block_cache = block_cache
    self._block_store = block_cache.block_store
    self._state_view_factory = state_view_factory
    self._block_sender = block_sender
    self._executor = executor
    self._transaction_executor = transaction_executor
    self._notify_on_chain_updated = on_chain_updated
    self._squash_handler = squash_handler
    self._identity_signing_key = identity_signing_key
    self._identity_public_key = signing.generate_pubkey(
        self._identity_signing_key)
    self._data_dir = data_dir

    # Blocks currently undergoing validation.
    self._blocks_processing = {}
    # Blocks whose predecessor is still being processed; each is
    # scheduled for validation once that predecessor completes.
    self._blocks_pending = {}

    self._chain_id_manager = chain_id_manager

    try:
        self._chain_head = self._block_store.chain_head
        LOGGER.info("Chain controller initialized with chain head: %s",
                    self._chain_head)
    except Exception as exc:
        LOGGER.error("Invalid block store. Head of the block chain cannot "
                     "be determined")
        LOGGER.exception(exc)
        raise

    self._notify_on_chain_updated(self._chain_head)
def test_transaction_order(self):
    """Tests that transactions are returned in the order they were added.

    Adds three batches with varying numbers of transactions, then checks
    that a scheduler iterator yields them in that same order.  A second
    iterator is walked in lockstep and must agree with the first at every
    step.  Finally the scheduler is finalized and the iterator must raise
    StopIteration once exhausted.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    all_txns = []
    for names in [['a', 'b', 'c'], ['d', 'e'], ['f', 'g', 'h', 'i']]:
        txns_for_batch = [
            create_transaction(
                payload=name.encode(),
                private_key=private_key,
                public_key=public_key)[0]
            for name in names
        ]
        all_txns.extend(txns_for_batch)
        self.scheduler.add_batch(create_batch(
            transactions=txns_for_batch,
            private_key=private_key,
            public_key=public_key))

    self.scheduler.finalize()

    first_iter = iter(self.scheduler)
    second_iter = iter(self.scheduler)
    for txn in all_txns:
        info = next(first_iter)
        # Both iterators must walk the schedule identically.
        self.assertEqual(info, next(second_iter))
        self.assertIsNotNone(info)
        self.assertEqual(txn.payload, info.txn.payload)
        ctx_id = self.context_manager.create_context(
            self.first_state_root,
            base_contexts=info.base_context_ids,
            inputs=[],
            outputs=[])
        self.scheduler.set_transaction_execution_result(
            txn.header_signature, True, ctx_id)

    with self.assertRaises(StopIteration):
        next(first_iter)
def __init__(self, identity_signing_key, batch_sender):
    """Create a BatchPublisher.

    :param identity_signing_key: the validator's signing key.
    :param batch_sender: interface to an object that will post the built
        batch to the network.
    """
    # NOTE: the signing key is intentionally stored under both a public
    # and a private attribute name, preserving the original interface.
    self.identity_signing_key = identity_signing_key
    self._identity_signing_key = identity_signing_key
    self._batch_sender = batch_sender
    self._identity_public_key = signing.generate_pubkey(
        identity_signing_key)
def _create_batches(self):
    """Build batches from the test YAML and record their expected results."""
    yaml_batches = self._yaml_from_file()

    signing_key = signing.generate_privkey()
    verifying_key = signing.generate_pubkey(signing_key)

    self._batches, self._batch_results = self._process_batches(
        yaml_batches=yaml_batches,
        priv_key=signing_key,
        pub_key=verifying_key)
def test_completion_on_last_result(self):
    """Tests that the schedule is not complete until the final result.

    Adds three batches with varying numbers of transactions and walks the
    schedule with an iterator, checking that `complete` stays False until
    the last execution result is set.  After all results are in, `complete`
    must be True and the iterator must raise StopIteration.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    scheduled = []
    for names in [['a', 'b', 'c'], ['d', 'e'], ['f', 'g', 'h', 'i']]:
        members = []
        for name in names:
            txn, _ = create_transaction(
                payload=name.encode(),
                private_key=private_key,
                public_key=public_key)
            members.append(txn)
            scheduled.append(txn)
        self.scheduler.add_batch(create_batch(
            transactions=members,
            private_key=private_key,
            public_key=public_key))

    self.scheduler.finalize()

    schedule_iter = iter(self.scheduler)
    for txn in scheduled:
        info = next(schedule_iter)
        # Must not report complete until the last result below is set.
        self.assertFalse(self.scheduler.complete(block=False))
        ctx_id = self.context_manager.create_context(
            self.first_state_root,
            base_contexts=info.base_context_ids,
            inputs=[],
            outputs=[])
        self.scheduler.set_transaction_execution_result(
            txn.header_signature, True, ctx_id)

    self.assertTrue(self.scheduler.complete(block=False))
    with self.assertRaises(StopIteration):
        next(schedule_iter)
def __init__(self, transaction_executor, block_cache, state_view_factory,
             block_sender, batch_sender, squash_handler, chain_head,
             identity_signing_key, data_dir, config_dir):
    """Create a BlockPublisher.

    Args:
        transaction_executor (:obj:`TransactionExecutor`): A
            TransactionExecutor instance.
        block_cache (:obj:`BlockCache`): A BlockCache instance.
        state_view_factory (:obj:`StateViewFactory`): StateViewFactory
            for read-only state views.
        block_sender (:obj:`BlockSender`): The BlockSender instance.
        batch_sender (:obj:`BatchSender`): The BatchSender instance.
        squash_handler (function): Squash handler function for merging
            contexts.
        chain_head (:obj:`BlockWrapper`): The initial chain head.
        identity_signing_key (str): Private key for signing blocks.
        data_dir (str): Directory for the consensus module's persistent
            data.
        config_dir (str): Directory where configuration can be found.
    """
    self._lock = RLock()

    # The _CandidateBlock helper for the next potential block in the
    # chain; None until a block is being built.
    self._candidate_block = None

    self._block_cache = block_cache
    self._state_view_factory = state_view_factory
    self._transaction_executor = transaction_executor
    self._block_sender = block_sender
    self._batch_publisher = BatchPublisher(
        identity_signing_key, batch_sender)

    # Batches awaiting validation, kept in the order they were received.
    self._pending_batches = []

    self._chain_head = chain_head  # block (BlockWrapper)
    self._squash_handler = squash_handler
    self._identity_signing_key = identity_signing_key
    self._identity_public_key = signing.generate_pubkey(
        self._identity_signing_key)
    self._data_dir = data_dir
    self._config_dir = config_dir
def do_generate(args):
    """Generate random intkey 'inc'/'dec' batches and write them to a file.

    Builds ``args.count`` batches of randomly sized groups of inc/dec
    transactions over a generated word pool, printing throughput stats
    every 100 batches, then serializes the whole BatchList to
    ``args.output``.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    words = generate_word_list(args.pool_size)

    batches = []
    start = time.time()
    total_txn_count = 0
    for batch_num in range(0, args.count):
        batch_txns = [
            create_intkey_transaction(
                verb=random.choice(['inc', 'dec']),
                name=random.choice(words),
                value=1,
                private_key=private_key,
                public_key=public_key)
            for _ in range(0, random.randint(1, args.batch_max_size))
        ]
        total_txn_count += len(batch_txns)

        batches.append(create_batch(
            transactions=batch_txns,
            private_key=private_key,
            public_key=public_key))

        if batch_num % 100 == 0 and batch_num != 0:
            stop = time.time()
            # txns/sec is computed over the last 100 batches only.
            txn_count = sum(len(b.transactions) for b in batches[-100:])
            fmt = 'batches {}, batch/sec: {:.2f}, txns: {}, txns/sec: {:.2f}'
            print(fmt.format(
                str(batch_num),
                100 / (stop - start),
                str(total_txn_count),
                txn_count / (stop - start)))
            start = stop

    batch_list = batch_pb2.BatchList(batches=batches)

    print("Writing to {}...".format(args.output))
    with open(args.output, "wb") as fd:
        fd.write(batch_list.SerializeToString())
def __init__(self,
             base_url,
             store_name=None,
             name='SawtoothClient',
             txntype_name=None,
             msgtype_name=None,
             keystring=None,
             keyfile=None,
             disable_client_validation=False):
    """Initialize the Sawtooth client.

    Args:
        base_url: Base URL of the validator's web interface.
        store_name: Explicit store name; takes precedence over the name
            implied by txntype_name.
        name: Client name.
        txntype_name: Transaction type name; also used as the store name
            when store_name is not given.
        msgtype_name: Message type name.
        keystring: Signing key provided directly as a string; takes
            precedence over keyfile.
        keyfile: Path to a file containing the signing key; only read
            when keystring is not provided.
        disable_client_validation: Skip client-side validation when True.

    Raises:
        ClientException: If the key file cannot be read.
    """
    self._base_url = base_url
    self._message_type = msgtype_name
    self._transaction_type = txntype_name

    # an explicit store name takes precedence over a store name
    # implied by the transaction type
    self._store_name = None
    if store_name is not None:
        self._store_name = store_name.strip('/')
    elif txntype_name is not None:
        self._store_name = txntype_name.strip('/')

    self._communication = _Communication(base_url)
    self._last_transaction = None
    self._signing_key = None
    self._identifier = None
    self._update_batch = None
    self._disable_client_validation = disable_client_validation

    if keystring:
        LOGGER.debug("set signing key from string\n%s", keystring)
        self._signing_key = keystring
    elif keyfile:
        LOGGER.debug("set signing key from file %s", keyfile)
        try:
            # Use a context manager so the file handle is closed; the
            # previous bare open().read() leaked the handle.
            with open(keyfile, "r") as key_fd:
                self._signing_key = key_fd.read().strip()
        except IOError as ex:
            raise ClientException(
                "Failed to load key file: {}".format(str(ex)))

    if self._signing_key is not None:
        self._identifier = signing.generate_identifier(
            signing.generate_pubkey(self._signing_key))
def _create_batches(self):
    """Build batches from the test YAML, resolving explicit dependencies.

    First processes all batches from the YAML file; any batch whose
    transactions reference another transaction 'id' that has not been
    produced yet is returned in `batches_waiting`.  Those are then
    re-processed repeatedly until no progress is made, and any remaining
    stragglers are processed with their unresolvable dependencies
    stripped.  Results are stored on `self._batch_results` and
    `self._batches`.
    """
    test_yaml = self._yaml_from_file()
    priv_key = signing.generate_privkey()
    pub_key = signing.generate_pubkey(priv_key)

    batches, batch_results, batches_waiting = self._process_batches(
        yaml_batches=test_yaml,
        priv_key=priv_key,
        pub_key=pub_key)
    # if there aren't any explicit dependencies that need to be created
    # based on the transaction 'id' listed in the yaml, the next two
    # code blocks won't be run.
    while len(batches_waiting) > 0:
        b, b_r, b_w = self._process_prev_batches(
            unprocessed_batches=batches_waiting,
            priv_key=priv_key,
            pub_key=pub_key)
        if len(batches_waiting) == len(b_w):
            # If any process attempt doesn't produce a new batch,
            # there is probably a cyclic dependency
            break
        if b:
            # Splice each newly-resolved batch back into its original
            # position (the placeholder key marks where it belongs).
            for batch, key in b:
                ind = batches.index(key)
                batches[ind] = batch

        batch_results.update(b_r)
        batches_waiting = b_w
    # Here process the batches with transaction dependencies that can't
    # be computed for some reason, so just strip them out.
    if batches_waiting:
        b, b_r, b_w = self._process_prev_batches(
            batches_waiting,
            priv_key=priv_key,
            pub_key=pub_key,
            strip_deps=True)
        for batch, key in b:
            ind = batches.index(key)
            batches[ind] = batch

        batch_results.update(b_r)

    self._batch_results = batch_results
    self._batches = batches
def do_generate(args, batches, keys):
    """Append args.count batches of random inc/dec intkey transactions.

    Each transaction targets a previously-populated key and explicitly
    depends on the transaction that set that key.  Throughput stats are
    printed every 100 batches.  New batches are appended to the
    caller-supplied `batches` list.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    start = time.time()
    total_txn_count = 0
    for batch_num in range(0, args.count):
        batch_txns = []
        for _ in range(0, random.randint(1, args.max_batch_size)):
            target = random.choice(list(keys))
            batch_txns.append(create_intkey_transaction(
                verb=random.choice(['inc', 'dec']),
                name=target,
                value=random.randint(1, 10),
                # Depend on the signature of the txn that set this key.
                deps=[keys[target]],
                private_key=private_key,
                public_key=public_key))
            total_txn_count += 1

        batches.append(create_batch(
            transactions=batch_txns,
            private_key=private_key,
            public_key=public_key))

        if batch_num % 100 == 0 and batch_num != 0:
            stop = time.time()
            # txns/sec is computed over the last 100 batches only.
            txn_count = sum(
                len(batch.transactions) for batch in batches[-100:])
            fmt = 'batches {}, batch/sec: {:.2f}, txns: {}, txns/sec: {:.2f}'
            print(fmt.format(
                str(batch_num),
                100 / (stop - start),
                str(total_txn_count),
                txn_count / (stop - start)))
            start = stop
def __init__(self, private=None, public=None):
    """Set up the message factory plus simulated PoET key material."""
    self._factory = MessageFactory(
        encoding="application/protobuf",
        family_name="sawtooth_validator_registry",
        family_version="1.0",
        namespace="6a4372",
        private=private,
        public=public)

    self.pubkey_hash = hashlib.sha256(public.encode()).hexdigest()

    # Key used to sign the simulated attestation verification report.
    self._report_private_key = serialization.load_pem_private_key(
        self.__REPORT_PRIVATE_KEY_PEM__.encode(),
        password=None,
        backend=backends.default_backend())

    # First we need to create a public/private key pair for the PoET
    # enclave to use.
    self._poet_private_key = \
        "1f70fa2518077ad18483f48e77882d11983b537fa5f7cf158684d2c670fe4f1f"
    self.poet_public_key = signing.generate_pubkey(self._poet_private_key)
def _read_signing_keys(key_filename):
    """Reads the given file as a WIF formatted key.

    Args:
        key_filename: The filename where the key is stored.

    Returns:
        tuple (str, str): the public and private key pair

    Raises:
        CliException: If unable to read the file.
    """
    try:
        with open(key_filename, 'r') as key_file:
            signing_key = key_file.read().strip()
            pubkey = signing.generate_pubkey(signing_key)
            return pubkey, signing_key
    except IOError as e:
        raise CliException('Unable to read key file: {}'.format(str(e)))
def _sign_message_with_transaction(transaction, message_type, key):
    """Sign a transaction and wrap it in a signed message.

    :param transaction (dict): the transaction payload; mutated in place
        (Nonce, PublicKey and Signature fields are added)
    :param message_type (str): value stored under the message __TYPE__
    :param key (str): a signing key
    returns message, txnid (tuple): the signed message and the first 16
        characters of a sha256 hexdigest of the transaction signature.
    """
    # Sign the transaction body (Nonce + PublicKey included, Signature
    # not yet present) before attaching the signature itself.
    transaction['Nonce'] = time.time()
    transaction['PublicKey'] = signing.generate_pubkey(key)
    transaction['Signature'] = signing.sign(_dict2cbor(transaction), key)

    txnid = hashlib.sha256(
        transaction['Signature'].encode()).hexdigest()[:16]

    message = {
        'Transaction': transaction,
        '__TYPE__': message_type,
        '__NONCE__': time.time(),
    }
    # The outer message is signed over its CBOR form without the
    # __SIGNATURE__ field, which is attached afterwards.
    message['__SIGNATURE__'] = signing.sign(_dict2cbor(message), key)

    return message, txnid
def do_init(args, config):
    """Initialize the client: persist the username and, if no private key
    exists yet, generate a key pair and matching address file.

    Args:
        args: Parsed command-line args (may carry a username override).
        config: The ConfigParser instance to read and update.

    Raises:
        BattleshipException: If writing the key/address files fails.
    """
    username = config.get('DEFAULT', 'username')
    if args.username is not None:
        username = args.username

    config.set('DEFAULT', 'username', username)
    print("set username: {}".format(username))
    save_config(config)

    priv_filename = config.get('DEFAULT', 'key_file')
    if priv_filename.endswith(".priv"):
        addr_filename = priv_filename[0:-len(".priv")] + ".addr"
    else:
        addr_filename = priv_filename + ".addr"

    if not os.path.exists(priv_filename):
        try:
            if not os.path.exists(os.path.dirname(priv_filename)):
                os.makedirs(os.path.dirname(priv_filename))

            privkey = signing.generate_privkey()
            pubkey = signing.generate_pubkey(privkey)
            addr = signing.generate_identifier(pubkey)

            with open(priv_filename, "w") as priv_fd:
                print("writing file: {}".format(priv_filename))
                priv_fd.write(privkey)
                priv_fd.write("\n")

            with open(addr_filename, "w") as addr_fd:
                print("writing file: {}".format(addr_filename))
                addr_fd.write(addr)
                addr_fd.write("\n")
        # Fixed: 'except IOError, ioe' is Python 2-only syntax and is a
        # SyntaxError under Python 3; 'as' works on both.
        except IOError as ioe:
            raise BattleshipException("IOError: {}".format(str(ioe)))
def create_signup_info(cls, originator_public_key_hash, nonce):
    """Simulate an SGX enclave's creation of PoET signup info.

    Generates a fresh PoET key pair, seals (base64-encodes) the signup
    data, fabricates an SGX quote and attestation verification report,
    signs the report with the class report key, and returns the
    assembled EnclaveSignupInfo.

    Args:
        originator_public_key_hash (str): Hex digest of the originator's
            public key; folded into the fake report data and PSE
            manifest.
        nonce: Nonce echoed back in the fake verification report.

    Returns:
        EnclaveSignupInfo: the simulated signup information.
    """
    with cls._lock:
        # First we need to create a public/private key pair for the PoET
        # enclave to use.
        cls._poet_private_key = signing.generate_privkey()
        cls._poet_public_key = \
            signing.generate_pubkey(cls._poet_private_key)
        # Any previously-issued wait timer is invalidated by new signup.
        cls._active_wait_timer = None

        # Simulate sealing (encrypting) the signup data.
        signup_data = {
            'poet_public_key': cls._poet_public_key,
            'poet_private_key': cls._poet_private_key
        }
        sealed_signup_data = \
            base64.b64encode(
                dict2json(signup_data).encode()).decode('utf-8')

        # Build up a fake SGX quote containing:
        # 1. The basename
        # 2. The report body that contains:
        #    a. The enclave measurement
        #    b. The report data SHA256(SHA256(OPK)|PPK)
        sgx_basename = \
            sgx_structs.SgxBasename(name=cls.__VALID_BASENAME__)
        sgx_measurement = \
            sgx_structs.SgxMeasurement(
                m=cls.__VALID_ENCLAVE_MEASUREMENT__)

        # Report data binds the originator key hash to the PoET public
        # key (both upper-cased before hashing).
        hash_input = \
            '{0}{1}'.format(
                originator_public_key_hash.upper(),
                cls._poet_public_key.upper()).encode()
        report_data = hashlib.sha256(hash_input).digest()
        sgx_report_data = sgx_structs.SgxReportData(d=report_data)
        sgx_report_body = \
            sgx_structs.SgxReportBody(
                mr_enclave=sgx_measurement,
                report_data=sgx_report_data)

        sgx_quote = \
            sgx_structs.SgxQuote(
                basename=sgx_basename,
                report_body=sgx_report_body)

        # Create a fake PSE manifest.  A base64 encoding of the
        # originator public key hash should suffice.
        pse_manifest = \
            base64.b64encode(originator_public_key_hash.encode())

        timestamp = datetime.datetime.now().isoformat()

        # Fake our "proof" data.
        verification_report = {
            'epidPseudonym': cls._anti_sybil_id,
            'id': base64.b64encode(
                hashlib.sha256(
                    timestamp.encode()).hexdigest().encode()).decode(),
            'isvEnclaveQuoteStatus': 'OK',
            'isvEnclaveQuoteBody':
                base64.b64encode(sgx_quote.serialize_to_bytes()).decode(),
            'pseManifestStatus': 'OK',
            'pseManifestHash':
                hashlib.sha256(base64.b64decode(pse_manifest)).hexdigest(),
            'nonce': nonce,
            'timestamp': timestamp
        }

        # Serialize the verification report, sign it, and then put
        # in the proof data
        verification_report_json = dict2json(verification_report)
        signature = \
            cls._report_private_key.sign(
                verification_report_json.encode(),
                padding.PKCS1v15(),
                hashes.SHA256())

        proof_data_dict = {
            'evidence_payload': {
                'pse_manifest': pse_manifest.decode()
            },
            'verification_report': verification_report_json,
            'signature': base64.b64encode(signature).decode()
        }
        proof_data = dict2json(proof_data_dict)

        return \
            EnclaveSignupInfo(
                poet_public_key=signup_data['poet_public_key'],
                proof_data=proof_data,
                anti_sybil_id=cls._anti_sybil_id,
                sealed_signup_data=sealed_signup_data)
def __init__(self, block_cache, block_sender, state_view_factory, executor,
             transaction_executor, chain_head_lock, on_chain_updated,
             squash_handler, chain_id_manager, state_delta_processor,
             identity_signing_key, data_dir, config_dir):
    """Create a ChainController.

    Args:
        block_cache: Cache of recent blocks and their processing state.
        block_sender: Interface used to send blocks out to the network.
        state_view_factory: Factory used to create state views.
        executor: Thread pool that runs block validations.
        transaction_executor: TransactionExecutor used to produce
            schedulers for batch validation.
        chain_head_lock: Lock held while the chain head is being updated;
            it protects core Journal components (BlockPublisher and
            ChainController) from seeing the BlockStore change under
            them.  Other components should instead handle block-not-found
            errors from the BlockStore explicitly.
        on_chain_updated: Callback that notifies the rest of the system
            that the head block of the chain has changed.
        squash_handler: Parameter passed when creating transaction
            schedulers.
        chain_id_manager: The ChainIdManager instance.
        state_delta_processor (:obj:`StateDeltaProcessor`): The state
            delta processor.
        identity_signing_key: Private key used for signing blocks.
        data_dir: Directory for the consensus module's persistent data.
        config_dir: Directory holding the consensus module's config.

    Returns:
        None
    """
    self._lock = RLock()
    self._chain_head_lock = chain_head_lock
    self._block_cache = block_cache
    self._block_store = block_cache.block_store
    self._state_view_factory = state_view_factory
    self._block_sender = block_sender
    self._executor = executor
    self._transaction_executor = transaction_executor
    self._notify_on_chain_updated = on_chain_updated
    self._squash_handler = squash_handler
    self._identity_signing_key = identity_signing_key
    self._identity_public_key = signing.generate_pubkey(
        self._identity_signing_key)
    self._data_dir = data_dir
    self._config_dir = config_dir

    # Blocks currently undergoing validation.
    self._blocks_processing = {}
    # Blocks whose predecessor is still being processed; each is
    # scheduled for validation once that predecessor completes.
    self._blocks_pending = {}

    self._chain_id_manager = chain_id_manager
    self._state_delta_processor = state_delta_processor

    try:
        self._chain_head = self._block_store.chain_head
        if self._chain_head is not None:
            LOGGER.info("Chain controller initialized with chain head: %s",
                        self._chain_head)
    except Exception as exc:
        LOGGER.error("Invalid block store. Head of the block chain cannot "
                     "be determined")
        LOGGER.exception(exc)
        raise

    self._notify_on_chain_updated(self._chain_head)
def setUp(self):
    """Generate a fresh private/public key pair for each test case."""
    priv = signing.generate_privkey()
    self.private_key = priv
    self.public_key = signing.generate_pubkey(priv)
def _private_to_public(private):
    """Derive and return the public key for the given private key."""
    public = signing.generate_pubkey(private)
    return public
def main(args=None):
    """Validator entry point: parse args, load config, and run the validator.

    Initialization errors are accumulated (init_errors) and reported via
    LOGGER.error() before a single sys.exit(1), so the user can correct
    multiple problems in one pass.

    Args:
        args: Command-line arguments; defaults to sys.argv[1:].
    """
    if args is None:
        args = sys.argv[1:]
    opts = parse_args(args)
    verbose_level = opts.verbose

    # Determine if any args which support delimited lists should be
    # modified
    if opts.peers:
        opts.peers = _split_comma_append_args(opts.peers)

    if opts.seeds:
        opts.seeds = _split_comma_append_args(opts.seeds)

    init_console_logging(verbose_level=verbose_level)

    try:
        path_config = load_path_config(config_dir=opts.config_dir)
    except LocalConfigurationError as local_config_err:
        LOGGER.error(str(local_config_err))
        sys.exit(1)

    try:
        opts_config = create_validator_config(opts)
        validator_config = \
            load_validator_config(opts_config, path_config.config_dir)
    except LocalConfigurationError as local_config_err:
        LOGGER.error(str(local_config_err))
        sys.exit(1)

    # Process initial initialization errors, delaying the sys.exit(1) until
    # all errors have been reported to the user (via LOGGER.error()). This
    # is intended to provide enough information to the user so they can
    # correct multiple errors before restarting the validator.
    init_errors = False
    try:
        identity_signing_key = load_identity_signing_key(
            key_dir=path_config.key_dir,
            key_name='validator')
        pubkey = signing.generate_pubkey(identity_signing_key)
    except LocalConfigurationError as e:
        log_configuration(log_dir=path_config.log_dir, name="validator")
        LOGGER.error(str(e))
        init_errors = True

    log_config = get_log_config()
    # Only configure full logging when the signing key loaded; otherwise
    # the fallback configuration above is already in place.
    if not init_errors:
        if log_config is not None:
            log_configuration(log_config=log_config)
            if log_config.get('root') is not None:
                init_console_logging(verbose_level=verbose_level)
        else:
            # Default log name embeds a public-key prefix to distinguish
            # validators sharing a log directory.
            log_configuration(log_dir=path_config.log_dir,
                              name="validator-" + pubkey[:8])

    for line in path_config.to_toml_string():
        LOGGER.info("config [path]: %s", line)

    if not check_directory(path=path_config.data_dir,
                           human_readable_name='Data'):
        init_errors = True
    if not check_directory(path=path_config.log_dir,
                           human_readable_name='Log'):
        init_errors = True

    endpoint = validator_config.endpoint
    if endpoint is None:
        # Need to use join here to get the string "0.0.0.0". Otherwise,
        # bandit thinks we are binding to all interfaces and returns a
        # Medium security risk.
        interfaces = ["*", ".".join(["0", "0", "0", "0"])]
        interfaces += netifaces.interfaces()
        endpoint = validator_config.bind_network
        # A wildcard/loopback bind address cannot serve as an
        # advertisable endpoint, so require an explicit one.
        for interface in interfaces:
            if interface in validator_config.bind_network:
                LOGGER.error("Endpoint must be set when using %s", interface)
                init_errors = True
                break

    if init_errors:
        LOGGER.error("Initialization errors occurred (see previous log "
                     "ERROR messages), shutting down.")
        sys.exit(1)

    bind_network = validator_config.bind_network
    bind_component = validator_config.bind_component

    if "tcp://" not in bind_network:
        bind_network = "tcp://" + bind_network

    if "tcp://" not in bind_component:
        bind_component = "tcp://" + bind_component

    if validator_config.network_public_key is None or \
            validator_config.network_private_key is None:
        LOGGER.warning("Network key pair is not configured, Network "
                       "communications between validators will not be "
                       "authenticated or encrypted.")

    validator = Validator(bind_network,
                          bind_component,
                          endpoint,
                          validator_config.peering,
                          validator_config.seeds,
                          validator_config.peers,
                          path_config.data_dir,
                          path_config.config_dir,
                          identity_signing_key,
                          validator_config.network_public_key,
                          validator_config.network_private_key)

    # pylint: disable=broad-except
    try:
        validator.start()
    except KeyboardInterrupt:
        LOGGER.info("Initiating graceful "
                    "shutdown (press Ctrl+C again to force)")
    except LocalConfigurationError as local_config_err:
        LOGGER.error(str(local_config_err))
        sys.exit(1)
    except GenesisError as genesis_err:
        LOGGER.error(str(genesis_err))
        sys.exit(1)
    except Exception as e:
        LOGGER.exception(e)
        sys.exit(1)
    finally:
        validator.stop()