def __init__(self):
    """Builds the mock journal fixture: mock senders, an in-memory
    block store/cache, a state db seeded with the mock consensus
    setting, signing keys, a genesis block, and a BlockPublisher
    wired to the mocks."""
    self.block_sender = MockBlockSender()
    self.batch_sender = MockBatchSender()
    self.block_store = BlockStore(DictDatabase())
    self.block_cache = BlockCache(self.block_store)
    self.state_db = {}

    # add the mock reference to the consensus
    self.state_db[_setting_address('sawtooth.consensus.algorithm')] = \
        _setting_entry('sawtooth.consensus.algorithm',
                       'test_journal.mock_consensus')

    self.state_view_factory = MockStateViewFactory(self.state_db)
    self.signing_key = signing.generate_privkey()
    self.public_key = signing.encode_pubkey(
        signing.generate_pubkey(self.signing_key), "hex")

    self.identity_signing_key = signing.generate_privkey()
    self.genesis_block = self._generate_genesis_block()
    self.set_chain_head(self.genesis_block)

    self.block_publisher = BlockPublisher(
        transaction_executor=MockTransactionExecutor(),
        block_cache=self.block_cache,
        state_view_factory=self.state_view_factory,
        block_sender=self.block_sender,
        # BUG FIX: was batch_sender=self.block_sender, which ignored the
        # MockBatchSender created above; batches must flow through the
        # dedicated batch sender.
        batch_sender=self.batch_sender,
        squash_handler=None,
        chain_head=self.genesis_block,
        identity_signing_key=self.identity_signing_key,
        data_dir=None)
def test_invalid_signature(self):
    """Verify a signature does not validate against an unrelated key."""
    message = "This is a message"
    signing_key = signer.generate_privkey()
    unrelated_key = signer.generate_privkey()
    signature = signer.sign(message, signing_key)
    wrong_pubkey = signer.generate_pubkey(unrelated_key)
    self.assertFalse(signer.verify(message, signature, wrong_pubkey))
def _create_key(self, key_name='validator.wif'):
    """Generate a private key, write its WIF encoding into the temp
    directory under *key_name*, and return the matching public key."""
    private_key = signing.generate_privkey()
    key_path = os.path.join(self._temp_dir, key_name)
    with open(key_path, 'w') as key_fd:
        key_fd.write(signing.encode_privkey(private_key))
    return signing.generate_pubkey(private_key)
def do_register_init(args, config):
    """Registers the first (admin) user if no users exist yet.

    Runs ``user list-user`` and, only when the user list is empty, the
    requested role is 'admin', and the supplied public key is the
    expected 66-hex-char length, submits a register_user batch.

    Args:
        args: parsed args carrying user_public_key, user_name,
            email_address, authorized and role.
        config: ConfigParser providing the REST endpoint url.
    """
    priv_key = signing.generate_privkey()
    pub_key = signing.generate_pubkey(priv_key)
    user_public_key = args.user_public_key
    user_name = args.user_name
    email_address = args.email_address
    authorized = args.authorized
    role = args.role

    cmd = "user list-user"
    cmd = shlex.split(cmd)
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    # BUG FIX: the original called process.wait() before reading stdout,
    # which can deadlock if the child fills the pipe buffer.
    # communicate() reads everything and reaps the child safely.
    stdout, _ = process.communicate()
    output = ''.join(
        line.strip() for line in stdout.decode("utf-8").splitlines())

    if output == "[]" and role == "admin" and len(user_public_key) == 66:
        url = config.get('DEFAULT', 'url')
        client = UserBatch(base_url=url)
        response = client.register_user(user_public_key,
                                        user_name,
                                        email_address,
                                        authorized,
                                        role,
                                        priv_key,
                                        pub_key)
        print_msg(response)
    else:
        print(ret_access_denied__msg('Invalid operation.'))
def do_keygen(args):
    """Generates a WIF-encoded private key file and matching address
    file for the given (or current OS) user.

    Args:
        args (:obj:`Namespace`): The parsed args (key_name, key_dir,
            force, quiet).

    Raises:
        CliException: If an explicit key directory does not exist, the
            default key directory cannot be created, the key files
            already exist without --force, or a file write fails.
    """
    # Default the key name to the current OS user.
    if args.key_name is not None:
        key_name = args.key_name
    else:
        key_name = getpass.getuser()

    # An explicitly supplied key directory must already exist; the
    # default ~/.sawtooth/keys directory is created on demand.
    if args.key_dir is not None:
        key_dir = args.key_dir
        if not os.path.exists(key_dir):
            raise CliException('no such directory: {}'.format(key_dir))
    else:
        key_dir = os.path.join(os.path.expanduser('~'), '.sawtooth', 'keys')
        if not os.path.exists(key_dir):
            if not args.quiet:
                print('creating key directory: {}'.format(key_dir))
            try:
                os.makedirs(key_dir)
            except IOError as e:
                raise CliException('IOError: {}'.format(str(e)))

    wif_filename = os.path.join(key_dir, key_name + '.wif')
    addr_filename = os.path.join(key_dir, key_name + '.addr')

    # Check both target files before writing anything, so a key pair is
    # never partially clobbered without --force.
    if not args.force:
        file_exists = False
        for filename in [wif_filename, addr_filename]:
            if os.path.exists(filename):
                file_exists = True
                print('file exists: {}'.format(filename), file=sys.stderr)
        if file_exists:
            raise CliException(
                'files exist, rerun with --force to overwrite existing files')

    privkey = signing.generate_privkey()
    encoded = signing.encode_privkey(privkey)
    pubkey = signing.generate_pubkey(privkey)
    addr = signing.generate_identifier(pubkey)

    try:
        wif_exists = os.path.exists(wif_filename)
        with open(wif_filename, 'w') as wif_fd:
            if not args.quiet:
                if wif_exists:
                    print('overwriting file: {}'.format(wif_filename))
                else:
                    print('writing file: {}'.format(wif_filename))
            wif_fd.write(encoded)
            wif_fd.write('\n')

        addr_exists = os.path.exists(addr_filename)
        with open(addr_filename, 'w') as addr_fd:
            if not args.quiet:
                if addr_exists:
                    print('overwriting file: {}'.format(addr_filename))
                else:
                    print('writing file: {}'.format(addr_filename))
            addr_fd.write(addr)
            addr_fd.write('\n')
    except IOError as ioe:
        raise CliException('IOError: {}'.format(str(ioe)))
def load_identity_signing_key(key_dir, key_name):
    """Loads a private key from the key directory, based on a
    validator's identity.  If no key file exists, a new key is
    generated and written out before being returned.

    Args:
        key_dir (str): The path to the key directory.
        key_name (str): The name of the key to load.

    Returns:
        str: the private signing key, in hex.
    """
    key_path = os.path.join(key_dir, '{}.wif'.format(key_name))

    if os.path.exists(key_path):
        LOGGER.debug('Found signing key %s', key_path)
        with open(key_path, 'r') as key_file:
            wif_key = key_file.read().strip()
            # The file stores the key WIF-encoded; callers expect hex.
            return signing.encode_privkey(
                signing.decode_privkey(wif_key), 'hex')
    else:
        LOGGER.info('No signing key found. Generating %s', key_path)
        priv_key = signing.generate_privkey()
        with open(key_path, 'w') as key_file:
            # Persist using the signing module's default (WIF) encoding.
            key_file.write(signing.encode_privkey(priv_key))

        return signing.encode_privkey(priv_key, 'hex')
def test_pubkey_serialization(self):
    """Round-trip a public key through hex decode/encode and confirm
    the result is unchanged."""
    # pylint: disable=protected-access
    private_key = signer.generate_privkey()
    public_key = signer.generate_pubkey(private_key)
    decoded = signer._decode_pubkey(public_key, 'hex')
    reencoded = signer._encode_pubkey(decoded, 'hex')
    self.assertTrue(str(public_key) == str(reencoded))
def test_basic_ops(self):
    """Sign a message and verify the signature with the matching key."""
    message = 'this is a message'
    private_key = signer.generate_privkey()
    public_key = signer.generate_pubkey(private_key)
    signature = signer.sign(message, private_key)
    self.assertTrue(signer.verify(message, signature, public_key))
def do_populate(args, batches, words):
    """Creates one 'set' transaction per word, appends the resulting
    batch to *batches*, and records each transaction's signature in
    *words* so later transactions can declare dependencies on it.

    Args:
        args: parsed args (unused here, kept for a uniform CLI handler
            signature).
        batches (list): output list the new batch is appended to.
        words (dict): word -> txn header signature, filled in here.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    total_txn_count = 0
    txns = []
    # PERF FIX: the original did `for i in range(0, len(words)):
    # name = list(words)[i]`, rebuilding the key list on every
    # iteration (O(n^2)).  Snapshot the keys once and iterate.
    for name in list(words):
        txn = create_intkey_transaction(
            verb='set',
            name=name,
            value=random.randint(9000, 100000),
            deps=[],
            private_key=private_key,
            public_key=public_key)
        total_txn_count += 1
        txns.append(txn)
        # Establish the signature of the txn associated with the word
        # so we can create good dependencies later
        words[name] = txn.header_signature

    batch = create_batch(
        transactions=txns,
        private_key=private_key,
        public_key=public_key)

    batches.append(batch)
def do_populate(args):
    """Generates one 'set' transaction per generated word, wraps them
    all in a single batch, and writes the serialized BatchList to
    args.output.

    Args:
        args: parsed args providing pool_size and the output file path.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    words = generate_word_list(args.pool_size)

    batches = []
    total_txn_count = 0
    txns = []
    # IDIOM FIX: iterate the words directly instead of the index loop
    # `for i in range(0, len(words))` with `words[i]` lookups.
    for word in words:
        txn = create_intkey_transaction(
            verb='set',
            name=word,
            value=random.randint(9000, 100000),
            private_key=private_key,
            public_key=public_key)
        total_txn_count += 1
        txns.append(txn)

    batch = create_batch(
        transactions=txns,
        private_key=private_key,
        public_key=public_key)

    batches.append(batch)

    batch_list = batch_pb2.BatchList(batches=batches)

    print("Writing to {}...".format(args.output))
    with open(args.output, "wb") as fd:
        fd.write(batch_list.SerializeToString())
def __init__(self, test_name, tester):
    """Set up a signing key pair and a ValidatorRegistryMessageFactory
    bound to it."""
    super().__init__(test_name)
    self.tester = tester
    self.private_key = signing.generate_privkey()
    raw_pubkey = signing.generate_pubkey(self.private_key)
    self.public_key = signing.encode_pubkey(raw_pubkey, "hex")
    self.factory = ValidatorRegistryMessageFactory(
        private=self.private_key, public=self.public_key)
def __init__(self, delegate, args):
    """Initialize workload state: stream/batch bookkeeping, a lock for
    shared state, dependency tracking, and a signing key pair."""
    super(IntKeyWorkload, self).__init__(delegate, args)
    self._streams = []
    self._pending_batches = {}
    self._lock = threading.Lock()
    self._delegate = delegate
    self._deps = {}
    key = signing.generate_privkey()
    self._private_key = key
    self._public_key = signing.generate_pubkey(key)
def do_keygen(args):
    """Executes the key generation operation, given the parsed arguments.

    Generates a WIF private key file and matching address file in the
    validator key directory, refusing to overwrite existing files
    unless --force was supplied.

    Args:
        args (:obj:`Namespace`): The parsed args.

    Raises:
        CliException: If the key directory does not exist, the key
            files already exist without --force, or a file write fails.
    """
    if args.key_name is not None:
        key_name = args.key_name
    else:
        key_name = 'validator'

    key_dir = get_key_dir()

    if not os.path.exists(key_dir):
        raise CliException("Key directory does not exist: {}".format(key_dir))

    wif_filename = os.path.join(key_dir, key_name + '.wif')
    addr_filename = os.path.join(key_dir, key_name + '.addr')

    # Check both files up front so a key pair is never half-clobbered.
    if not args.force:
        file_exists = False
        for filename in [wif_filename, addr_filename]:
            if os.path.exists(filename):
                file_exists = True
                print('file exists: {}'.format(filename), file=sys.stderr)
        if file_exists:
            raise CliException(
                'files exist, rerun with --force to overwrite existing files')

    privkey = signing.generate_privkey()
    encoded = signing.encode_privkey(privkey)
    pubkey = signing.generate_pubkey(privkey)
    addr = signing.generate_identifier(pubkey)

    try:
        wif_exists = os.path.exists(wif_filename)
        with open(wif_filename, 'w') as wif_fd:
            if not args.quiet:
                if wif_exists:
                    print('overwriting file: {}'.format(wif_filename))
                else:
                    print('writing file: {}'.format(wif_filename))
            wif_fd.write(encoded)
            wif_fd.write('\n')

        addr_exists = os.path.exists(addr_filename)
        with open(addr_filename, 'w') as addr_fd:
            if not args.quiet:
                if addr_exists:
                    print('overwriting file: {}'.format(addr_filename))
                else:
                    print('writing file: {}'.format(addr_filename))
            addr_fd.write(addr)
            addr_fd.write('\n')
    except IOError as ioe:
        raise CliException('IOError: {}'.format(str(ioe)))
def get_keys():
    """Generates a new key pair and returns it as a JSON string with
    'public_key' and 'private_key' entries.

    Returns:
        str: JSON object string containing both keys.
    """
    # Get public and private key
    privkey = signing.generate_privkey()
    pubkey = signing.generate_pubkey(privkey)
    # IDIOM FIX: build the dict directly instead of round-tripping an
    # empty "{}" string through json.loads.
    keys = {
        "public_key": pubkey,
        "private_key": privkey,
    }
    return json.dumps(keys)
def test_set_status(self):
    """Tests that set_status() has the correct behavior.

    Basically:
        1. Adds a batch which has two transactions.
        2. Calls next_transaction() to get the first Transaction.
        3. Calls next_transaction() to verify that it returns None.
        4. Calls set_status() to mark the first transaction applied.
        5. Calls next_transaction() to get the second Transaction.

    Step 3 returns None because the first transaction hasn't been marked
    as applied, and the SerialScheduler will only return one
    not-applied Transaction at a time.

    Step 5 is expected to return the second Transaction, not None,
    since the first Transaction was marked as applied in the previous
    step.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    context_manager = ContextManager(dict_database.DictDatabase())
    squash_handler = context_manager.get_squash_handler()
    first_state_root = context_manager.get_first_root()
    scheduler = SerialScheduler(squash_handler, first_state_root)

    # 1) one batch holding two transactions, 'a' then 'b'
    txns = []

    for name in ['a', 'b']:
        txn = create_transaction(
            name=name,
            private_key=private_key,
            public_key=public_key)

        txns.append(txn)

    batch = create_batch(
        transactions=txns,
        private_key=private_key,
        public_key=public_key)

    scheduler.add_batch(batch)

    # 2) the first transaction is returned...
    scheduled_txn_info = scheduler.next_transaction()
    self.assertIsNotNone(scheduled_txn_info)
    self.assertEquals('a', scheduled_txn_info.txn.payload.decode())

    # 3) ...and no further transaction until a result is set for it
    self.assertIsNone(scheduler.next_transaction())

    # 4) record a result for the first transaction
    scheduler.set_transaction_execution_result(
        scheduled_txn_info.txn.header_signature,
        is_valid=False,
        context_id=None)

    # 5) the second transaction now becomes available
    scheduled_txn_info = scheduler.next_transaction()
    self.assertIsNotNone(scheduled_txn_info)
    self.assertEquals('b', scheduled_txn_info.txn.payload.decode())
def setUp(self):
    """Build a Completer over an empty block store and mock gossip,
    routing its callbacks into this test, and reset fixtures."""
    self.block_store = BlockStore({})
    self.gossip = MockGossip()
    self.completer = Completer(self.block_store, self.gossip)
    self.completer._on_block_received = self._on_block_received
    self.completer._on_batch_received = self._on_batch_received
    self.private_key = signing.generate_privkey()
    raw_pubkey = signing.generate_pubkey(self.private_key)
    self.public_key = signing.encode_pubkey(raw_pubkey, "hex")
    self.blocks = []
    self.batches = []
def test_transaction_order(self):
    """Tests that transactions are returned in order added.

    Adds three batches with varying number of transactions, then tests
    that they are returned in the appropriate order when using an
    iterator.

    This test also creates a second iterator and verifies that both
    iterators return the same transactions.

    This test also finalizes the scheduler and verifies that
    StopIteration is thrown by the iterator.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    context_manager = ContextManager(dict_database.DictDatabase())
    squash_handler = context_manager.get_squash_handler()
    first_state_root = context_manager.get_first_root()
    scheduler = SerialScheduler(squash_handler, first_state_root)

    # Three batches of 3, 2 and 4 transactions; txns keeps the flat,
    # expected overall ordering.
    txns = []

    for names in [['a', 'b', 'c'], ['d', 'e'], ['f', 'g', 'h', 'i']]:
        batch_txns = []

        for name in names:
            txn = create_transaction(
                name=name,
                private_key=private_key,
                public_key=public_key)

            batch_txns.append(txn)
            txns.append(txn)

        batch = create_batch(
            transactions=batch_txns,
            private_key=private_key,
            public_key=public_key)

        scheduler.add_batch(batch)
    scheduler.finalize()

    # Both iterators must yield the same scheduled transactions, in the
    # order the transactions were added.
    iterable1 = iter(scheduler)
    iterable2 = iter(scheduler)
    for txn in txns:
        scheduled_txn_info = next(iterable1)
        self.assertEqual(scheduled_txn_info, next(iterable2))
        self.assertIsNotNone(scheduled_txn_info)
        self.assertEquals(txn.payload, scheduled_txn_info.txn.payload)
        # The serial scheduler only advances once a result is set.
        scheduler.set_transaction_execution_result(txn.header_signature,
                                                   False, None)

    with self.assertRaises(StopIteration):
        next(iterable1)
def __init__(self, test_name, tester):
    """Sets up a signing key pair, a ValidatorRegistryMessageFactory
    bound to it, and the fixed report signing key used by the tests."""
    super().__init__(test_name)
    self.tester = tester
    self.private_key = signing.generate_privkey()
    self.public_key = signing.encode_pubkey(
        signing.generate_pubkey(self.private_key), "hex")
    self.factory = ValidatorRegistryMessageFactory(
        private=self.private_key, public=self.public_key)
    # Decode the fixed WIF-encoded report key and keep it hex-encoded.
    self._report_private_key = \
        signing.encode_privkey(
            signing.decode_privkey(
                '5Jz5Kaiy3kCiHE537uXcQnJuiNJshf2bZZn43CrALMGoCd3zRuo',
                'wif'),
            'hex')
def _create_temporary_key_file():
    """
    A useful helper method for derived classes.  Remember to close the
    returned temporary file so that it gets deleted.

    Returns:
        A NamedTemporaryFile object (text mode).
    """
    # BUG FIX: NamedTemporaryFile() defaults to binary mode ('w+b'),
    # so writing the str returned by encode_privkey raises TypeError
    # on Python 3.  Open in text mode instead.
    key_file = NamedTemporaryFile(mode='w+')
    private_key = signing.generate_privkey()
    encoded_key = signing.encode_privkey(private_key)
    key_file.write(encoded_key)
    key_file.write('\n')
    key_file.flush()
    return key_file
def test_completion_on_last_result(self):
    """Tests that the schedule is not marked complete until the last
    result is set.

    Adds three batches with varying number of transactions, then tests
    that they are returned in the appropriate order when using an
    iterator.

    Test that the value of `complete` is false until the last value.

    This test also finalizes the scheduler and verifies that
    StopIteration is thrown by the iterator, and that complete is true
    at the end.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    txns = []

    for names in [['a', 'b', 'c'], ['d', 'e'], ['f', 'g', 'h', 'i']]:
        batch_txns = []

        for name in names:
            txn = create_transaction(
                name=name,
                private_key=private_key,
                public_key=public_key)

            batch_txns.append(txn)
            txns.append(txn)

        batch = create_batch(
            transactions=batch_txns,
            private_key=private_key,
            public_key=public_key)

        self.scheduler.add_batch(batch)
    self.scheduler.finalize()

    iterable1 = iter(self.scheduler)
    for txn in txns:
        scheduled_txn_info = next(iterable1)
        # Not complete while any transaction still lacks a result.
        self.assertFalse(self.scheduler.complete(block=False))
        self.scheduler.set_transaction_execution_result(
            txn.header_signature, False, None)

    # Only once the final result is in does the schedule complete.
    self.assertTrue(self.scheduler.complete(block=False))

    with self.assertRaises(StopIteration):
        next(iterable1)
def test_completion_on_finalize(self):
    """Tests that iteration will stop when finalized is called on an
    otherwise complete scheduler.

    Adds one batch and transaction, then verifies the iterable returns
    that transaction.  Sets the execution result and then calls
    finalize.  Since the scheduler is complete (all transactions have
    had results set, and it's been finalized), we should get a
    StopIteration.  This check is useful in making sure the finalize()
    can occur after all set_transaction_execution_result()s have been
    performed, because in a normal situation, finalize will probably
    occur prior to those calls.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    context_manager = ContextManager(dict_database.DictDatabase())
    squash_handler = context_manager.get_squash_handler()
    first_state_root = context_manager.get_first_root()
    scheduler = SerialScheduler(squash_handler, first_state_root)

    txn = create_transaction(
        name='a',
        private_key=private_key,
        public_key=public_key)
    batch = create_batch(
        transactions=[txn],
        private_key=private_key,
        public_key=public_key)

    # The iterator is created before the batch is added; it should
    # still yield the transaction once add_batch() runs.
    iterable = iter(scheduler)
    scheduler.add_batch(batch)

    scheduled_txn_info = next(iterable)
    self.assertIsNotNone(scheduled_txn_info)
    self.assertEquals(txn.payload, scheduled_txn_info.txn.payload)
    scheduler.set_transaction_execution_result(txn.header_signature,
                                               False, None)

    # finalize() after all results are set: iteration must stop.
    scheduler.finalize()

    with self.assertRaises(StopIteration):
        next(iterable)
def do_init(args, config):
    """Handles the 'init' subcommand: persists username and url to the
    config file and generates a WIF key pair (plus address file) if one
    does not exist yet.

    Args:
        args: parsed args that may override username and url.
        config: ConfigParser holding the current settings.

    Raises:
        XoException: if writing the key files fails.
    """
    # Command-line values take precedence over stored config values.
    username = config.get('DEFAULT', 'username')
    if args.username is not None:
        username = args.username

    url = config.get('DEFAULT', 'url')
    if args.url is not None:
        url = args.url

    config.set('DEFAULT', 'username', username)
    print("set username: {}".format(username))
    config.set('DEFAULT', 'url', url)
    print("set url: {}".format(url))

    save_config(config)

    # Derive the address filename from the configured key filename.
    wif_filename = config.get('DEFAULT', 'key_file')
    if wif_filename.endswith(".wif"):
        addr_filename = wif_filename[0:-len(".wif")] + ".addr"
    else:
        addr_filename = wif_filename + ".addr"

    # Only generate a key pair on first use.
    if not os.path.exists(wif_filename):
        try:
            if not os.path.exists(os.path.dirname(wif_filename)):
                os.makedirs(os.path.dirname(wif_filename))

            privkey = signing.generate_privkey()
            encoded = signing.encode_privkey(privkey, 'wif')
            pubkey = signing.generate_pubkey(privkey)
            addr = signing.generate_identifier(pubkey)

            with open(wif_filename, "w") as wif_fd:
                print("writing file: {}".format(wif_filename))
                wif_fd.write(encoded)
                wif_fd.write("\n")
            with open(addr_filename, "w") as addr_fd:
                print("writing file: {}".format(addr_filename))
                addr_fd.write(addr)
                addr_fd.write("\n")
        except IOError as ioe:
            raise XoException("IOError: {}".format(str(ioe)))
def do_init(args, config):
    """Handles the 'init' subcommand: persists username and url to the
    config file and generates a key pair (plus address file) if one
    does not exist yet.

    Args:
        args: parsed args that may override username and url.
        config: ConfigParser holding the current settings.

    Raises:
        SupplyChainException: if writing the key files fails.
    """
    # Command-line values take precedence over stored config values.
    username = config.get('DEFAULT', 'username')
    if args.username is not None:
        username = args.username

    url = config.get('DEFAULT', 'url')
    if args.url is not None:
        url = args.url

    config.set('DEFAULT', 'username', username)
    print("set username: {}".format(username))
    config.set('DEFAULT', 'url', url)
    print("set url: {}".format(url))

    save_config(config)

    # Derive the address filename from the configured key filename.
    priv_filename = config.get('DEFAULT', 'key_file')
    if priv_filename.endswith(".priv"):
        addr_filename = priv_filename[0:-len(".priv")] + ".addr"
    else:
        addr_filename = priv_filename + ".addr"

    # Only generate a key pair on first use.
    if not os.path.exists(priv_filename):
        try:
            if not os.path.exists(os.path.dirname(priv_filename)):
                os.makedirs(os.path.dirname(priv_filename))

            privkey = signing.generate_privkey()
            pubkey = signing.generate_pubkey(privkey)
            addr = signing.generate_identifier(pubkey)

            with open(priv_filename, "w") as priv_fd:
                print("writing file: {}".format(priv_filename))
                # NOTE(review): the key is written exactly as returned
                # by generate_privkey(), without the encode_privkey()
                # step the other CLIs use -- confirm generate_privkey()
                # returns a writable string here.
                priv_fd.write(privkey)
                priv_fd.write("\n")
            with open(addr_filename, "w") as addr_fd:
                print("writing file: {}".format(addr_filename))
                addr_fd.write(addr)
                addr_fd.write("\n")
        except IOError as ioe:
            raise SupplyChainException("IOError: {}".format(str(ioe)))
def test_completion_on_finalize_only_when_done(self):
    """Tests that finalize() alone does not mark the schedule complete;
    the outstanding execution result must be set as well.

    Adds one batch and transaction, then verifies the iterable returns
    that transaction.  Finalizes then sets the execution result.  The
    schedule should not be marked as complete until the result is set.
    This check is useful in making sure the finalize() can occur after
    all set_transaction_execution_result()s have been performed, because
    in a normal situation, finalize will probably occur prior to those
    calls.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    txn = create_transaction(
        name='a',
        private_key=private_key,
        public_key=public_key)
    batch = create_batch(
        transactions=[txn],
        private_key=private_key,
        public_key=public_key)

    iterable = iter(self.scheduler)

    self.scheduler.add_batch(batch)

    scheduled_txn_info = next(iterable)
    self.assertIsNotNone(scheduled_txn_info)
    self.assertEquals(txn.payload, scheduled_txn_info.txn.payload)
    # Finalizing alone must NOT complete the schedule...
    self.scheduler.finalize()
    self.assertFalse(self.scheduler.complete(block=False))
    # ...only setting the last outstanding result does.
    self.scheduler.set_transaction_execution_result(
        txn.header_signature, False, None)
    self.assertTrue(self.scheduler.complete(block=False))

    with self.assertRaises(StopIteration):
        next(iterable)
def do_init(args, config):
    """Persist username/url settings and generate a key pair (.priv and
    .pub files) on first run."""
    if args.username:
        username = args.username
    else:
        username = config.get('DEFAULT', 'username')
    if args.url:
        url = args.url
    else:
        url = config.get('DEFAULT', 'url')

    config.set('DEFAULT', 'username', username)
    config.set('DEFAULT', 'url', url)
    print("set username: %s" % username)
    print("set url: %s" % url)
    save_config(config)

    key_path = config.get('DEFAULT', 'key_file')
    suffix = ".priv"
    if key_path.endswith(suffix):
        pub_path = key_path[:-len(suffix)] + ".pub"
    else:
        pub_path = key_path + ".pub"

    if not os.path.exists(key_path):
        try:
            key_dir = os.path.dirname(key_path)
            if not os.path.exists(key_dir):
                os.makedirs(key_dir)

            new_privkey = signing.generate_privkey()
            new_pubkey = signing.generate_pubkey(new_privkey)

            with open(key_path, "w") as out_fd:
                print("writing file: {}".format(key_path))
                out_fd.write(new_privkey)
                out_fd.write("\n")

            with open(pub_path, "w") as out_fd:
                print("writing file: {}".format(pub_path))
                out_fd.write(new_pubkey)
                out_fd.write("\n")
        except IOError as ioe:
            raise BattleshipException("IOError: {}".format(str(ioe)))
def do_generate(args, batches, words):
    """Generates args.count batches of random inc/dec intkey
    transactions over the given words, appending them to *batches* and
    printing throughput statistics every 100 batches.

    Args:
        args: parsed args providing count and batch_max_size.
        batches (list): output list the new batches are appended to.
        words (dict): word -> txn signature map used for dependencies.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    start = time.time()
    total_txn_count = 0
    for i in range(0, args.count):
        txns = []
        # Each batch gets a random number of transactions, up to the
        # configured maximum batch size.
        for _ in range(0, random.randint(1, args.batch_max_size)):
            name = random.choice(list(words))
            txn = create_intkey_transaction(
                verb=random.choice(['inc', 'dec']),
                name=name,
                value=1,
                # Depend on the txn whose signature was recorded for
                # this word.
                deps=[words[name]],
                private_key=private_key,
                public_key=public_key)
            total_txn_count += 1
            txns.append(txn)

        batch = create_batch(
            transactions=txns,
            private_key=private_key,
            public_key=public_key)

        batches.append(batch)

        # Report throughput over the last 100 batches.
        if i % 100 == 0 and i != 0:
            stop = time.time()

            txn_count = 0
            for batch in batches[-100:]:
                txn_count += len(batch.transactions)

            fmt = 'batches {}, batch/sec: {:.2f}, txns: {}, txns/sec: {:.2f}'
            print(fmt.format(
                str(i),
                100 / (stop - start),
                str(total_txn_count),
                txn_count / (stop - start)))
            start = stop
def test_valid_batch_invalid_batch(self):
    """Tests the squash function. That the correct hash is being used
    for each txn and that the batch ending state hash is being set.

    Basically:
        1. Adds two batches, one where all the txns are valid,
           and one where one of the txns is invalid.
        2. Run through the scheduler executor interaction
           as txns are processed.
        3. Verify that the valid state root is obtained
           through the squash function.
        4. Verify that correct batch statuses are set
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    # 1)
    batch_signatures = []
    for names in [['a', 'b'], ['invalid', 'c']]:
        batch_txns = []
        for name in names:
            txn = create_transaction(
                name=name,
                private_key=private_key,
                public_key=public_key)

            batch_txns.append(txn)

        batch = create_batch(
            transactions=batch_txns,
            private_key=private_key,
            public_key=public_key)

        batch_signatures.append(batch.header_signature)
        self.scheduler.add_batch(batch)
    self.scheduler.finalize()

    # 2) drive the scheduler the way an executor would: the 'invalid'
    # txn is recognized by its payload hash and marked failed; all
    # others get a state change and a valid result.
    sched1 = iter(self.scheduler)
    invalid_payload = hashlib.sha512('invalid'.encode()).hexdigest()
    while not self.scheduler.complete(block=False):
        txn_info = next(sched1)
        txn_header = transaction_pb2.TransactionHeader()
        txn_header.ParseFromString(txn_info.txn.header)
        inputs_or_outputs = list(txn_header.inputs)
        c_id = self.context_manager.create_context(
            state_hash=txn_info.state_hash,
            inputs=inputs_or_outputs,
            outputs=inputs_or_outputs,
            base_contexts=txn_info.base_context_ids)
        if txn_header.payload_sha512 == invalid_payload:
            self.scheduler.set_transaction_execution_result(
                txn_info.txn.header_signature, False, c_id)
        else:
            self.context_manager.set(c_id, [{inputs_or_outputs[0]: 1}])
            self.scheduler.set_transaction_execution_result(
                txn_info.txn.header_signature, True, c_id)

    # 3) replay the schedule and check each txn's starting state hash.
    sched2 = iter(self.scheduler)

    txn_info_a = next(sched2)
    # txn 'a' starts from the initial state root
    self.assertEquals(self.first_state_root, txn_info_a.state_hash)

    txn_a_header = transaction_pb2.TransactionHeader()
    txn_a_header.ParseFromString(txn_info_a.txn.header)
    inputs_or_outputs = list(txn_a_header.inputs)
    address_a = inputs_or_outputs[0]
    c_id_a = self.context_manager.create_context(
        state_hash=self.first_state_root,
        inputs=inputs_or_outputs,
        outputs=inputs_or_outputs,
        base_contexts=txn_info_a.base_context_ids)
    self.context_manager.set(c_id_a, [{address_a: 1}])
    state_root2 = self.context_manager.commit_context(
        [c_id_a], virtual=False)

    txn_info_b = next(sched2)
    # txn 'b' starts from the state root committed after 'a'
    self.assertEquals(txn_info_b.state_hash, state_root2)

    txn_b_header = transaction_pb2.TransactionHeader()
    txn_b_header.ParseFromString(txn_info_b.txn.header)
    inputs_or_outputs = list(txn_b_header.inputs)
    address_b = inputs_or_outputs[0]
    c_id_b = self.context_manager.create_context(
        state_hash=state_root2,
        inputs=inputs_or_outputs,
        outputs=inputs_or_outputs,
        base_contexts=txn_info_b.base_context_ids)
    self.context_manager.set(c_id_b, [{address_b: 1}])
    state_root3 = self.context_manager.commit_context(
        [c_id_b], virtual=False)

    # Both txns of the invalid batch start from state_root3.
    txn_infoInvalid = next(sched2)
    self.assertEquals(txn_infoInvalid.state_hash, state_root3)

    txn_info_c = next(sched2)
    self.assertEquals(txn_info_c.state_hash, state_root3)

    # 4) first batch valid with the final state root; second batch
    # invalid with no state hash.
    batch1_result = self.scheduler.get_batch_execution_result(
        batch_signatures[0])
    self.assertTrue(batch1_result.is_valid)
    self.assertEquals(batch1_result.state_hash, state_root3)

    batch2_result = self.scheduler.get_batch_execution_result(
        batch_signatures[1])
    self.assertFalse(batch2_result.is_valid)
    self.assertIsNone(batch2_result.state_hash)
def test_add_batch_after_empty_iteration(self):
    """Tests that iterations will continue as result of add_batch().

    This test calls next() on a scheduler iterator in a separate thread
    called the IteratorThread.  The test waits until the IteratorThread
    is waiting in next(); internal to the scheduler, it will be waiting
    on a condition variable as there are no transactions to return and
    the scheduler is not finalized.  Then, the test continues by running
    add_batch(), which should cause the next() running in the
    IterableThread to return a transaction.

    This demonstrates the scheduler's ability to wait on an empty
    iterator but continue as transactions become available via
    add_batch.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    # Create a basic transaction and batch.
    txn = create_transaction(
        name='a',
        private_key=private_key,
        public_key=public_key)
    batch = create_batch(
        transactions=[txn],
        private_key=private_key,
        public_key=public_key)

    # This class is used to run the scheduler's iterator.
    class IteratorThread(threading.Thread):
        def __init__(self, iterable):
            threading.Thread.__init__(self)
            self._iterable = iterable
            self.ready = False
            self.condition = threading.Condition()
            self.txn_info = None

        def run(self):
            # Even with this lock here, there is a race condition
            # between exit of the lock and entry into the iterable.
            # That is solved by sleep later in the test.
            with self.condition:
                self.ready = True
                self.condition.notify()
            txn_info = next(self._iterable)
            with self.condition:
                self.txn_info = txn_info
                self.condition.notify()

    # This is the iterable we are testing, which we will use in the
    # IteratorThread.  We also use it in this thread below to test
    # for StopIteration.
    iterable = iter(self.scheduler)

    # Create and startup thread.
    thread = IteratorThread(iterable=iterable)
    thread.start()

    # Pause here to make sure the thread is absolutely as far along as
    # possible; in other words, right before we call next() in its
    # run() method.  When this returns, there should be very little
    # time until the iterator is blocked on a condition variable.
    with thread.condition:
        while not thread.ready:
            thread.condition.wait()

    # May the daemons stay away during this dark time, and may we be
    # forgiven upon our return.
    time.sleep(1)

    # At this point, the IteratorThread should be waiting next(), so we
    # go ahead and give it a batch.
    self.scheduler.add_batch(batch)

    # If all goes well, thread.txn_info will get set to the result of
    # the next() call.  If not, it will timeout and thread.txn_info
    # will be empty.
    with thread.condition:
        if thread.txn_info is None:
            thread.condition.wait(5)

    # If thread.txn_info is empty, the test failed as iteration did not
    # continue after add_batch().
    self.assertIsNotNone(thread.txn_info, "iterable failed to return txn")
    self.assertEquals(txn.payload, thread.txn_info.txn.payload)

    # Continue with normal shutdown/cleanup.
    self.scheduler.finalize()
    self.scheduler.set_transaction_execution_result(
        txn.header_signature, False, None)
    with self.assertRaises(StopIteration):
        next(iterable)
def _private():
    """Return a freshly generated private signing key."""
    return signing.generate_privkey()
def create_signup_info(cls,
                       originator_public_key_hash,
                       most_recent_wait_certificate_id):
    """Simulates PoET enclave signup: generates a PoET key pair, fakes
    the sealed signup data, builds a simulated SGX quote and PSE
    manifest, and signs a fabricated attestation verification report.

    Args:
        originator_public_key_hash (str): hash of the originator's
            public key; embedded in the quote's report data and used
            as the anti-Sybil id.
        most_recent_wait_certificate_id (str): used as the report
            nonce.

    Returns:
        EnclaveSignupInfo: the simulated signup information.
    """
    with cls._lock:
        # First we need to create a public/private key pair for the
        # PoET enclave to use.
        cls._poet_private_key = signing.generate_privkey()
        cls._poet_public_key = \
            signing.generate_pubkey(cls._poet_private_key)
        cls._active_wait_timer = None

        # We are going to fake out sealing the signup data.  Note that
        # the signing module uses strings for both private (WIF encoded)
        # and public (hex encoded) key canonical formats.  Therefore, we
        # don't have to encode before putting in the signup data.  This
        # also means that on the flip side (unsealing signup data and
        # verifying signatures using public keys), we don't have to
        # decode before using.
        signup_data = {
            'poet_public_key': cls._poet_public_key,
            'poet_private_key': cls._poet_private_key
        }
        sealed_signup_data = \
            base64.b64encode(bytes(dict2json(signup_data).encode()))

        # Build up a fake SGX quote containing:
        # 1. The basename
        # 2. The report body that contains:
        #    a. The enclave measurement
        #    b. The report data SHA256(SHA256(OPK)|PPK)
        sgx_basename = \
            sgx_structs.SgxBasename(name=cls.__VALID_BASENAME__)
        sgx_measurement = \
            sgx_structs.SgxMeasurement(
                m=cls.__VALID_ENCLAVE_MEASUREMENT__)

        hash_input = \
            '{0}{1}'.format(
                originator_public_key_hash.upper(),
                cls._poet_public_key.upper()).encode()
        report_data = hashlib.sha256(hash_input).digest()
        sgx_report_data = sgx_structs.SgxReportData(d=report_data)
        sgx_report_body = \
            sgx_structs.SgxReportBody(
                mr_enclave=sgx_measurement,
                report_data=sgx_report_data)

        sgx_quote = \
            sgx_structs.SgxQuote(
                basename=sgx_basename,
                report_body=sgx_report_body)

        # Create a fake PSE manifest.  A base64 encoding of the
        # originator public key hash should suffice.
        pse_manifest = \
            base64.b64encode(originator_public_key_hash.encode())

        timestamp = datetime.datetime.now().isoformat()

        # Fake our "proof" data.
        verification_report = {
            'epidPseudonym': originator_public_key_hash,
            'id': base64.b64encode(
                hashlib.sha256(
                    timestamp.encode()).hexdigest().encode()).decode(),
            'isvEnclaveQuoteStatus': 'OK',
            'isvEnclaveQuoteBody':
                base64.b64encode(
                    sgx_quote.serialize_to_bytes()).decode(),
            'pseManifestStatus': 'OK',
            'pseManifestHash':
                base64.b64encode(
                    hashlib.sha256(
                        pse_manifest).hexdigest().encode()).decode(),
            'nonce': most_recent_wait_certificate_id,
            'timestamp': timestamp
        }

        # Serialize the verification report, sign it, and then put
        # in the proof data
        verification_report_json = dict2json(verification_report)
        signature = \
            cls._report_private_key.sign(
                verification_report_json.encode(),
                padding.PKCS1v15(),
                hashes.SHA256())

        proof_data_dict = {
            'evidence_payload': {
                'pse_manifest': pse_manifest.decode()
            },
            'verification_report': verification_report_json,
            'signature': base64.b64encode(signature).decode()
        }
        proof_data = dict2json(proof_data_dict)

        return \
            EnclaveSignupInfo(
                poet_public_key=signup_data['poet_public_key'],
                proof_data=proof_data,
                anti_sybil_id=originator_public_key_hash,
                sealed_signup_data=sealed_signup_data)
class _PoetEnclaveSimulator(object):
    """Software simulation of the PoET SGX enclave.

    All state a real enclave would keep internally is held as class-level
    attributes; cls._lock serializes access from multiple threads.
    """

    # A lock to protect threaded access
    _lock = threading.Lock()

    # The private key we generate to sign the certificate ID when creating
    # the random wait timeout value
    _seal_private_key = signing.generate_privkey()
    _seal_public_key = signing.generate_pubkey(_seal_private_key)

    # The basename and enclave measurement values we will put into and verify
    # are in the enclave quote in the attestation verification report.
    __VALID_BASENAME__ = \
        bytes.fromhex(
            'b785c58b77152cbe7fd55ee3851c4990'
            '00000000000000000000000000000000')
    __VALID_ENCLAVE_MEASUREMENT__ = \
        bytes.fromhex(
            'c99f21955e38dbb03d2ca838d3af6e43'
            'ef438926ed02db4cc729380c8c7a174e')

    # We use the report private key PEM to create the private key used to
    # sign attestation verification reports.  On the flip side, the report
    # public key PEM is used to create the public key used to verify the
    # signature on the attestation verification reports.
    __REPORT_PRIVATE_KEY_PEM__ = \
        '-----BEGIN PRIVATE KEY-----\n' \
        'MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCsy/NmLwZP6Uj0\n' \
        'p5mIiefgK8VOK7KJ34g3h0/X6aFOd/Ff4j+e23wtQpkxsjVHWLM5SjElGhfpVDhL\n' \
        '1WAMsQI9bpCWR4sjV6p7gOJhv34nkA2Grj5eSHCAJRQXCl+pJ9dYIeKaNoaxkdtq\n' \
        '+Xme//ohtkkv/ZjMTfsjMl0RLXokJ+YhSuTpNSovRaCtZfLB5MihVJuV3Qzb2ROh\n' \
        'KQxcuyPy9tBtOIrBWJaFiXOLRxAijs+ICyzrqUBbRfoAztkljIBx9KNItHiC4zPv\n' \
        'o6DxpGSO2yMQSSrs13PkfyGWVZSgenEYOouEz07X+H5B29PPuW5mCl4nkoH3a9gv\n' \
        'rI6VLEx9AgMBAAECggEAImfFge4RCq4/eX85gcc7pRXyBjuLJAqe+7d0fWAmXxJg\n' \
        'vB+3XTEEi5p8GDoMg7U0kk6kdGe6pRnAz9CffEduU78FCPcbzCCzcD3cVWwkeUok\n' \
        'd1GQV4OC6vD3DBNjsrGdHg45KU18CjUphCZCQhdjvXynG+gZmWxZecuYXkg4zqPT\n' \
        'LwOkcdWBPhJ9CbjtiYOtKDZbhcbdfnb2fkxmvnAoz1OWNfVFXh+x7651FrmL2Pga\n' \
        'xGz5XoxFYYT6DWW1fL6GNuVrd97wkcYUcjazMgunuUMC+6XFxqK+BoqnxeaxnsSt\n' \
        'G2r0sdVaCyK1sU41ftbEQsc5oYeQ3v5frGZL+BgrYQKBgQDgZnjqnVI/B+9iarx1\n' \
        'MjAFyhurcKvFvlBtGKUg9Q62V6wI4VZvPnzA2zEaR1J0cZPB1lCcMsFACpuQF2Mr\n' \
        '3VDyJbnpSG9q05POBtfLjGQdXKtGb8cfXY2SwjzLH/tvxHm3SP+RxvLICQcLX2/y\n' \
        'GTJ+mY9C6Hs6jIVLOnMWkRWamQKBgQDFITE3Qs3Y0ZwkKfGQMKuqJLRw29Tyzw0n\n' \
        'XKaVmO/pEzYcXZMPBrFhGvdmNcJLo2fcsmGZnmit8RP4ChwHUlD11dH1Ffqw9FWc\n' \
        '387i0chlE5FhQPirSM8sWFVmjt2sxC4qFWJoAD/COQtKHgEaVKVc4sH/yRostL1C\n' \
        'r+7aWuqzhQKBgQDcuC5LJr8VPGrbtPz1kY3mw+r/cG2krRNSm6Egj6oO9KFEgtCP\n' \
        'zzjKQU9E985EtsqNKI5VdR7cLRLiYf6r0J6j7zO0IAlnXADP768miUqYDuRw/dUw\n' \
        'JsbwCZneefDI+Mp325d1/egjla2WJCNqUBp4p/Zf62f6KOmbGzzEf6RuUQKBgG2y\n' \
        'E8YRiaTOt5m0MXUwcEZk2Hg5DF31c/dkalqy2UYU57aPJ8djzQ8hR2x8G9ulWaWJ\n' \
        'KiCm8s9gaOFNFt3II785NfWxPmh7/qwmKuUzIdWFNxAsbHQ8NvURTqyccaSzIpFO\n' \
        'hw0inlhBEBQ1cB2r3r06fgQNb2BTT0Itzrd5gkNVAoGBAJcMgeKdBMukT8dKxb4R\n' \
        '1PgQtFlR3COu2+B00pDyUpROFhHYLw/KlUv5TKrH1k3+E0KM+winVUIcZHlmFyuy\n' \
        'Ilquaova1YSFXP5cpD+PKtxRV76Qlqt6o+aPywm81licdOAXotT4JyJhrgz9ISnn\n' \
        'J13KkHoAZ9qd0rX7s37czb3O\n' \
        '-----END PRIVATE KEY-----'
    __REPORT_PUBLIC_KEY_PEM__ = \
        '-----BEGIN PUBLIC KEY-----\n' \
        'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArMvzZi8GT+lI9KeZiInn\n' \
        '4CvFTiuyid+IN4dP1+mhTnfxX+I/ntt8LUKZMbI1R1izOUoxJRoX6VQ4S9VgDLEC\n' \
        'PW6QlkeLI1eqe4DiYb9+J5ANhq4+XkhwgCUUFwpfqSfXWCHimjaGsZHbavl5nv/6\n' \
        'IbZJL/2YzE37IzJdES16JCfmIUrk6TUqL0WgrWXyweTIoVSbld0M29kToSkMXLsj\n' \
        '8vbQbTiKwViWhYlzi0cQIo7PiAss66lAW0X6AM7ZJYyAcfSjSLR4guMz76Og8aRk\n' \
        'jtsjEEkq7Ndz5H8hllWUoHpxGDqLhM9O1/h+QdvTz7luZgpeJ5KB92vYL6yOlSxM\n' \
        'fQIDAQAB\n' \
        '-----END PUBLIC KEY-----'

    _report_private_key = \
        serialization.load_pem_private_key(
            __REPORT_PRIVATE_KEY_PEM__.encode(),
            password=None,
            backend=backends.default_backend())
    _report_public_key = \
        serialization.load_pem_public_key(
            __REPORT_PUBLIC_KEY_PEM__.encode(),
            backend=backends.default_backend())

    # The anti-sybil ID for this particular validator.  This will get set when
    # the enclave is initialized
    _anti_sybil_id = None

    # The PoET keys will remain unset until signup info is either created or
    # unsealed
    _poet_public_key = None
    _poet_private_key = None
    _active_wait_timer = None

    @classmethod
    def initialize(cls, **kwargs):
        """Initialize the simulated enclave.

        Derives the anti-Sybil ID from kwargs['NodeName'] when present;
        otherwise falls back to the current timestamp (non-deterministic).
        """
        # Create an anti-Sybil ID that is unique for this validator
        cls._anti_sybil_id = \
            hashlib.sha256(
                kwargs.get(
                    'NodeName',
                    datetime.datetime.now().isoformat()).encode()).hexdigest()

    @classmethod
    def create_signup_info(cls,
                           originator_public_key_hash,
                           most_recent_wait_certificate_id):
        """Create simulated PoET signup information.

        Generates a fresh PoET key pair, fakes "sealed" signup data, builds
        a fake SGX quote/verification report, and signs the report with the
        class report private key.

        Args:
            originator_public_key_hash (str): hex SHA-256 hash of the
                originator's public key.
            most_recent_wait_certificate_id (str): placed in the report's
                'nonce' field.

        Returns:
            EnclaveSignupInfo: the new PoET public key, JSON proof data,
            anti-Sybil ID, and base64 sealed signup data.
        """
        with cls._lock:
            # First we need to create a public/private key pair for the PoET
            # enclave to use.
            cls._poet_private_key = signing.generate_privkey()
            cls._poet_public_key = \
                signing.generate_pubkey(cls._poet_private_key)
            cls._active_wait_timer = None

            # We are going to fake out sealing the signup data.  Note that
            # the signing module uses strings for both private (WIF encoded)
            # and public (hex encoded) key canonical formats.  Therefore, we
            # don't have to encode before putting in the signup data.  This
            # also means that on the flip side (unsealing signup data and
            # verifying signatures using public keys), we don't have to decode
            # before using.
            signup_data = {
                'poet_public_key': cls._poet_public_key,
                'poet_private_key': cls._poet_private_key
            }
            sealed_signup_data = \
                base64.b64encode(bytes(dict2json(signup_data).encode()))

            # Build up a fake SGX quote containing:
            # 1. The basename
            # 2. The report body that contains:
            #    a. The enclave measurement
            #    b. The report data SHA256(SHA256(OPK)|PPK)
            sgx_basename = \
                sgx_structs.SgxBasename(name=cls.__VALID_BASENAME__)
            sgx_measurement = \
                sgx_structs.SgxMeasurement(
                    m=cls.__VALID_ENCLAVE_MEASUREMENT__)

            # Both sides are upper-cased before hashing; verify_signup_info
            # rebuilds this hash input the same way.
            hash_input = \
                '{0}{1}'.format(
                    originator_public_key_hash.upper(),
                    cls._poet_public_key.upper()).encode()
            report_data = hashlib.sha256(hash_input).digest()
            sgx_report_data = sgx_structs.SgxReportData(d=report_data)
            sgx_report_body = \
                sgx_structs.SgxReportBody(
                    mr_enclave=sgx_measurement,
                    report_data=sgx_report_data)

            sgx_quote = \
                sgx_structs.SgxQuote(
                    basename=sgx_basename,
                    report_body=sgx_report_body)

            # Create a fake PSE manifest.  A base64 encoding of the
            # originator public key hash should suffice.
            pse_manifest = \
                base64.b64encode(originator_public_key_hash.encode())

            timestamp = datetime.datetime.now().isoformat()

            # Fake our "proof" data.
            verification_report = {
                'epidPseudonym': originator_public_key_hash,
                'id': base64.b64encode(
                    hashlib.sha256(
                        timestamp.encode()).hexdigest().encode()).decode(),
                'isvEnclaveQuoteStatus': 'OK',
                'isvEnclaveQuoteBody':
                    base64.b64encode(
                        sgx_quote.serialize_to_bytes()).decode(),
                'pseManifestStatus': 'OK',
                'pseManifestHash':
                    base64.b64encode(
                        hashlib.sha256(
                            pse_manifest).hexdigest().encode()).decode(),
                'nonce': most_recent_wait_certificate_id,
                'timestamp': timestamp
            }

            # Serialize the verification report, sign it, and then put
            # in the proof data
            verification_report_json = dict2json(verification_report)
            signature = \
                cls._report_private_key.sign(
                    verification_report_json.encode(),
                    padding.PKCS1v15(),
                    hashes.SHA256())

            proof_data_dict = {
                'evidence_payload': {
                    'pse_manifest': pse_manifest.decode()
                },
                'verification_report': verification_report_json,
                'signature': base64.b64encode(signature).decode()
            }
            proof_data = dict2json(proof_data_dict)

            return \
                EnclaveSignupInfo(
                    poet_public_key=signup_data['poet_public_key'],
                    proof_data=proof_data,
                    anti_sybil_id=originator_public_key_hash,
                    sealed_signup_data=sealed_signup_data)
    @classmethod
    def deserialize_signup_info(cls, serialized_signup_info):
        """Reconstitute an EnclaveSignupInfo from its serialized form."""
        return \
            EnclaveSignupInfo.signup_info_from_serialized(
                serialized_signup_info=serialized_signup_info)

    @classmethod
    def unseal_signup_data(cls, sealed_signup_data):
        """Restore enclave key state from previously "sealed" signup data.

        Args:
            sealed_signup_data: Sealed signup data that was returned
                previously in an EnclaveSignupInfo object from a call to
                create_signup_info

        Returns:
            A string: the hex-encoded PoET public key that was extracted
            from the sealed data
        """
        # Reverse the process we used in creating "sealed" signup info.
        # Specifically, we will do a base 64 decode, which gives us JSON
        # we can convert back to a dictionary we can use to get the
        # data we need
        signup_data = \
            json2dict(base64.b64decode(sealed_signup_data).decode())

        # Since the signing module uses strings for both private (WIF encoded)
        # and public (hex encoded) key canonical formats, we don't have to
        # decode.
        with cls._lock:
            cls._poet_public_key = str(signup_data.get('poet_public_key'))
            cls._poet_private_key = str(signup_data.get('poet_private_key'))
            cls._active_wait_timer = None

        return signup_data.get('poet_public_key')

    @classmethod
    def verify_signup_info(cls,
                           signup_info,
                           originator_public_key_hash,
                           most_recent_wait_certificate_id):
        """Verify simulated signup information.

        Checks, in order: report signature, ID field, EPID pseudonym vs
        anti-Sybil ID, PSE manifest status/hash, enclave quote status,
        report data, enclave measurement, basename, and nonce presence.

        Raises:
            ValueError: if any check fails.
        """
        # Verify the attestation verification report signature
        proof_data_dict = json2dict(signup_info.proof_data)
        verification_report = proof_data_dict.get('verification_report')
        if verification_report is None:
            raise ValueError('Verification report is missing from proof data')

        signature = proof_data_dict.get('signature')
        if signature is None:
            raise ValueError('Signature is missing from proof data')

        try:
            cls._report_public_key.verify(
                base64.b64decode(signature.encode()),
                verification_report.encode(),
                padding.PKCS1v15(),
                hashes.SHA256())
        except InvalidSignature:
            raise ValueError('Verification report signature is invalid')

        verification_report_dict = json2dict(verification_report)

        # Verify that the verification report contains an ID field
        if 'id' not in verification_report_dict:
            raise ValueError('Verification report does not contain an ID')

        # Verify that the verification report contains an EPID pseudonym and
        # that it matches the anti-Sybil ID
        epid_pseudonym = verification_report_dict.get('epidPseudonym')
        if epid_pseudonym is None:
            raise \
                ValueError(
                    'Verification report does not contain an EPID pseudonym')
        if epid_pseudonym != signup_info.anti_sybil_id:
            raise \
                ValueError(
                    'The anti-Sybil ID in the verification report [{0}] does '
                    'not match the one contained in the signup information '
                    '[{1}]'.format(
                        epid_pseudonym,
                        signup_info.anti_sybil_id))

        # Verify that the verification report contains a PSE manifest status
        # and it is OK
        pse_manifest_status = \
            verification_report_dict.get('pseManifestStatus')
        if pse_manifest_status is None:
            raise \
                ValueError(
                    'Verification report does not contain a PSE manifest '
                    'status')
        if pse_manifest_status.upper() != 'OK':
            raise \
                ValueError(
                    'PSE manifest status is {} (i.e., not OK)'.format(
                        pse_manifest_status))

        # Verify that the verification report contains a PSE manifest hash
        pse_manifest_hash = \
            verification_report_dict.get('pseManifestHash')
        if pse_manifest_hash is None:
            raise \
                ValueError(
                    'Verification report does not contain a PSE manifest '
                    'hash')

        # Verify that the proof data contains evidence payload
        evidence_payload = proof_data_dict.get('evidence_payload')
        if evidence_payload is None:
            raise ValueError('Evidence payload is missing from proof data')

        # Verify that the evidence payload contains a PSE manifest and then
        # use it to make sure that the PSE manifest hash is what we expect
        pse_manifest = evidence_payload.get('pse_manifest')
        if pse_manifest is None:
            raise ValueError('Evidence payload does not include PSE manifest')

        # Must mirror create_signup_info: b64(hexdigest-of-sha256).  The
        # comparison is case-insensitive on both sides.
        expected_pse_manifest_hash = \
            base64.b64encode(
                hashlib.sha256(
                    pse_manifest.encode()).hexdigest().encode()).decode()
        if pse_manifest_hash.upper() != expected_pse_manifest_hash.upper():
            raise \
                ValueError(
                    'PSE manifest hash {0} does not match {1}'.format(
                        pse_manifest_hash,
                        expected_pse_manifest_hash))

        # Verify that the verification report contains an enclave quote status
        # and the status is OK
        enclave_quote_status = \
            verification_report_dict.get('isvEnclaveQuoteStatus')
        if enclave_quote_status is None:
            raise \
                ValueError(
                    'Verification report does not contain an enclave quote '
                    'status')
        if enclave_quote_status.upper() != 'OK':
            raise \
                ValueError(
                    'Enclave quote status is {} (i.e., not OK)'.format(
                        enclave_quote_status))

        # Verify that the verification report contains an enclave quote
        enclave_quote = verification_report_dict.get('isvEnclaveQuoteBody')
        if enclave_quote is None:
            raise \
                ValueError(
                    'Verification report does not contain an enclave quote')

        # The ISV enclave quote body is base 64 encoded, so decode it and then
        # create an SGX quote structure from it so we can inspect
        sgx_quote = sgx_structs.SgxQuote()
        sgx_quote.parse_from_bytes(base64.b64decode(enclave_quote))

        # The report body should be SHA256(SHA256(OPK)|PPK)
        #
        # NOTE - since the code that created the report data is in the enclave
        # code, this code needs to be kept in sync with it.  Any changes to
        # how the report data is created need to be reflected in how we
        # re-create the report data for verification.
        #
        # NOTE(review): this uses cls._poet_public_key (this validator's own
        # key) rather than a key carried in signup_info -- confirm that is
        # intended when verifying another validator's signup info.
        hash_input = \
            '{0}{1}'.format(
                originator_public_key_hash.upper(),
                cls._poet_public_key.upper()).encode()
        hash_value = hashlib.sha256(hash_input).digest()
        # Right-pad with zero bytes up to the SGX report-data struct size.
        expected_report_data = \
            hash_value + \
            (b'\x00' *
             (sgx_structs.SgxReportData.STRUCT_SIZE - len(hash_value)))
        if sgx_quote.report_body.report_data.d != expected_report_data:
            raise \
                ValueError(
                    'AVR report data [{0}] not equal to [{1}]'.format(
                        sgx_quote.report_body.report_data.d.hex(),
                        expected_report_data.hex()))

        # Compare the enclave measurement against the expected valid enclave
        # measurement.
        #
        # NOTE - this is only a temporary check.  Instead of checking against
        # a predefined enclave measurement value, we should be configured with
        # a set of one or more enclave measurement values that we will
        # consider as valid.
        if sgx_quote.report_body.mr_enclave.m != \
                cls.__VALID_ENCLAVE_MEASUREMENT__:
            raise \
                ValueError(
                    'AVR enclave measurement [{0}] not equal to [{1}]'.format(
                        sgx_quote.report_body.mr_enclave.m.hex(),
                        cls.__VALID_ENCLAVE_MEASUREMENT__.hex()))

        # Compare the enclave basename in the verification report against the
        # expected enclave basename.
        #
        # NOTE - this is only a temporary check.  Instead of checking against
        # a predefined enclave basename value, we should be configured with a
        # set of one or more enclave basenames that we will consider as valid.
        if sgx_quote.basename.name != cls.__VALID_BASENAME__:
            raise \
                ValueError(
                    'AVR enclave basename [{0}] not equal to [{1}]'.format(
                        sgx_quote.basename.name.hex(),
                        cls.__VALID_BASENAME__.hex()))

        # Verify that the wait certificate ID in the verification report
        # matches the provided wait certificate ID.  The wait certificate ID
        # is stored in the nonce field.
        nonce = verification_report_dict.get('nonce')
        if nonce is None:
            raise \
                ValueError(
                    'Verification report does not have a nonce')

        # NOTE - this check is currently not performed as a transaction
        #        does not have a good way of obtaining the most recent
        #        wait certificate ID.
        #
        # if nonce != most_recent_wait_certificate_id:
        #     raise \
        #         ValueError(
        #             'Attestation evidence payload nonce {0} does not match '
        #             'most-recently-committed wait certificate ID {1}'.format(
        #                 nonce,
        #                 most_recent_wait_certificate_id))

    @classmethod
    def create_wait_timer(cls,
                          validator_address,
                          previous_certificate_id,
                          local_mean,
                          minimum_wait_time):
        """Create and sign a new wait timer; record it as the active timer.

        Raises:
            ValueError: if the enclave has no PoET private key yet.
        """
        with cls._lock:
            # If we don't have a PoET private key, then the enclave has not
            # been properly initialized (either by calling create_signup_info
            # or unseal_signup_data)
            if cls._poet_private_key is None:
                raise \
                    ValueError(
                        'Enclave must be initialized before attempting to '
                        'create a wait timer')

            # Create some value from the cert ID.  We are just going to use
            # the seal key to sign the cert ID.  We will then use the
            # low-order 64 bits to change that to a number [0, 1]
            tag = \
                base64.b64decode(
                    signing.sign(
                        previous_certificate_id,
                        cls._seal_private_key))

            tagd = float(struct.unpack('Q', tag[-8:])[0]) / (2**64 - 1)

            # Now compute the duration with a minimum wait time guaranteed.
            # log(tagd) <= 0, so duration >= minimum_wait_time.
            duration = minimum_wait_time - local_mean * math.log(tagd)

            # Create and sign the wait timer
            wait_timer = \
                EnclaveWaitTimer(
                    validator_address=validator_address,
                    duration=duration,
                    previous_certificate_id=previous_certificate_id,
                    local_mean=local_mean)
            wait_timer.signature = \
                signing.sign(
                    wait_timer.serialize(),
                    cls._poet_private_key)

            # Keep track of the active wait timer
            cls._active_wait_timer = wait_timer

            return wait_timer

    @classmethod
    def deserialize_wait_timer(cls, serialized_timer, signature):
        """Deserialize a wait timer, or return None on a bad signature."""
        with cls._lock:
            # Verify the signature before trying to deserialize
            if not signing.verify(serialized_timer,
                                  signature,
                                  cls._poet_public_key):
                return None

        return \
            EnclaveWaitTimer.wait_timer_from_serialized(
                serialized_timer=serialized_timer,
                signature=signature)

    @classmethod
    def create_wait_certificate(cls, wait_timer, block_hash):
        """Create and sign a wait certificate for the active wait timer.

        Consumes the active timer on success (sets it back to None).

        Raises:
            ValueError: if the enclave is uninitialized, there is no active
                timer, the caller's timer is not the active one, or the
                timer has not expired / has timed out (non-genesis only).
        """
        with cls._lock:
            # If we don't have a PoET private key, then the enclave has not
            # been properly initialized (either by calling create_signup_info
            # or unseal_signup_data)
            if cls._poet_private_key is None:
                raise \
                    ValueError(
                        'Enclave must be initialized before attempting to '
                        'create a wait certificate')

            # Several criteria need to be met before we can create a wait
            # certificate:
            # 1. We have an active timer
            # 2. The caller's wait timer is the active wait timer.  We are not
            #    going to rely on the objects being the same, but will compute
            #    a signature over the object and verify that the signatures
            #    are the same.
            # 3. The active timer has expired
            # 4. The active timer has not timed out
            #
            # Note - we make a concession for the genesis block (i.e., a wait
            # timer for which the previous certificate ID is the Null
            # identifier) in that we don't require the timer to have expired
            # and we don't worry about the timer having timed out.
            if cls._active_wait_timer is None:
                raise \
                    ValueError(
                        'There is not a current enclave active wait timer')

            if wait_timer is None or \
                    cls._active_wait_timer.signature != \
                    signing.sign(
                        wait_timer.serialize(),
                        cls._poet_private_key):
                raise \
                    ValueError(
                        'Validator is not using the current wait timer')

            is_not_genesis_block = \
                (cls._active_wait_timer.previous_certificate_id !=
                 NULL_BLOCK_IDENTIFIER)

            now = time.time()
            expire_time = \
                cls._active_wait_timer.request_time + \
                cls._active_wait_timer.duration

            if is_not_genesis_block and now < expire_time:
                raise \
                    ValueError(
                        'Cannot create wait certificate because timer has '
                        'not expired')

            time_out_time = \
                cls._active_wait_timer.request_time + \
                cls._active_wait_timer.duration + \
                TIMER_TIMEOUT_PERIOD

            if is_not_genesis_block and time_out_time < now:
                raise \
                    ValueError(
                        'Cannot create wait certificate because timer '
                        'has timed out')

            # Create a random nonce for the certificate.  For our "random"
            # nonce we will take the timer signature, concat that with the
            # current time, JSON-ize it and create a SHA-256 hash over it.
            # Probably not considered random by security professional
            # standards, but it is good enough for the simulator.
            random_string = \
                dict2json({
                    'wait_timer_signature': cls._active_wait_timer.signature,
                    'now': datetime.datetime.utcnow().isoformat()
                })
            nonce = hashlib.sha256(random_string.encode()).hexdigest()

            # First create a new enclave wait certificate using the data
            # provided and then sign the certificate with the PoET private key
            wait_certificate = \
                EnclaveWaitCertificate.wait_certificate_with_wait_timer(
                    wait_timer=cls._active_wait_timer,
                    nonce=nonce,
                    block_hash=block_hash)
            wait_certificate.signature = \
                signing.sign(
                    wait_certificate.serialize(),
                    cls._poet_private_key)

            # Now that we have created the certificate, we no longer have an
            # active timer
            cls._active_wait_timer = None

            return wait_certificate

    @classmethod
    def deserialize_wait_certificate(cls, serialized_certificate, signature):
        """Reconstitute a wait certificate from its serialized form."""
        return \
            EnclaveWaitCertificate.wait_certificate_from_serialized(
                serialized_certificate=serialized_certificate,
                signature=signature)

    @classmethod
    def verify_wait_certificate(cls, certificate, poet_public_key):
        """Verify a wait certificate's signature against a PoET public key.

        Raises:
            ValueError: if the signature does not verify.
        """
        # Since the signing module uses a hex-encoded string as the canonical
        # format for public keys and we should be handed a public key that was
        # part of signup information created by us, don't bother decoding
        # the public key.
        if not \
                signing.verify(
                    certificate.serialize(),
                    certificate.signature,
                    poet_public_key):
            raise ValueError('Wait certificate signature does not match')