def test_set_status(self):
    """Tests that set_status() has the correct behavior.

    Basically:
        1. Adds a batch which has two transactions.
        2. Calls next_transaction() to get the first Transaction.
        3. Calls next_transaction() to verify that it returns None.
        4. Calls set_status() to mark the first transaction applied.
        5. Calls next_transaction() to get the second Transaction.

    Step 3 returns None because the first transaction hasn't been marked
    as applied, and the SerialScheduler will only return one not-applied
    Transaction at a time.

    Step 5 is expected to return the second Transaction, not None,
    since the first Transaction was marked as applied in the previous
    step.
    """
    private_key = bitcoin.random_key()
    public_key = bitcoin.encode_pubkey(
        bitcoin.privkey_to_pubkey(private_key), "hex")

    context_manager = ContextManager(dict_database.DictDatabase())
    squash_handler = context_manager.get_squash_handler()
    first_state_root = context_manager.get_first_root()
    scheduler = SerialScheduler(squash_handler, first_state_root)

    txns = []
    for name in ['a', 'b']:
        txn = create_transaction(
            name=name,
            private_key=private_key,
            public_key=public_key)
        txns.append(txn)

    batch = create_batch(
        transactions=txns,
        private_key=private_key,
        public_key=public_key)

    scheduler.add_batch(batch)

    scheduled_txn_info = scheduler.next_transaction()
    self.assertIsNotNone(scheduled_txn_info)
    # assertEqual, not the deprecated assertEquals alias (removed in 3.12).
    self.assertEqual('a', scheduled_txn_info.txn.payload.decode())

    # Only one unapplied transaction may be outstanding at a time.
    self.assertIsNone(scheduler.next_transaction())

    scheduler.set_transaction_execution_result(
        scheduled_txn_info.txn.header_signature,
        is_valid=False,
        context_id=None)

    scheduled_txn_info = scheduler.next_transaction()
    self.assertIsNotNone(scheduled_txn_info)
    self.assertEqual('b', scheduled_txn_info.txn.payload.decode())
def test_transaction_order(self):
    """Tests that the transactions are returned in the order added.

    Adds three batches with varying number of transactions, then tests
    that they are returned in the appropriate order when using an
    iterator.

    This test also creates a second iterator and verifies that both
    iterators return the same transactions.

    This test also finalizes the scheduler and verifies that
    StopIteration is thrown by the iterator.
    """
    private_key = bitcoin.random_key()
    public_key = bitcoin.encode_pubkey(
        bitcoin.privkey_to_pubkey(private_key), "hex")

    context_manager = ContextManager(dict_database.DictDatabase())
    squash_handler = context_manager.get_squash_handler()
    first_state_root = context_manager.get_first_root()
    scheduler = SerialScheduler(squash_handler, first_state_root)

    txns = []
    for names in [['a', 'b', 'c'], ['d', 'e'], ['f', 'g', 'h', 'i']]:
        batch_txns = []
        for name in names:
            txn = create_transaction(
                name=name,
                private_key=private_key,
                public_key=public_key)
            batch_txns.append(txn)
            txns.append(txn)

        batch = create_batch(
            transactions=batch_txns,
            private_key=private_key,
            public_key=public_key)

        scheduler.add_batch(batch)

    scheduler.finalize()

    iterable1 = iter(scheduler)
    iterable2 = iter(scheduler)
    for txn in txns:
        scheduled_txn_info = next(iterable1)
        # Both iterators must observe the same schedule.
        self.assertEqual(scheduled_txn_info, next(iterable2))
        self.assertIsNotNone(scheduled_txn_info)
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(txn.payload, scheduled_txn_info.txn.payload)
        scheduler.set_transaction_execution_result(
            txn.header_signature, False, None)

    with self.assertRaises(StopIteration):
        next(iterable1)
def test_valid_batch_invalid_batch(self):
    """Tests the squash function. That the correct hash is being used
    for each txn and that the batch ending state hash is being set.

    Basically:
        1. Adds two batches, one where all the txns are valid,
           and one where one of the txns is invalid.
        2. Run through the scheduler executor interaction
           as txns are processed.
        3. Verify that the valid state root is obtained
           through the squash function.
        4. Verify that correct batch statuses are set.
    """
    private_key = bitcoin.random_key()
    public_key = bitcoin.encode_pubkey(
        bitcoin.privkey_to_pubkey(private_key), "hex")

    context_manager = ContextManager(dict_database.DictDatabase())
    squash_handler = context_manager.get_squash_handler()
    first_state_root = context_manager.get_first_root()
    scheduler = SerialScheduler(squash_handler, first_state_root)
    # 1)
    batch_signatures = []
    for names in [['a', 'b'], ['invalid', 'c']]:
        batch_txns = []
        for name in names:
            txn = create_transaction(
                name=name,
                private_key=private_key,
                public_key=public_key)
            batch_txns.append(txn)

        batch = create_batch(
            transactions=batch_txns,
            private_key=private_key,
            public_key=public_key)

        batch_signatures.append(batch.header_signature)
        scheduler.add_batch(batch)
    scheduler.finalize()
    # 2)
    sched1 = iter(scheduler)
    invalid_payload = hashlib.sha512('invalid'.encode()).hexdigest()
    while not scheduler.complete(block=False):
        txn_info = next(sched1)
        txn_header = transaction_pb2.TransactionHeader()
        txn_header.ParseFromString(txn_info.txn.header)
        inputs_or_outputs = list(txn_header.inputs)
        c_id = context_manager.create_context(txn_info.state_hash,
                                              inputs_or_outputs,
                                              inputs_or_outputs)
        if txn_header.payload_sha512 == invalid_payload:
            scheduler.set_transaction_execution_result(
                txn_info.txn.header_signature, False, c_id)
        else:
            context_manager.set(c_id, [{inputs_or_outputs[0]: 1}])
            scheduler.set_transaction_execution_result(
                txn_info.txn.header_signature, True, c_id)

    sched2 = iter(scheduler)
    # 3)
    txn_info_a = next(sched2)
    # assertEqual, not the deprecated assertEquals alias (removed in 3.12).
    self.assertEqual(first_state_root, txn_info_a.state_hash)

    txn_a_header = transaction_pb2.TransactionHeader()
    txn_a_header.ParseFromString(txn_info_a.txn.header)
    inputs_or_outputs = list(txn_a_header.inputs)
    address_a = inputs_or_outputs[0]
    c_id_a = context_manager.create_context(first_state_root,
                                            inputs_or_outputs,
                                            inputs_or_outputs)
    context_manager.set(c_id_a, [{address_a: 1}])
    state_root2 = context_manager.commit_context([c_id_a], virtual=False)

    txn_info_b = next(sched2)
    self.assertEqual(txn_info_b.state_hash, state_root2)

    txn_b_header = transaction_pb2.TransactionHeader()
    txn_b_header.ParseFromString(txn_info_b.txn.header)
    inputs_or_outputs = list(txn_b_header.inputs)
    address_b = inputs_or_outputs[0]
    c_id_b = context_manager.create_context(state_root2,
                                            inputs_or_outputs,
                                            inputs_or_outputs)
    context_manager.set(c_id_b, [{address_b: 1}])
    state_root3 = context_manager.commit_context([c_id_b], virtual=False)

    # PEP 8 snake_case for the local (was txn_infoInvalid).
    txn_info_invalid = next(sched2)
    self.assertEqual(txn_info_invalid.state_hash, state_root3)

    txn_info_c = next(sched2)
    self.assertEqual(txn_info_c.state_hash, state_root3)
    # 4)
    batch1_result = scheduler.get_batch_execution_result(
        batch_signatures[0])
    self.assertTrue(batch1_result.is_valid)
    self.assertEqual(batch1_result.state_hash, state_root3)

    batch2_result = scheduler.get_batch_execution_result(
        batch_signatures[1])
    self.assertFalse(batch2_result.is_valid)
    self.assertIsNone(batch2_result.state_hash)
def start(self, on_done):
    """
    Starts the genesis block creation process.  Will call the given
    `on_done` callback on successful completion.

    Args:
        on_done (function): a function called on completion

    Raises:
        InvalidGenesisStateError: raises this error if a genesis
            block is unable to be produced, or the resulting
            block-chain-id saved.
    """
    genesis_file = os.path.join(self._data_dir, 'genesis.batch')
    try:
        with open(genesis_file, 'rb') as batch_file:
            genesis_data = genesis_pb2.GenesisData()
            genesis_data.ParseFromString(batch_file.read())
        LOGGER.info('Producing genesis block from %s', genesis_file)
    except IOError:
        raise InvalidGenesisStateError(
            "Genesis File {} specified, but unreadable".format(
                genesis_file))

    initial_state_root = self._context_manager.get_first_root()

    # list() is the idiomatic copy of a repeated protobuf field; a
    # comprehension here was redundant.
    genesis_batches = list(genesis_data.batches)
    if genesis_batches:
        scheduler = SerialScheduler(
            self._context_manager.get_squash_handler(),
            initial_state_root,
            always_persist=True)

        LOGGER.debug('Adding %s batches', len(genesis_batches))
        for batch in genesis_batches:
            scheduler.add_batch(batch)

        self._transaction_executor.execute(scheduler)

        scheduler.finalize()
        scheduler.complete(block=True)

    state_hash = initial_state_root
    for batch in genesis_batches:
        result = scheduler.get_batch_execution_result(
            batch.header_signature)
        # If the result is None or invalid, the genesis block cannot be
        # produced from this genesis batch file.
        if result is None or not result.is_valid:
            raise InvalidGenesisStateError(
                'Unable to create genesis block, due to batch {}'
                .format(batch.header_signature))
        if result.state_hash is not None:
            state_hash = result.state_hash
    LOGGER.debug('Produced state hash %s for genesis block.', state_hash)

    block_builder = self._generate_genesis_block()
    block_builder.add_batches(genesis_batches)
    block_builder.set_state_hash(state_hash)

    block_publisher = self._get_block_publisher(initial_state_root)
    if not block_publisher.initialize_block(block_builder.block_header):
        LOGGER.error('Consensus refused to initialize consensus block.')
        raise InvalidGenesisConsensusError(
            'Consensus refused to initialize genesis block.')

    if not block_publisher.finalize_block(block_builder.block_header):
        LOGGER.error('Consensus refused to finalize genesis block.')
        raise InvalidGenesisConsensusError(
            'Consensus refused to finalize genesis block.')

    self._sign_block(block_builder)

    block = block_builder.build_block()

    blkw = BlockWrapper(block=block)

    LOGGER.info('Genesis block created: %s', blkw)

    self._completer.add_block(block)
    self._block_manager.put([blkw.block])
    self._block_manager.persist(blkw.identifier, "commit_store")

    self._chain_id_manager.save_block_chain_id(block.header_signature)

    LOGGER.debug('Deleting genesis data.')
    os.remove(genesis_file)

    if on_done is not None:
        on_done()
def create_scheduler(self, squash_handler, first_state_root):
    """Returns a new SerialScheduler for the given state root.

    Args:
        squash_handler: a squash-function handler, presumably produced by
            ContextManager.get_squash_handler() — TODO confirm against
            callers.
        first_state_root: the state root hash the scheduler starts from.

    Returns:
        SerialScheduler: a newly constructed serial scheduler.
    """
    return SerialScheduler(squash_handler, first_state_root)
def start(self, on_done):
    """
    Starts the genesis block creation process.  Will call the given
    `on_done` callback on successful completion.

    Args:
        on_done (function): a function called on completion

    Raises:
        InvalidGenesisStateError: raises this error if a genesis
            block is unable to be produced, or the resulting
            block-chain-id saved.
    """
    genesis_file = os.path.join(self._data_dir, 'genesis.batch')
    try:
        with open(genesis_file, 'rb') as batch_file:
            genesis_data = genesis_pb2.GenesisData()
            genesis_data.ParseFromString(batch_file.read())
        LOGGER.info('Producing genesis block from %s', genesis_file)
    except IOError:
        raise InvalidGenesisStateError(
            "Genesis File {} specified, but unreadable".format(
                genesis_file))

    initial_state_root = self._context_manager.get_first_root()

    # list() is the idiomatic copy of a repeated protobuf field; a
    # comprehension here was redundant.
    genesis_batches = list(genesis_data.batches)
    if genesis_batches:
        scheduler = SerialScheduler(
            self._context_manager.get_squash_handler(),
            initial_state_root,
            always_persist=True)

        LOGGER.debug('Adding %s batches', len(genesis_batches))
        for batch in genesis_batches:
            scheduler.add_batch(batch)

        self._transaction_executor.execute(scheduler)

        scheduler.finalize()
        scheduler.complete(block=True)

    state_hash = initial_state_root
    for batch in genesis_batches:
        result = scheduler.get_batch_execution_result(
            batch.header_signature)
        # If the result is None or invalid, the genesis block cannot be
        # produced from this genesis batch file.
        if result is None or not result.is_valid:
            raise InvalidGenesisStateError(
                'Unable to create genesis block, due to batch {}'.format(
                    batch.header_signature))
        if result.state_hash is not None:
            state_hash = result.state_hash
    LOGGER.debug('Produced state hash %s for genesis block.', state_hash)

    block_builder = self._generate_genesis_block()
    block_builder.add_batches(genesis_batches)
    block_builder.set_state_hash(state_hash)

    block_publisher = self._get_block_publisher(initial_state_root)
    if not block_publisher.initialize_block(block_builder.block_header):
        LOGGER.error('Consensus refused to initialize consensus block.')
        raise InvalidGenesisConsensusError(
            'Consensus refused to initialize genesis block.')

    if not block_publisher.finalize_block(block_builder.block_header):
        LOGGER.error('Consensus refused to finalize genesis block.')
        raise InvalidGenesisConsensusError(
            'Consensus refused to finalize genesis block.')

    self._sign_block(block_builder)

    block = block_builder.build_block()

    blkw = BlockWrapper(block=block, status=BlockStatus.Valid)

    LOGGER.info('Genesis block created: %s', blkw)

    self._completer.add_block(block)
    self._block_store.update_chain([blkw])

    self._chain_id_manager.save_block_chain_id(block.header_signature)

    LOGGER.debug('Deleting genesis data.')
    os.remove(genesis_file)

    if on_done is not None:
        on_done()
def start(self, on_done):
    """
    Starts the genesis block creation process.  Will call the given
    `on_done` callback on successful completion.

    Args:
        on_done (function): a function called on completion

    Raises:
        InvalidGenesisStateError: if the genesis batch file is
            unreadable, or a genesis batch fails to execute.
    """
    genesis_file = os.path.join(self._data_dir, 'genesis.batch')
    try:
        with open(genesis_file, 'rb') as batch_file:
            genesis_data = genesis_pb2.GenesisData()
            genesis_data.ParseFromString(batch_file.read())
        LOGGER.info('Producing genesis block from %s', genesis_file)
    except IOError:
        raise InvalidGenesisStateError(
            "Genesis File {} specified, but unreadable".format(
                genesis_file))

    initial_state_root = self._context_manager.get_first_root()

    block = GenesisController._generate_genesis_block()

    # Idiomatic: list() copy plus truthiness test instead of
    # a comprehension and len(...) > 0.
    genesis_batches = list(genesis_data.batches)
    if genesis_batches:
        scheduler = SerialScheduler(
            self._context_manager.get_squash_handler(),
            initial_state_root)
        LOGGER.debug('Adding %s batches', len(genesis_batches))
        for batch in genesis_batches:
            scheduler.add_batch(batch)
        self._transaction_executor.execute(scheduler,
                                           require_txn_processors=True)
        scheduler.finalize()
        scheduler.complete(block=True)

    state_hash = initial_state_root
    for batch in genesis_batches:
        result = scheduler.get_batch_execution_result(
            batch.header_signature)
        # A missing or invalid result means the genesis block cannot be
        # produced from this batch file.
        if result is None or not result.is_valid:
            raise InvalidGenesisStateError(
                'Unable to create genesis block, due to batch {}'
                .format(batch.header_signature))
        state_hash = result.state_hash
    LOGGER.debug('Produced state hash %s for genesis block.', state_hash)

    block.add_batches(genesis_batches)
    block.set_state_hash(state_hash)
    GenesisController._sign_block(block)

    LOGGER.info('genesis block created: %s', block.header_signature)

    self._completer.add_block(block.get_block())
    self._block_store['chain_head_id'] = block.header_signature
    block_state = BlockState(block_wrapper=block, weight=0,
                             status=BlockStatus.Valid)
    self._block_store[block.header_signature] = block_state
    self._save_block_chain_id(block.header_signature)

    LOGGER.debug('deleting genesis data')
    os.remove(genesis_file)

    if on_done is not None:
        on_done()
class TestSerialScheduler(unittest.TestCase):
    """Unit tests for the SerialScheduler against an in-memory context
    manager.  Each test gets a fresh scheduler from setUp()."""

    def setUp(self):
        # A ContextManager over an in-memory database; the state-delta
        # store is mocked since no test inspects deltas.
        self.context_manager = ContextManager(dict_database.DictDatabase(),
                                              state_delta_store=Mock())
        squash_handler = self.context_manager.get_squash_handler()
        # The initial (empty) state root every test starts from.
        self.first_state_root = self.context_manager.get_first_root()
        self.scheduler = SerialScheduler(squash_handler,
                                         self.first_state_root,
                                         always_persist=False)

    def tearDown(self):
        self.context_manager.stop()

    def test_transaction_order(self):
        """Tests that the transactions are returned in the order added.

        Adds three batches with varying number of transactions, then
        tests that they are returned in the appropriate order when using
        an iterator.

        This test also creates a second iterator and verifies that both
        iterators return the same transactions.

        This test also finalizes the scheduler and verifies that
        StopIteration is thrown by the iterator.
        """
        private_key = signing.generate_private_key()
        public_key = signing.generate_public_key(private_key)

        txns = []

        for names in [['a', 'b', 'c'], ['d', 'e'], ['f', 'g', 'h', 'i']]:
            batch_txns = []
            for name in names:
                txn, _ = create_transaction(
                    payload=name.encode(),
                    private_key=private_key,
                    public_key=public_key)

                batch_txns.append(txn)
                txns.append(txn)

            batch = create_batch(
                transactions=batch_txns,
                private_key=private_key,
                public_key=public_key)

            self.scheduler.add_batch(batch)
        self.scheduler.finalize()

        iterable1 = iter(self.scheduler)
        iterable2 = iter(self.scheduler)
        for txn in txns:
            scheduled_txn_info = next(iterable1)
            # Both iterators must observe the same schedule.
            self.assertEqual(scheduled_txn_info, next(iterable2))
            self.assertIsNotNone(scheduled_txn_info)
            self.assertEqual(txn.payload, scheduled_txn_info.txn.payload)
            c_id = self.context_manager.create_context(
                self.first_state_root,
                base_contexts=scheduled_txn_info.base_context_ids,
                inputs=[],
                outputs=[])
            self.scheduler.set_transaction_execution_result(
                txn.header_signature, True, c_id)

        with self.assertRaises(StopIteration):
            next(iterable1)

    def test_completion_on_last_result(self):
        """Tests that the schedule is not marked complete until the last
        result is set.

        Adds three batches with varying number of transactions, then
        tests that they are returned in the appropriate order when using
        an iterator.  Tests that the value of `complete` is False until
        the last result is set.

        This test also finalizes the scheduler and verifies that
        StopIteration is thrown by the iterator, and that `complete` is
        True at the end.
        """
        private_key = signing.generate_private_key()
        public_key = signing.generate_public_key(private_key)

        txns = []

        for names in [['a', 'b', 'c'], ['d', 'e'], ['f', 'g', 'h', 'i']]:
            batch_txns = []
            for name in names:
                txn, _ = create_transaction(
                    payload=name.encode(),
                    private_key=private_key,
                    public_key=public_key)

                batch_txns.append(txn)
                txns.append(txn)

            batch = create_batch(
                transactions=batch_txns,
                private_key=private_key,
                public_key=public_key)

            self.scheduler.add_batch(batch)
        self.scheduler.finalize()

        iterable1 = iter(self.scheduler)
        for txn in txns:
            scheduled_txn_info = next(iterable1)
            # Not complete until the final result below is recorded.
            self.assertFalse(self.scheduler.complete(block=False))
            c_id = self.context_manager.create_context(
                self.first_state_root,
                base_contexts=scheduled_txn_info.base_context_ids,
                inputs=[],
                outputs=[])
            self.scheduler.set_transaction_execution_result(
                txn.header_signature, True, c_id)

        self.assertTrue(self.scheduler.complete(block=False))

        with self.assertRaises(StopIteration):
            next(iterable1)

    def test_set_status(self):
        """Tests that set_status() has the correct behavior.

        Basically:
            1. Adds a batch which has two transactions.
            2. Calls next_transaction() to get the first Transaction.
            3. Calls next_transaction() to verify that it returns None.
            4. Calls set_status() to mark the first transaction applied.
            5. Calls next_transaction() to get the second Transaction.

        Step 3 returns None because the first transaction hasn't been
        marked as applied, and the SerialScheduler will only return one
        not-applied Transaction at a time.

        Step 5 is expected to return the second Transaction, not None,
        since the first Transaction was marked as applied in the
        previous step.
        """
        private_key = signing.generate_private_key()
        public_key = signing.generate_public_key(private_key)

        txns = []

        for name in ['a', 'b']:
            txn, _ = create_transaction(
                payload=name.encode(),
                private_key=private_key,
                public_key=public_key)

            txns.append(txn)

        batch = create_batch(
            transactions=txns,
            private_key=private_key,
            public_key=public_key)

        self.scheduler.add_batch(batch)

        scheduled_txn_info = self.scheduler.next_transaction()
        self.assertIsNotNone(scheduled_txn_info)
        self.assertEqual('a', scheduled_txn_info.txn.payload.decode())

        self.assertIsNone(self.scheduler.next_transaction())
        c_id = self.context_manager.create_context(
            self.first_state_root,
            base_contexts=scheduled_txn_info.base_context_ids,
            inputs=[],
            outputs=[])
        self.scheduler.set_transaction_execution_result(
            scheduled_txn_info.txn.header_signature,
            is_valid=True,
            context_id=c_id)

        scheduled_txn_info = self.scheduler.next_transaction()
        self.assertIsNotNone(scheduled_txn_info)
        self.assertEqual('b', scheduled_txn_info.txn.payload.decode())
def test_valid_batch_invalid_batch(self):
    """Tests the squash function. That the correct hash is being used
    for each txn and that the batch ending state hash is being set.

    Basically:
        1. Adds two batches, one where all the txns are valid,
           and one where one of the txns is invalid.
        2. Run through the scheduler executor interaction
           as txns are processed.
        3. Verify that the valid state root is obtained
           through the squash function.
        4. Verify that correct batch statuses are set.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    context_manager = ContextManager(dict_database.DictDatabase())
    squash_handler = context_manager.get_squash_handler()
    first_state_root = context_manager.get_first_root()
    scheduler = SerialScheduler(squash_handler, first_state_root)
    # 1)
    batch_signatures = []
    for names in [['a', 'b'], ['invalid', 'c']]:
        batch_txns = []
        for name in names:
            txn = create_transaction(name=name,
                                     private_key=private_key,
                                     public_key=public_key)
            batch_txns.append(txn)
        batch = create_batch(transactions=batch_txns,
                             private_key=private_key,
                             public_key=public_key)
        batch_signatures.append(batch.header_signature)
        scheduler.add_batch(batch)
    scheduler.finalize()
    # 2)
    sched1 = iter(scheduler)
    invalid_payload = hashlib.sha512('invalid'.encode()).hexdigest()
    while not scheduler.complete(block=False):
        txn_info = next(sched1)
        txn_header = transaction_pb2.TransactionHeader()
        txn_header.ParseFromString(txn_info.txn.header)
        inputs_or_outputs = list(txn_header.inputs)
        c_id = context_manager.create_context(txn_info.state_hash,
                                              inputs_or_outputs,
                                              inputs_or_outputs)
        if txn_header.payload_sha512 == invalid_payload:
            scheduler.set_transaction_execution_result(
                txn_info.txn.header_signature, False, c_id)
        else:
            context_manager.set(c_id, [{inputs_or_outputs[0]: 1}])
            scheduler.set_transaction_execution_result(
                txn_info.txn.header_signature, True, c_id)

    sched2 = iter(scheduler)
    # 3)
    txn_info_a = next(sched2)
    # assertEqual, not the deprecated assertEquals alias (removed in 3.12).
    self.assertEqual(first_state_root, txn_info_a.state_hash)

    txn_a_header = transaction_pb2.TransactionHeader()
    txn_a_header.ParseFromString(txn_info_a.txn.header)
    inputs_or_outputs = list(txn_a_header.inputs)
    address_a = inputs_or_outputs[0]
    c_id_a = context_manager.create_context(first_state_root,
                                            inputs_or_outputs,
                                            inputs_or_outputs)
    context_manager.set(c_id_a, [{address_a: 1}])
    state_root2 = context_manager.commit_context([c_id_a], virtual=False)

    txn_info_b = next(sched2)
    self.assertEqual(txn_info_b.state_hash, state_root2)

    txn_b_header = transaction_pb2.TransactionHeader()
    txn_b_header.ParseFromString(txn_info_b.txn.header)
    inputs_or_outputs = list(txn_b_header.inputs)
    address_b = inputs_or_outputs[0]
    c_id_b = context_manager.create_context(state_root2,
                                            inputs_or_outputs,
                                            inputs_or_outputs)
    context_manager.set(c_id_b, [{address_b: 1}])
    state_root3 = context_manager.commit_context([c_id_b], virtual=False)

    # PEP 8 snake_case for the local (was txn_infoInvalid).
    txn_info_invalid = next(sched2)
    self.assertEqual(txn_info_invalid.state_hash, state_root3)

    txn_info_c = next(sched2)
    self.assertEqual(txn_info_c.state_hash, state_root3)
    # 4)
    batch1_result = scheduler.get_batch_execution_result(
        batch_signatures[0])
    self.assertTrue(batch1_result.is_valid)
    self.assertEqual(batch1_result.state_hash, state_root3)

    batch2_result = scheduler.get_batch_execution_result(
        batch_signatures[1])
    self.assertFalse(batch2_result.is_valid)
    self.assertIsNone(batch2_result.state_hash)
def start(self, on_done):
    """
    Starts the genesis block creation process.  Will call the given
    `on_done` callback on successful completion.

    Args:
        on_done (function): a function called on completion

    Raises:
        InvalidGenesisStateError: if the genesis batch file is
            unreadable, or a genesis batch fails to execute.
    """
    genesis_file = os.path.join(self._data_dir, 'genesis.batch')
    try:
        with open(genesis_file, 'rb') as batch_file:
            genesis_data = genesis_pb2.GenesisData()
            genesis_data.ParseFromString(batch_file.read())
        LOGGER.info('Producing genesis block from %s', genesis_file)
    except IOError:
        raise InvalidGenesisStateError(
            "Genesis File {} specified, but unreadable".format(
                genesis_file))

    initial_state_root = self._context_manager.get_first_root()

    block_builder = GenesisController._generate_genesis_block()

    # Idiomatic: list() copy plus truthiness test instead of
    # a comprehension and len(...) > 0.
    genesis_batches = list(genesis_data.batches)
    if genesis_batches:
        scheduler = SerialScheduler(
            self._context_manager.get_squash_handler(),
            initial_state_root)
        LOGGER.debug('Adding %s batches', len(genesis_batches))
        for batch in genesis_batches:
            scheduler.add_batch(batch)
        self._transaction_executor.execute(scheduler,
                                           require_txn_processors=True)
        scheduler.finalize()
        scheduler.complete(block=True)

    state_hash = initial_state_root
    for batch in genesis_batches:
        result = scheduler.get_batch_execution_result(
            batch.header_signature)
        # A missing or invalid result means the genesis block cannot be
        # produced from this batch file.
        if result is None or not result.is_valid:
            raise InvalidGenesisStateError(
                'Unable to create genesis block, due to batch {}'
                .format(batch.header_signature))
        state_hash = result.state_hash
    LOGGER.debug('Produced state hash %s for genesis block.', state_hash)

    block_builder.add_batches(genesis_batches)
    block_builder.set_state_hash(state_hash)
    GenesisController._sign_block(block_builder)

    block = block_builder.build_block()
    blkw = BlockWrapper(block=block, status=BlockStatus.Valid)

    LOGGER.info('Genesis block created: %s', blkw)

    self._completer.add_block(block)
    self._block_store['chain_head_id'] = blkw.identifier
    self._block_store[blkw.identifier] = {
        "block": blkw.block,
        "weight": blkw.weight
    }
    self._save_block_chain_id(block.header_signature)

    LOGGER.debug('Deleting genesis data.')
    os.remove(genesis_file)

    if on_done is not None:
        on_done()
def test_add_batch_after_empty_iteration(self):
    """Tests that iterations will continue as result of add_batch().

    This test calls next() on a scheduler iterator in a separate thread
    called the IteratorThread.  The test waits until the IteratorThread
    is waiting in next(); internal to the scheduler, it will be waiting
    on a condition variable as there are no transactions to return and
    the scheduler is not finalized.  Then, the test continues by running
    add_batch(), which should cause the next() running in the
    IterableThread to return a transaction.

    This demonstrates the scheduler's ability to wait on an empty
    iterator but continue as transactions become available via
    add_batch.
    """
    private_key = signing.generate_privkey()
    public_key = signing.generate_pubkey(private_key)

    context_manager = ContextManager(dict_database.DictDatabase())
    squash_handler = context_manager.get_squash_handler()
    first_state_root = context_manager.get_first_root()
    scheduler = SerialScheduler(squash_handler, first_state_root)

    # Create a basic transaction and batch.
    txn = create_transaction(name='a',
                             private_key=private_key,
                             public_key=public_key)
    batch = create_batch(transactions=[txn],
                         private_key=private_key,
                         public_key=public_key)

    # This class is used to run the scheduler's iterator.
    class IteratorThread(threading.Thread):
        def __init__(self, iterable):
            threading.Thread.__init__(self)
            self._iterable = iterable
            self.ready = False
            self.condition = threading.Condition()
            self.txn_info = None

        def run(self):
            # Even with this lock here, there is a race condition between
            # exit of the lock and entry into the iterable.  That is
            # solved by a sleep later in the test.
            with self.condition:
                self.ready = True
                self.condition.notify()
            txn_info = next(self._iterable)
            with self.condition:
                self.txn_info = txn_info
                self.condition.notify()

    # This is the iterable we are testing, which we will use in the
    # IteratorThread.  We also use it in this thread below to test
    # for StopIteration.
    iterable = iter(scheduler)

    # Create and startup thread.
    thread = IteratorThread(iterable=iterable)
    thread.start()

    # Pause here to make sure the thread is absolutely as far along as
    # possible; in other words, right before we call next() in its run()
    # method.  When this returns, there should be very little time until
    # the iterator is blocked on a condition variable.
    with thread.condition:
        while not thread.ready:
            thread.condition.wait()

    # May the daemons stay away during this dark time, and may we be
    # forgiven upon our return.
    time.sleep(1)

    # At this point, the IteratorThread should be waiting in next(), so
    # we go ahead and give it a batch.
    scheduler.add_batch(batch)

    # If all goes well, thread.txn_info will get set to the result of the
    # next() call.  If not, it will timeout and thread.txn_info will be
    # empty.
    with thread.condition:
        if thread.txn_info is None:
            thread.condition.wait(5)

    # If thread.txn_info is empty, the test failed as iteration did not
    # continue after add_batch().
    self.assertIsNotNone(thread.txn_info, "iterable failed to return txn")
    # assertEqual, not the deprecated assertEquals alias (removed in 3.12).
    self.assertEqual(txn.payload, thread.txn_info.txn.payload)

    # Continue with normal shutdown/cleanup.
    scheduler.finalize()
    scheduler.set_transaction_execution_result(txn.header_signature,
                                               False, None)
    with self.assertRaises(StopIteration):
        next(iterable)
def setUp(self):
    """Builds a fresh ContextManager over an in-memory database and a
    SerialScheduler rooted at its first state root for each test."""
    self.context_manager = ContextManager(dict_database.DictDatabase())
    squash_handler = self.context_manager.get_squash_handler()
    # The initial (empty) state root every test starts from.
    self.first_state_root = self.context_manager.get_first_root()
    self.scheduler = SerialScheduler(squash_handler,
                                     self.first_state_root)
class TestSerialScheduler(unittest.TestCase): def setUp(self): self.context_manager = ContextManager(dict_database.DictDatabase()) squash_handler = self.context_manager.get_squash_handler() self.first_state_root = self.context_manager.get_first_root() self.scheduler = SerialScheduler(squash_handler, self.first_state_root) def tearDown(self): self.context_manager.stop() def test_transaction_order(self): """Tests the that transactions are returned in order added. Adds three batches with varying number of transactions, then tests that they are returned in the appropriate order when using an iterator. This test also creates a second iterator and verifies that both iterators return the same transactions. This test also finalizes the scheduler and verifies that StopIteration is thrown by the iterator. """ private_key = signing.generate_privkey() public_key = signing.generate_pubkey(private_key) txns = [] for names in [['a', 'b', 'c'], ['d', 'e'], ['f', 'g', 'h', 'i']]: batch_txns = [] for name in names: txn = create_transaction( name=name, private_key=private_key, public_key=public_key) batch_txns.append(txn) txns.append(txn) batch = create_batch( transactions=batch_txns, private_key=private_key, public_key=public_key) self.scheduler.add_batch(batch) self.scheduler.finalize() iterable1 = iter(self.scheduler) iterable2 = iter(self.scheduler) for txn in txns: scheduled_txn_info = next(iterable1) self.assertEqual(scheduled_txn_info, next(iterable2)) self.assertIsNotNone(scheduled_txn_info) self.assertEquals(txn.payload, scheduled_txn_info.txn.payload) self.scheduler.set_transaction_execution_result( txn.header_signature, False, None) with self.assertRaises(StopIteration): next(iterable1) def test_completion_on_finalize(self): """Tests that iteration will stop when finalized is called on an otherwise complete scheduler. Adds one batch and transaction, then verifies the iterable returns that transaction. Sets the execution result and then calls finalize. 
Since the the scheduler is complete (all transactions have had results set, and it's been finalized), we should get a StopIteration. This check is useful in making sure the finalize() can occur after all set_transaction_execution_result()s have been performed, because in a normal situation, finalize will probably occur prior to those calls. """ private_key = signing.generate_privkey() public_key = signing.generate_pubkey(private_key) txn = create_transaction( name='a', private_key=private_key, public_key=public_key) batch = create_batch( transactions=[txn], private_key=private_key, public_key=public_key) iterable = iter(self.scheduler) self.scheduler.add_batch(batch) scheduled_txn_info = next(iterable) self.assertIsNotNone(scheduled_txn_info) self.assertEquals(txn.payload, scheduled_txn_info.txn.payload) self.scheduler.set_transaction_execution_result( txn.header_signature, False, None) self.scheduler.finalize() with self.assertRaises(StopIteration): next(iterable) def test_completion_on_finalize_only_when_done(self): """Tests that iteration will stop when finalized is called on an otherwise complete scheduler. Adds one batch and transaction, then verifies the iterable returns that transaction. Finalizes then sets the execution result. The schedule should not be marked as complete. This check is useful in making sure the finalize() can occur after all set_transaction_execution_result()s have been performed, because in a normal situation, finalize will probably occur prior to those calls. 
""" private_key = signing.generate_privkey() public_key = signing.generate_pubkey(private_key) txn = create_transaction( name='a', private_key=private_key, public_key=public_key) batch = create_batch( transactions=[txn], private_key=private_key, public_key=public_key) iterable = iter(self.scheduler) self.scheduler.add_batch(batch) scheduled_txn_info = next(iterable) self.assertIsNotNone(scheduled_txn_info) self.assertEquals(txn.payload, scheduled_txn_info.txn.payload) self.scheduler.finalize() self.assertFalse(self.scheduler.complete(block=False)) self.scheduler.set_transaction_execution_result( txn.header_signature, False, None) self.assertTrue(self.scheduler.complete(block=False)) with self.assertRaises(StopIteration): next(iterable) def test_completion_on_last_result(self): """Tests the that the schedule is not marked complete until the last result is set. Adds three batches with varying number of transactions, then tests that they are returned in the appropriate order when using an iterator. Test that the value of `complete` is false until the last value. This test also finalizes the scheduler and verifies that StopIteration is thrown by the iterator, and the complete is true in the at the end. 
""" private_key = signing.generate_privkey() public_key = signing.generate_pubkey(private_key) txns = [] for names in [['a', 'b', 'c'], ['d', 'e'], ['f', 'g', 'h', 'i']]: batch_txns = [] for name in names: txn = create_transaction( name=name, private_key=private_key, public_key=public_key) batch_txns.append(txn) txns.append(txn) batch = create_batch( transactions=batch_txns, private_key=private_key, public_key=public_key) self.scheduler.add_batch(batch) self.scheduler.finalize() iterable1 = iter(self.scheduler) for txn in txns: scheduled_txn_info = next(iterable1) self.assertFalse(self.scheduler.complete(block=False)) self.scheduler.set_transaction_execution_result( txn.header_signature, False, None) self.assertTrue(self.scheduler.complete(block=False)) with self.assertRaises(StopIteration): next(iterable1) def test_add_batch_after_empty_iteration(self): """Tests that iterations will continue as result of add_batch(). This test calls next() on a scheduler iterator in a separate thread called the IteratorThread. The test waits until the IteratorThread is waiting in next(); internal to the scheduler, it will be waiting on a condition variable as there are no transactions to return and the scheduler is not finalized. Then, the test continues by running add_batch(), which should cause the next() running in the IterableThread to return a transaction. This demonstrates the scheduler's ability to wait on an empty iterator but continue as transactions become available via add_batch. """ private_key = signing.generate_privkey() public_key = signing.generate_pubkey(private_key) # Create a basic transaction and batch. txn = create_transaction( name='a', private_key=private_key, public_key=public_key) batch = create_batch( transactions=[txn], private_key=private_key, public_key=public_key) # This class is used to run the scheduler's iterator. 
class IteratorThread(threading.Thread): def __init__(self, iterable): threading.Thread.__init__(self) self._iterable = iterable self.ready = False self.condition = threading.Condition() self.txn_info = None def run(self): # Even with this lock here, there is a race condition between # exit of the lock and entry into the iterable. That is solved # by sleep later in the test. with self.condition: self.ready = True self.condition.notify() txn_info = next(self._iterable) with self.condition: self.txn_info = txn_info self.condition.notify() # This is the iterable we are testing, which we will use in the # IteratorThread. We also use it in this thread below to test # for StopIteration. iterable = iter(self.scheduler) # Create and startup thread. thread = IteratorThread(iterable=iterable) thread.start() # Pause here to make sure the thread is absolutely as far along as # possible; in other words, right before we call next() in it's run() # method. When this returns, there should be very little time until # the iterator is blocked on a condition variable. with thread.condition: while not thread.ready: thread.condition.wait() # May the daemons stay away during this dark time, and may we be # forgiven upon our return. time.sleep(1) # At this point, the IteratorThread should be waiting next(), so we go # ahead and give it a batch. self.scheduler.add_batch(batch) # If all goes well, thread.txn_info will get set to the result of the # next() call. If not, it will timeout and thread.txn_info will be # empty. with thread.condition: if thread.txn_info is None: thread.condition.wait(5) # If thread.txn_info is empty, the test failed as iteration did not # continue after add_batch(). self.assertIsNotNone(thread.txn_info, "iterable failed to return txn") self.assertEquals(txn.payload, thread.txn_info.txn.payload) # Continue with normal shutdown/cleanup. 
self.scheduler.finalize() self.scheduler.set_transaction_execution_result( txn.header_signature, False, None) with self.assertRaises(StopIteration): next(iterable) def test_set_status(self): """Tests that set_status() has the correct behavior. Basically: 1. Adds a batch which has two transactions. 2. Calls next_transaction() to get the first Transaction. 3. Calls next_transaction() to verify that it returns None. 4. Calls set_status() to mark the first transaction applied. 5. Calls next_transaction() to get the second Transaction. Step 3 returns None because the first transaction hasn't been marked as applied, and the SerialScheduler will only return one not-applied Transaction at a time. Step 5 is expected to return the second Transaction, not None, since the first Transaction was marked as applied in the previous step. """ private_key = signing.generate_privkey() public_key = signing.generate_pubkey(private_key) txns = [] for name in ['a', 'b']: txn = create_transaction( name=name, private_key=private_key, public_key=public_key) txns.append(txn) batch = create_batch( transactions=txns, private_key=private_key, public_key=public_key) self.scheduler.add_batch(batch) scheduled_txn_info = self.scheduler.next_transaction() self.assertIsNotNone(scheduled_txn_info) self.assertEquals('a', scheduled_txn_info.txn.payload.decode()) self.assertIsNone(self.scheduler.next_transaction()) self.scheduler.set_transaction_execution_result( scheduled_txn_info.txn.header_signature, is_valid=False, context_id=None) scheduled_txn_info = self.scheduler.next_transaction() self.assertIsNotNone(scheduled_txn_info) self.assertEquals('b', scheduled_txn_info.txn.payload.decode()) def test_valid_batch_invalid_batch(self): """Tests the squash function. That the correct hash is being used for each txn and that the batch ending state hash is being set. Basically: 1. Adds two batches, one where all the txns are valid, and one where one of the txns is invalid. 2. 
Run through the scheduler executor interaction as txns are processed. 3. Verify that the valid state root is obtained through the squash function. 4. Verify that correct batch statuses are set """ private_key = signing.generate_privkey() public_key = signing.generate_pubkey(private_key) # 1) batch_signatures = [] for names in [['a', 'b'], ['invalid', 'c']]: batch_txns = [] for name in names: txn = create_transaction( name=name, private_key=private_key, public_key=public_key) batch_txns.append(txn) batch = create_batch( transactions=batch_txns, private_key=private_key, public_key=public_key) batch_signatures.append(batch.header_signature) self.scheduler.add_batch(batch) self.scheduler.finalize() # 2) sched1 = iter(self.scheduler) invalid_payload = hashlib.sha512('invalid'.encode()).hexdigest() while not self.scheduler.complete(block=False): txn_info = next(sched1) txn_header = transaction_pb2.TransactionHeader() txn_header.ParseFromString(txn_info.txn.header) inputs_or_outputs = list(txn_header.inputs) c_id = self.context_manager.create_context( state_hash=txn_info.state_hash, inputs=inputs_or_outputs, outputs=inputs_or_outputs, base_contexts=txn_info.base_context_ids) if txn_header.payload_sha512 == invalid_payload: self.scheduler.set_transaction_execution_result( txn_info.txn.header_signature, False, c_id) else: self.context_manager.set(c_id, [{inputs_or_outputs[0]: 1}]) self.scheduler.set_transaction_execution_result( txn_info.txn.header_signature, True, c_id) sched2 = iter(self.scheduler) # 3) txn_info_a = next(sched2) self.assertEquals(self.first_state_root, txn_info_a.state_hash) txn_a_header = transaction_pb2.TransactionHeader() txn_a_header.ParseFromString(txn_info_a.txn.header) inputs_or_outputs = list(txn_a_header.inputs) address_a = inputs_or_outputs[0] c_id_a = self.context_manager.create_context( state_hash=self.first_state_root, inputs=inputs_or_outputs, outputs=inputs_or_outputs, base_contexts=txn_info_a.base_context_ids) 
self.context_manager.set(c_id_a, [{address_a: 1}]) state_root2 = self.context_manager.commit_context([c_id_a], virtual=False) txn_info_b = next(sched2) self.assertEquals(txn_info_b.state_hash, state_root2) txn_b_header = transaction_pb2.TransactionHeader() txn_b_header.ParseFromString(txn_info_b.txn.header) inputs_or_outputs = list(txn_b_header.inputs) address_b = inputs_or_outputs[0] c_id_b = self.context_manager.create_context( state_hash=state_root2, inputs=inputs_or_outputs, outputs=inputs_or_outputs, base_contexts=txn_info_b.base_context_ids) self.context_manager.set(c_id_b, [{address_b: 1}]) state_root3 = self.context_manager.commit_context([c_id_b], virtual=False) txn_infoInvalid = next(sched2) self.assertEquals(txn_infoInvalid.state_hash, state_root3) txn_info_c = next(sched2) self.assertEquals(txn_info_c.state_hash, state_root3) # 4) batch1_result = self.scheduler.get_batch_execution_result( batch_signatures[0]) self.assertTrue(batch1_result.is_valid) self.assertEquals(batch1_result.state_hash, state_root3) batch2_result = self.scheduler.get_batch_execution_result( batch_signatures[1]) self.assertFalse(batch2_result.is_valid) self.assertIsNone(batch2_result.state_hash)