def test_tx_version(self):
    from hathor.transaction.base_transaction import TxVersion

    # The highest byte of the version field must be ignored when resolving
    # the concrete class; only the low byte selects Block vs Transaction.
    self.assertEqual(TxVersion(0xFF00).get_cls(), Block)
    self.assertEqual(TxVersion(0xFF01).get_cls(), Transaction)

    # Serialization round-trip (clone) must not mangle the version value.
    original = Block(version=0xFF00, nonce=100, weight=1)
    cloned = original.clone()
    self.assertEqual(original.version, cloned.version)
def setUp(self, tx_storage, reactor=None):
    """Build a HathorManager plus one mined block and one resolved tx fixture.

    :param tx_storage: storage backend under test; wallet/tokens indexes are attached to it
    :param reactor: optional reactor; a twisted Clock() is used when omitted
    """
    if not reactor:
        self.reactor = Clock()
    else:
        self.reactor = reactor
    # Advance the fake clock to wall-clock time so generated timestamps are realistic.
    self.reactor.advance(time.time())
    self.tx_storage = tx_storage
    assert tx_storage.first_timestamp > 0
    tx_storage._manually_initialize()

    self.genesis = self.tx_storage.get_all_genesis()
    self.genesis_blocks = [tx for tx in self.genesis if tx.is_block]
    self.genesis_txs = [tx for tx in self.genesis if not tx.is_block]

    from hathor.manager import HathorManager
    self.tmpdir = tempfile.mkdtemp()
    wallet = Wallet(directory=self.tmpdir)
    wallet.unlock(b'teste')
    self.manager = HathorManager(self.reactor, tx_storage=self.tx_storage, wallet=wallet)

    self.tx_storage.wallet_index = WalletIndex(self.manager.pubsub)
    self.tx_storage.tokens_index = TokensIndex()

    # A block confirming every genesis element (blocks and txs) as parents.
    block_parents = [tx.hash for tx in chain(self.genesis_blocks, self.genesis_txs)]
    output = TxOutput(200, bytes.fromhex('1e393a5ce2ff1c98d4ff6892f2175100f2dad049'))
    self.block = Block(timestamp=MIN_TIMESTAMP, weight=12, outputs=[output], parents=block_parents,
                       nonce=100781, storage=tx_storage)
    self.block.resolve()
    self.block.verify()

    # A transaction spending the first genesis block's output; the input data
    # is a pre-computed signature/pubkey blob for that output.
    tx_parents = [tx.hash for tx in self.genesis_txs]
    tx_input = TxInput(
        tx_id=self.genesis_blocks[0].hash, index=0,
        data=bytes.fromhex('46304402203470cb9818c9eb842b0c433b7e2b8aded0a51f5903e971649e870763d0266a'
                           'd2022049b48e09e718c4b66a0f3178ef92e4d60ee333d2d0e25af8868acf5acbb35aaa583'
                           '056301006072a8648ce3d020106052b8104000a034200042ce7b94cba00b654d4308f8840'
                           '7345cacb1f1032fb5ac80407b74d56ed82fb36467cb7048f79b90b1cf721de57e942c5748'
                           '620e78362cf2d908e9057ac235a63'))
    self.tx = Transaction(
        timestamp=MIN_TIMESTAMP + 2, weight=10, nonce=932049,
        inputs=[tx_input], outputs=[output],
        tokens=[bytes.fromhex('0023be91834c973d6a6ddd1a0ae411807b7c8ef2a015afb5177ee64b666ce602')],
        parents=tx_parents, storage=tx_storage)
    self.tx.resolve()

    # Disable weakref to test the internal methods. Otherwise, most methods return objects from weakref.
    self.tx_storage._disable_weakref()

    self.tx_storage.enable_lock()
def add_voided_by(self, block: Block, voided_hash: Optional[bytes] = None) -> bool:
    """Mark `block` as voided by `voided_hash` (defaults to the block's own hash).

    Returns False when the hash was already present, True when it was added.
    The voidance is then cascaded to every transaction spending this block.
    """
    assert block.storage is not None
    assert block.hash is not None
    storage = block.storage

    # Default: the block voids itself.
    voided_hash = block.hash if voided_hash is None else voided_hash
    assert voided_hash is not None

    metadata = block.get_metadata()
    if not metadata.voided_by:
        metadata.voided_by = set()
    if voided_hash in metadata.voided_by:
        # Already marked; nothing to persist or propagate.
        return False

    self.log.debug('add_voided_by', block=block.hash_hex, voided_hash=voided_hash.hex())

    metadata.voided_by.add(voided_hash)
    storage.save_transaction(block, only_metadata=True)

    # Cascade the voidance into every tx that spends one of this block's outputs.
    spenders: Iterable[bytes] = chain(*metadata.spent_outputs.values())
    for spender_hash in spenders:
        spender = storage.get_transaction(spender_hash)
        assert isinstance(spender, Transaction)
        self.consensus.transaction_algorithm.add_voided_by(spender, voided_hash)

    return True
def render_POST(self, request):
    """ POST request /mining/
        Expects a parameter 'block_bytes' that is the block in bytes
        Create the block object from the bytes and propagate it

        :rtype: bytes
    """
    try:
        payload = json.loads(request.content.read().decode('utf-8'))
        raw_block = base64.b64decode(payload['block_bytes'])
        block = Block.create_from_struct(raw_block, storage=self.manager.tx_storage)
    except (AttributeError, KeyError, ValueError, json.JSONDecodeError, binascii.Error, struct.error):
        # XXX ideally, we should catch each error separately and send an specific error
        # message, but we only return 0 or 1 on the API
        # AttributeError, json.JSONDecodeError: empty data or error decoding json
        # KeyError: missing 'block_bytes' on post_data
        # ValueError, struct.error: raised in create_block_from_struct
        # binascii.Error: incorrect base64 data
        return b'0'

    accepted = self.manager.submit_block(block)
    return b'1' if accepted else b'0'
def _find_first_parent_in_best_chain(self, block: Block) -> BaseTransaction:
    """Walk up the block-parent chain and return the first non-voided block,
    i.e., the block where the fork started.

    In the simple schema below, the best chain's blocks are O's, the side
    chain's blocks are I's, and the first valid block is the [O].

    O-O-O-O-[O]-O-O-O-O
             |
             +-I-I-I
    """
    assert block.storage is not None
    storage = block.storage

    assert len(block.parents) > 0, 'This should never happen because the genesis is always in the best chain'
    candidate = storage.get_transaction(block.get_block_parent_hash())
    assert isinstance(candidate, Block)

    # Keep climbing while the candidate is voided (i.e., still on the side chain).
    while candidate.get_metadata().voided_by:
        assert len(candidate.parents) > 0, \
            'This should never happen because the genesis is always in the best chain'
        candidate = storage.get_transaction(candidate.get_block_parent_hash())
        assert isinstance(candidate, Block)

    return candidate
def union_voided_by_from_parents(self, block: Block) -> Set[bytes]:
    """Return the union of the voided_by of block's parents.

    It does not include the hash of blocks because the hash of blocks are
    not propagated through the chains. For further information, see the
    docstring of the ConsensusAlgorithm class.
    """
    result: Set[bytes] = set()
    for parent in block.get_parents():
        assert parent.hash is not None
        inherited = parent.get_metadata().voided_by
        if not inherited:
            continue
        if parent.is_block:
            # We must go through the blocks because the voidance caused
            # by a transaction must be sent ahead. For example, in the
            # chain b0 <- b1 <- b2 <- b3, if a transaction voids b1, then
            # it must also voids b2 and b3. But, we must ignore the hash of
            # the blocks themselves.
            inherited = inherited.copy()
            inherited.discard(parent.hash)
        result.update(self.consensus.filter_out_soft_voided_entries(parent, inherited))
    return result
def verify_spent_reward(self, block: Block) -> None:
    """Check that a spent block reward is mature enough (has enough blocks after it
    on the best chain).

    We only consider the blocks on the best chain up to the tx's timestamp.
    Raises RewardLocked when the reward is still locked.
    """
    assert self.storage is not None

    if not self._height_cache:
        # get_best_block_tips is a costly method because there are many orphan blocks in our blockchain
        # This method is called for each input that spends a block and, since we have many transactions
        # that consolidate blocks rewards, this is common.
        # This cache helps to decrease 90% of the verify time for those transactions.
        # Using the timestamp, we get the block immediately before this transaction in the blockchain.
        tips = self.storage.get_best_block_tips(self.timestamp - 1)
        assert len(tips) > 0
        best_tip = self.storage.get_transaction(tips[0])
        assert best_tip is not None
        assert self.timestamp > best_tip.timestamp
        self._height_cache = best_tip.get_metadata().height
    best_height = self._height_cache

    spent_height = block.get_metadata().height
    spend_blocks = best_height - spent_height
    if spend_blocks < settings.REWARD_SPEND_MIN_BLOCKS:
        raise RewardLocked(f'Reward needs {settings.REWARD_SPEND_MIN_BLOCKS} blocks to be spent, {spend_blocks} '
                           'not enough')
def test_post(self):
    # Fetch a block template from the GET endpoint.
    response_get = yield self.web.get('mining')
    template = response_get.json_value()
    block = Block.create_from_struct(base64.b64decode(template.get('block_bytes')))

    # Solve it with a trivially low weight and submit: must be accepted.
    block.weight = 4
    block.resolve()
    encoded = base64.b64encode(bytes(block)).decode('ascii')
    response_post = yield self.web.post('mining', {'block_bytes': encoded})
    self.assertEqual(response_post.written[0], b'1')

    # Raise the weight without re-solving the PoW: must be rejected.
    block.weight = 100
    encoded = base64.b64encode(bytes(block)).decode('ascii')
    response_post = yield self.web.post('mining', {'block_bytes': encoded})
    # Probability 2^(100 - 256) of failing
    self.assertEqual(response_post.written[0], b'0')
def submit_block(self, blk: Block, fails_silently: bool = True) -> bool:
    """Used by submit block from all mining APIs.

    Accepts the block only when its block parent is a current best-block tip;
    anything else would be an immediate orphan.

    :param blk: solved block received from a miner
    :param fails_silently: forwarded to propagate_tx
    :return: True when the block was accepted for propagation
    """
    tips = self.tx_storage.get_best_block_tips()
    parent_hash = blk.get_block_parent_hash()
    if parent_hash not in tips:
        # Log the rejection so miners can diagnose stale templates; this matches
        # the sibling implementation of submit_block elsewhere in the codebase.
        self.log.warn('submit_block(): Ignoring block: parent not a tip', blk=blk.hash_hex)
        return False
    return self.propagate_tx(blk, fails_silently=fails_silently)
def calculate_block_difficulty(self, block: Block) -> float:
    """Compute the expected weight for `block` from its ascendants, via
    calculate_next_weight."""
    # In test mode the block difficulty is not validated: return a fixed weight.
    if self.test_mode & TestMode.TEST_BLOCK_WEIGHT:
        return 1.0

    if block.is_genesis:
        # Genesis has no parent to derive a weight from.
        return self.min_block_weight

    parent_block = block.get_block_parent()
    return self.calculate_next_weight(parent_block, block.timestamp)
def submit_block(self, blk: Block, fails_silently: bool = True) -> bool:
    """Used by submit block from all mining APIs.

    Rejects (and logs) blocks whose parent is not a current best-chain tip.
    """
    best_tips = self.tx_storage.get_best_block_tips()
    if blk.get_block_parent_hash() not in best_tips:
        self.log.warn('submit_block(): Ignoring block: parent not a tip', blk=blk.hash_hex)
        return False
    return self.propagate_tx(blk, fails_silently=fails_silently)
def resolve_block_bytes(block_bytes):
    """ From block bytes we create a block and resolve pow
        Return block bytes with hash and nonce after pow

        :rtype: bytes
    """
    from hathor.transaction import Block

    decoded = base64.b64decode(block_bytes)
    block = Block.create_from_struct(decoded)
    # Mine the block (fills in nonce and hash).
    block.resolve()
    return block.get_struct()
def test_block_outputs(self):
    from hathor.transaction import MAX_NUM_OUTPUTS
    from hathor.transaction.exceptions import TooManyOutputs

    # A block carrying more than MAX_NUM_OUTPUTS outputs must fail verification.
    parents = [tx.hash for tx in self.genesis]
    address = get_address_from_public_key(self.genesis_public_key)
    script = P2PKH.create_output_script(address)
    too_many_outputs = [TxOutput(100, script) for _ in range(MAX_NUM_OUTPUTS + 1)]

    block = Block(
        nonce=100,
        outputs=too_many_outputs,
        parents=parents,
        weight=1,  # low weight so we don't waste time with PoW
        storage=self.tx_storage)

    with self.assertRaises(TooManyOutputs):
        block.verify_outputs()
def remove_voided_by_from_chain(self, block: Block) -> None:
    """ Remove voided_by from the chain. Now, it is the best chain.

    The blocks are visited from right to left (most recent to least recent).
    """
    current = block
    while True:
        assert current.is_block
        if not self.remove_voided_by(current):
            # Stop at the first block that had nothing to remove.
            break
        current = current.get_block_parent()
def update_voided_by_from_parents(self, block: Block) -> bool:
    """Update block's metadata voided_by from parents.

    Return True if the block is voided and False otherwise."""
    assert block.storage is not None

    inherited: Set[bytes] = self.union_voided_by_from_parents(block)
    if not inherited:
        # Parents carry no voidance: the block stays valid.
        return False

    meta = block.get_metadata()
    if meta.voided_by:
        meta.voided_by.update(inherited)
    else:
        meta.voided_by = inherited.copy()
    block.storage.save_transaction(block, only_metadata=True)
    block.storage.del_from_indexes(block, relax_assert=True)
    return True
def render_POST(self, request):
    """ POST request /mining/
        Expects a parameter 'block_bytes' that is the block in bytes
        Create the block object from the bytes and propagate it

        :rtype: bytes
    """
    import struct

    try:
        post_data = json.loads(request.content.read().decode('utf-8'))
        block_bytes_str = post_data['block_bytes']
        block_bytes = base64.b64decode(block_bytes_str)
        block = Block.create_from_struct(block_bytes, storage=self.manager.tx_storage)
    except (AttributeError, KeyError, ValueError, struct.error):
        # Malformed requests must answer b'0' instead of raising (which would
        # produce a 500 response). We only return 0 or 1 on this API:
        # AttributeError / json.JSONDecodeError (a ValueError): empty data or invalid json
        # KeyError: missing 'block_bytes' on post_data
        # binascii.Error (a ValueError subclass): incorrect base64 data
        # ValueError, struct.error: raised in create_from_struct
        return b'0'

    ret = self.manager.propagate_tx(block)
    if ret:
        return b'1'
    return b'0'
def test_get_txs(self):
    """Exercise the paginated older/newer tx and block queries and the protobuf round-trips."""
    first_block = add_new_blocks(self.manager, 30, advance_clock=1)[0]
    first_tx = add_new_transactions(self.manager, 3, advance_clock=1)[0]

    # Using timestamp as float to test code
    txs, _ = self.manager.tx_storage.get_older_txs_after(float(first_tx.timestamp), first_tx.hash, 3)
    # Two txs are older than the first added tx (presumably the genesis txs — verify against fixture).
    self.assertEqual(len(txs), 2)

    txs, _ = self.manager.tx_storage.get_newer_txs_after(float(first_tx.timestamp), first_tx.hash, 3)
    # The two remaining added txs are newer.
    self.assertEqual(len(txs), 2)

    blocks, _ = self.manager.tx_storage.get_older_blocks_after(float(first_block.timestamp), first_block.hash, 3)
    # Only one block is older than the first added block (presumably genesis).
    self.assertEqual(len(blocks), 1)

    blocks, _ = self.manager.tx_storage.get_newer_blocks_after(float(first_block.timestamp), first_block.hash, 3)
    self.assertEqual(len(blocks), 3)

    # Protobuf round-trip must preserve equality for txs...
    tx = txs[0]
    proto = tx.to_proto()
    tx2 = Transaction.create_from_proto(proto)
    self.assertEqual(tx, tx2)

    # ...and for blocks.
    block = blocks[0]
    proto2 = block.to_proto()
    block2 = Block.create_from_proto(proto2)
    self.assertEqual(block, block2)

    # The generic factory must dispatch based on the proto's payload.
    tx3 = tx_or_block_from_proto(proto)
    self.assertEqual(tx, tx3)

    # Clearing the payload field makes the proto invalid: the factory must reject it.
    proto.ClearField('transaction')
    with self.assertRaises(ValueError):
        tx_or_block_from_proto(proto)

    # get_time_from_now must depend on the reference `now` argument.
    t = datetime.datetime.now() - datetime.timedelta(seconds=1)
    t_tx = tx.get_time_from_now()
    t2_tx = tx.get_time_from_now(now=t)
    self.assertNotEqual(t_tx, t2_tx)
def verify_spent_reward(self, block: Block) -> None:
    """Check that a spent block reward is mature enough (has enough blocks after it
    on the best chain).

    We only consider the blocks on the best chain up to the tx's timestamp.
    Raises RewardLocked when the reward is still locked.
    """
    assert self.storage is not None

    # Using the timestamp, find the best-chain block immediately before this transaction.
    best_tips = self.storage.get_best_block_tips(self.timestamp - 1)
    assert len(best_tips) > 0
    best_tip = self.storage.get_transaction(best_tips[0])
    assert best_tip is not None
    assert self.timestamp > best_tip.timestamp

    best_height = best_tip.get_metadata().height
    spent_height = block.get_metadata().height
    spend_blocks = best_height - spent_height
    if spend_blocks < settings.REWARD_SPEND_MIN_BLOCKS:
        raise RewardLocked(f'Reward needs {settings.REWARD_SPEND_MIN_BLOCKS} blocks to be spent, {spend_blocks} '
                           'not enough')
def test_block_number_parents(self):
    # A block whose parents are only the genesis txs has the wrong parent
    # composition and must fail verification.
    address = get_address_from_public_key(self.genesis_public_key)
    outputs = [TxOutput(100, P2PKH.create_output_script(address))]
    bad_parents = [tx.hash for tx in self.genesis_txs]

    block = Block(
        nonce=100,
        outputs=outputs,
        parents=bad_parents,
        weight=1,  # low weight so we don't waste time with PoW
        storage=self.tx_storage)
    block.resolve()

    with self.assertRaises(IncorrectParents):
        block.verify()
def test_block_with_htr_authority(self):
    # A token-authority output in a block must be rejected.
    parents = [tx.hash for tx in self.genesis]
    script = P2PKH.create_output_script(self.address)
    authority_output = TxOutput(0b11, script, 0b10000000)
    self.assertTrue(authority_output.is_token_authority())

    block = Block(
        nonce=100,
        outputs=[authority_output],
        parents=parents,
        weight=1,  # low weight so we don't waste time with PoW
        storage=self.manager.tx_storage)
    block.resolve()

    with self.assertRaises(InvalidToken):
        block.verify()
def test_tokens_in_block(self):
    # a block with token index > 1 should be invalid
    parents = [tx.hash for tx in self.genesis]
    script = P2PKH.create_output_script(self.address)
    outputs_with_token = [TxOutput(100, script, 1)]

    block = Block(
        nonce=100,
        outputs=outputs_with_token,
        parents=parents,
        weight=1,  # low weight so we don't waste time with PoW
        storage=self.manager.tx_storage)
    block.resolve()

    with self.assertRaises(BlockWithTokensError):
        block.verify()
def calculate_score(self, block: Block, *, mark_as_best_chain: bool = False) -> float:
    """ Calculate block's score, which is the accumulated work of the verified transactions and blocks.

    :param: mark_as_best_chain: If `True`, the transactions' will point `meta.first_block` to
                                the blocks of the chain.
    """
    assert block.storage is not None

    if block.is_genesis:
        # Genesis score equals its own weight; persist it when marking the best chain.
        if mark_as_best_chain:
            meta = block.get_metadata()
            meta.score = block.weight
            block.storage.save_transaction(block, only_metadata=True)
        return block.weight

    # The DFS only needs to descend until the fork point with the best chain.
    fork_parent = self._find_first_parent_in_best_chain(block)
    visited: Set[bytes] = set()
    return self._score_block_dfs(block, visited, mark_as_best_chain, fork_parent.timestamp)
def test_block_unknown_parent(self):
    address = get_address_from_public_key(self.genesis_public_key)
    outputs = [TxOutput(100, P2PKH.create_output_script(address))]

    # Random unknown parent
    unknown_parent = hashlib.sha256().digest()

    block = Block(
        nonce=100,
        outputs=outputs,
        parents=[unknown_parent],
        weight=1,  # low weight so we don't waste time with PoW
        storage=self.tx_storage)
    block.resolve()

    with self.assertRaises(ParentDoesNotExist):
        block.verify()
def test_post_invalid_data(self):
    # Obtain and solve a valid template first, so only the POST payload varies.
    response_get = yield self.web.get('mining')
    data_get = response_get.json_value()
    block = Block.create_from_struct(base64.b64decode(data_get.get('block_bytes')))
    block.weight = 4
    block.resolve()
    block_bytes_str = base64.b64encode(bytes(block)).decode('ascii')

    # missing post data
    response_post = yield self.web.post('mining')
    self.assertEqual(response_post.written[0], b'0')

    # invalid block bytes
    response_post = yield self.web.post('mining', {'block_bytes': base64.b64encode(b'aaa').decode('ascii')})
    self.assertEqual(response_post.written[0], b'0')

    # invalid base64
    response_post = yield self.web.post('mining', {'block_bytes': 'YWFha'})
    self.assertEqual(response_post.written[0], b'0')
def test_block_inputs(self):
    # a block with inputs should be invalid
    parents = [tx.hash for tx in self.genesis]
    genesis_block = self.genesis_blocks[0]
    inputs = [TxInput(genesis_block.hash, 0, b'')]

    address = get_address_from_public_key(self.genesis_public_key)
    outputs = [TxOutput(100, P2PKH.create_output_script(address))]

    block = Block(
        nonce=100,
        outputs=outputs,
        parents=parents,
        weight=1,  # low weight so we don't waste time with PoW
        storage=self.tx_storage)
    # Blocks do not accept inputs in their constructor; attach them by hand.
    block.inputs = inputs
    block.resolve()

    with self.assertRaises(BlockWithInputs):
        block.verify()
class _TransactionStorageTest(unittest.TestCase):
    """Base test case shared by the concrete transaction-storage backends.

    Subclasses call setUp() passing the storage instance under test.
    """

    def setUp(self, tx_storage, reactor=None):
        # Fall back to a fake clock when no reactor is supplied.
        if not reactor:
            self.reactor = Clock()
        else:
            self.reactor = reactor
        # Advance the fake clock to wall-clock time so generated timestamps are realistic.
        self.reactor.advance(time.time())
        self.tx_storage = tx_storage
        tx_storage._manually_initialize()

        self.genesis = self.tx_storage.get_all_genesis()
        self.genesis_blocks = [tx for tx in self.genesis if tx.is_block]
        self.genesis_txs = [tx for tx in self.genesis if not tx.is_block]

        from hathor.manager import HathorManager
        self.tmpdir = tempfile.mkdtemp(dir='/tmp/')
        wallet = Wallet(directory=self.tmpdir)
        wallet.unlock(b'teste')
        self.manager = HathorManager(self.reactor, tx_storage=self.tx_storage, wallet=wallet)

        self.tx_storage.wallet_index = WalletIndex(self.manager.pubsub)
        self.tx_storage.tokens_index = TokensIndex()

        # A block confirming every genesis element (blocks and txs) as parents.
        block_parents = [tx.hash for tx in chain(self.genesis_blocks, self.genesis_txs)]
        output = TxOutput(200, bytes.fromhex('1e393a5ce2ff1c98d4ff6892f2175100f2dad049'))
        self.block = Block(timestamp=MIN_TIMESTAMP, weight=12, outputs=[output], parents=block_parents,
                           nonce=100781, storage=tx_storage)
        self.block.resolve()
        self.block.verify()

        # A transaction spending the first genesis block's output; the input data
        # is a pre-computed signature/pubkey blob for that output.
        tx_parents = [tx.hash for tx in self.genesis_txs]
        tx_input = TxInput(
            tx_id=self.genesis_blocks[0].hash, index=0,
            data=bytes.fromhex('46304402203470cb9818c9eb842b0c433b7e2b8aded0a51f5903e971649e870763d0266a'
                               'd2022049b48e09e718c4b66a0f3178ef92e4d60ee333d2d0e25af8868acf5acbb35aaa583'
                               '056301006072a8648ce3d020106052b8104000a034200042ce7b94cba00b654d4308f8840'
                               '7345cacb1f1032fb5ac80407b74d56ed82fb36467cb7048f79b90b1cf721de57e942c5748'
                               '620e78362cf2d908e9057ac235a63'))
        self.tx = Transaction(
            timestamp=MIN_TIMESTAMP + 2, weight=10, nonce=932049,
            inputs=[tx_input], outputs=[output],
            tokens=[bytes.fromhex('0023be91834c973d6a6ddd1a0ae411807b7c8ef2a015afb5177ee64b666ce602')],
            parents=tx_parents, storage=tx_storage)
        self.tx.resolve()

        # Disable weakref to test the internal methods. Otherwise, most methods return objects from weakref.
        self.tx_storage._disable_weakref()

    def tearDown(self):
        shutil.rmtree(self.tmpdir)

    def test_genesis(self):
        # The fixture assumes exactly 1 genesis block and 2 genesis txs.
        self.assertEqual(1, len(self.genesis_blocks))
        self.assertEqual(2, len(self.genesis_txs))
        for tx in self.genesis:
            tx.verify()

        # Each genesis element must be retrievable from the storage and equal to itself.
        for tx in self.genesis:
            tx2 = self.tx_storage.get_transaction(tx.hash)
            self.assertEqual(tx, tx2)
            self.assertTrue(self.tx_storage.transaction_exists(tx.hash))

    def test_storage_basic(self):
        self.assertEqual(1, self.tx_storage.get_block_count())
        self.assertEqual(2, self.tx_storage.get_tx_count())
        self.assertEqual(3, self.tx_storage.get_count_tx_blocks())

        block_parents_hash = [x.data for x in self.tx_storage.get_block_tips()]
        self.assertEqual(1, len(block_parents_hash))
        self.assertEqual(block_parents_hash, [self.genesis_blocks[0].hash])

        tx_parents_hash = [x.data for x in self.tx_storage.get_tx_tips()]
        self.assertEqual(2, len(tx_parents_hash))
        self.assertEqual(set(tx_parents_hash), {self.genesis_txs[0].hash, self.genesis_txs[1].hash})

    def validate_save(self, obj):
        """Save `obj`, reload it, and compare every serialization; then exercise
        the index cache removal/re-insertion paths."""
        self.tx_storage.save_transaction(obj)

        loaded_obj1 = self.tx_storage.get_transaction(obj.hash)

        self.assertTrue(self.tx_storage.transaction_exists(obj.hash))

        self.assertEqual(obj, loaded_obj1)
        self.assertEqual(len(obj.get_funds_struct()), len(loaded_obj1.get_funds_struct()))
        self.assertEqual(bytes(obj), bytes(loaded_obj1))
        self.assertEqual(obj.to_json(), loaded_obj1.to_json())
        self.assertEqual(obj.is_block, loaded_obj1.is_block)

        # Testing add and remove from cache
        if self.tx_storage.with_index:
            if obj.is_block:
                self.assertTrue(obj.hash in self.tx_storage.block_index.tips_index.tx_last_interval)
            else:
                self.assertTrue(obj.hash in self.tx_storage.tx_index.tips_index.tx_last_interval)

        self.tx_storage._del_from_cache(obj)

        if self.tx_storage.with_index:
            if obj.is_block:
                self.assertFalse(obj.hash in self.tx_storage.block_index.tips_index.tx_last_interval)
            else:
                self.assertFalse(obj.hash in self.tx_storage.tx_index.tips_index.tx_last_interval)

        self.tx_storage._add_to_cache(obj)
        if self.tx_storage.with_index:
            if obj.is_block:
                self.assertTrue(obj.hash in self.tx_storage.block_index.tips_index.tx_last_interval)
            else:
                self.assertTrue(obj.hash in self.tx_storage.tx_index.tips_index.tx_last_interval)

    def test_save_block(self):
        self.validate_save(self.block)

    def test_save_tx(self):
        self.validate_save(self.tx)

    def test_save_token_creation_tx(self):
        tx = create_tokens(self.manager, propagate=False)
        self.validate_save(tx)

    def _validate_not_in_index(self, tx, index):
        # The removed tx must be absent from both the tips and txs sub-indexes.
        tips = index.tips_index[self.tx.timestamp]
        self.assertNotIn(self.tx.hash, [x.data for x in tips])
        self.assertNotIn(self.tx.hash, index.tips_index.tx_last_interval)

        self.assertIsNone(index.txs_index.find_tx_index(tx))

    def _test_remove_tx_or_block(self, tx):
        self.validate_save(tx)

        self.tx_storage.remove_transaction(tx)
        with self.assertRaises(TransactionDoesNotExist):
            self.tx_storage.get_transaction(tx.hash)

        if hasattr(self.tx_storage, 'all_index'):
            self._validate_not_in_index(tx, self.tx_storage.all_index)

        if tx.is_block:
            if hasattr(self.tx_storage, 'block_index'):
                self._validate_not_in_index(tx, self.tx_storage.block_index)
        else:
            if hasattr(self.tx_storage, 'tx_index'):
                self._validate_not_in_index(tx, self.tx_storage.tx_index)

        # Check wallet index.
        wallet_index = self.tx_storage.wallet_index
        addresses = wallet_index._get_addresses(tx)
        for address in addresses:
            self.assertNotIn(tx.hash, wallet_index.index[address])

        # TODO Check self.tx_storage.tokens_index

        # Try to remove twice. It is supposed to do nothing.
        self.tx_storage.remove_transaction(tx)

    def test_remove_tx(self):
        self._test_remove_tx_or_block(self.tx)

    def test_remove_block(self):
        self._test_remove_tx_or_block(self.block)

    def test_shared_memory(self):
        # Enable weakref to this test only.
        self.tx_storage._enable_weakref()
        self.validate_save(self.block)
        self.validate_save(self.tx)
        for tx in [self.tx, self.block]:
            # just making sure, if it is genesis the test is wrong
            self.assertFalse(tx.is_genesis)

            # load transactions twice
            tx1 = self.tx_storage.get_transaction(tx.hash)
            tx2 = self.tx_storage.get_transaction(tx.hash)

            # naturally they should be equal, but this time so do the objects
            self.assertTrue(tx1 == tx2)
            self.assertTrue(tx1 is tx2)

            meta1 = tx1.get_metadata()
            meta2 = tx2.get_metadata()

            # and naturally the metadata too
            self.assertTrue(meta1 == meta2)
            self.assertTrue(meta1 is meta2)

    def test_get_wrong_tx(self):
        # A hash that was never stored must raise TransactionDoesNotExist.
        hex_error = bytes.fromhex('00001c5c0b69d13b05534c94a69b2c8272294e6b0c536660a3ac264820677024')
        with self.assertRaises(TransactionDoesNotExist):
            self.tx_storage.get_transaction(hex_error)

    def test_save_metadata(self):
        # Saving genesis metadata
        self.tx_storage.save_transaction(self.genesis_txs[0], only_metadata=True)

        tx = self.block
        # First we save to the storage
        self.tx_storage.save_transaction(tx)

        # Mutate the metadata and persist only it; the reload must reflect the change.
        metadata = tx.get_metadata()
        metadata.spent_outputs[1].append(self.genesis_blocks[0].hash)
        random_tx = bytes.fromhex('0000222e64683b966b4268f387c269915cc61f6af5329823a93e3696cb0f2222')
        metadata.children.append(random_tx)

        self.tx_storage.save_transaction(tx, only_metadata=True)
        tx2 = self.tx_storage.get_transaction(tx.hash)
        metadata2 = tx2.get_metadata()
        self.assertEqual(metadata, metadata2)

        # 3 genesis elements + the block saved above.
        total = 0
        for tx in self.tx_storage.get_all_transactions():
            total += 1
        self.assertEqual(total, 4)

    def test_storage_new_blocks(self):
        tip_blocks = [x.data for x in self.tx_storage.get_block_tips()]
        self.assertEqual(tip_blocks, [self.genesis_blocks[0].hash])

        block1 = self._add_new_block()
        tip_blocks = [x.data for x in self.tx_storage.get_block_tips()]
        self.assertEqual(tip_blocks, [block1.hash])

        block2 = self._add_new_block()
        tip_blocks = [x.data for x in self.tx_storage.get_block_tips()]
        self.assertEqual(tip_blocks, [block2.hash])

        # Block3 has the same parents as block2.
        block3 = self._add_new_block(parents=block2.parents)
        tip_blocks = [x.data for x in self.tx_storage.get_block_tips()]
        self.assertEqual(set(tip_blocks), {block2.hash, block3.hash})

        # Re-generate caches to test topological sort.
        self.tx_storage._manually_initialize()
        tip_blocks = [x.data for x in self.tx_storage.get_block_tips()]
        self.assertEqual(set(tip_blocks), {block2.hash, block3.hash})

    def test_token_list(self):
        tx = self.tx
        self.validate_save(tx)
        # 2 token uids
        tx.tokens.append(bytes.fromhex('00001c5c0b69d13b05534c94a69b2c8272294e6b0c536660a3ac264820677024'))
        tx.resolve()
        self.validate_save(tx)
        # no tokens
        tx.tokens = []
        tx.resolve()
        self.validate_save(tx)

    def _add_new_block(self, parents=None):
        """Mine and save one new block on top of the current tips; returns the block."""
        block = self.manager.generate_mining_block()
        block.data = b'Testing, testing, 1, 2, 3... testing, testing...'
        if parents is not None:
            block.parents = parents
        block.weight = 10
        self.assertTrue(block.resolve())
        block.verify()
        self.manager.tx_storage.save_transaction(block)
        self.reactor.advance(5)
        return block

    def test_topological_sort(self):
        self.manager.test_mode = TestMode.TEST_ALL_WEIGHT
        _total = 0
        blocks = add_new_blocks(self.manager, 1, advance_clock=1)
        _total += len(blocks)
        blocks = add_blocks_unlock_reward(self.manager)
        _total += len(blocks)
        add_new_transactions(self.manager, 1, advance_clock=1)[0]

        total = 0
        for tx in self.tx_storage._topological_sort():
            total += 1

        # added blocks + genesis txs + added tx
        self.assertEqual(total, _total + 3 + 1)

    def test_get_best_block_weight(self):
        block = self._add_new_block()
        weight = self.tx_storage.get_weight_best_block()
        self.assertEqual(block.weight, weight)
class BaseTransactionStorageTest(unittest.TestCase):
    """Shared test-case base exercising a TransactionStorage implementation.

    Subclasses call setUp() with a concrete storage; ``__test__ = False``
    keeps this base from being collected directly by the test runner.
    """
    __test__ = False

    def setUp(self, tx_storage, reactor=None):
        """Build a manager + wallet around the given storage and create one
        resolved block and one resolved transaction as fixtures.

        :param tx_storage: concrete TransactionStorage under test
        :param reactor: optional reactor; defaults to a fresh twisted Clock
        """
        from hathor.manager import HathorManager
        if not reactor:
            self.reactor = Clock()
        else:
            self.reactor = reactor
        self.reactor.advance(time.time())
        self.tx_storage = tx_storage
        assert tx_storage.first_timestamp > 0

        tx_storage._manually_initialize()

        self.genesis = self.tx_storage.get_all_genesis()
        self.genesis_blocks = [tx for tx in self.genesis if tx.is_block]
        self.genesis_txs = [tx for tx in self.genesis if not tx.is_block]

        self.tmpdir = tempfile.mkdtemp()
        wallet = Wallet(directory=self.tmpdir)
        wallet.unlock(b'teste')
        self.manager = HathorManager(self.reactor, tx_storage=self.tx_storage, wallet=wallet)

        self.tx_storage.indexes.enable_address_index(self.manager.pubsub)
        self.tx_storage.indexes.enable_tokens_index()

        block_parents = [tx.hash for tx in chain(self.genesis_blocks, self.genesis_txs)]
        output = TxOutput(200, P2PKH.create_output_script(BURN_ADDRESS))
        self.block = Block(timestamp=MIN_TIMESTAMP, weight=12, outputs=[output], parents=block_parents,
                           nonce=100781, storage=tx_storage)
        self.block.resolve()
        self.block.verify()
        self.block.get_metadata().validation = ValidationState.FULL

        tx_parents = [tx.hash for tx in self.genesis_txs]
        tx_input = TxInput(
            tx_id=self.genesis_blocks[0].hash, index=0,
            data=bytes.fromhex('46304402203470cb9818c9eb842b0c433b7e2b8aded0a51f5903e971649e870763d0266a'
                               'd2022049b48e09e718c4b66a0f3178ef92e4d60ee333d2d0e25af8868acf5acbb35aaa583'
                               '056301006072a8648ce3d020106052b8104000a034200042ce7b94cba00b654d4308f8840'
                               '7345cacb1f1032fb5ac80407b74d56ed82fb36467cb7048f79b90b1cf721de57e942c5748'
                               '620e78362cf2d908e9057ac235a63'))

        self.tx = Transaction(
            timestamp=MIN_TIMESTAMP + 2, weight=10, nonce=932049, inputs=[tx_input], outputs=[output],
            tokens=[bytes.fromhex('0023be91834c973d6a6ddd1a0ae411807b7c8ef2a015afb5177ee64b666ce602')],
            parents=tx_parents, storage=tx_storage)
        self.tx.resolve()
        self.tx.get_metadata().validation = ValidationState.FULL

        # Disable weakref to test the internal methods. Otherwise, most methods return objects from weakref.
        self.tx_storage._disable_weakref()

        self.tx_storage.enable_lock()

    def tearDown(self):
        """Remove the temporary wallet directory created in setUp."""
        shutil.rmtree(self.tmpdir)

    def test_genesis_ref(self):
        """With weakrefs enabled, repeated genesis lookups must return the same objects."""
        # Enable weakref to this test only.
        self.tx_storage._enable_weakref()

        genesis_set = set(self.tx_storage.get_all_genesis())
        for tx in genesis_set:
            tx2 = self.tx_storage.get_transaction(tx.hash)
            self.assertTrue(tx is tx2)

        from hathor.transaction.genesis import _get_genesis_transactions_unsafe

        genesis_from_settings = _get_genesis_transactions_unsafe(None)
        for tx in genesis_from_settings:
            tx2 = self.tx_storage.get_transaction(tx.hash)
            # Objects built straight from settings must NOT be the storage's instances.
            self.assertTrue(tx is not tx2)
            for tx3 in genesis_set:
                self.assertTrue(tx is not tx3)
                if tx2 == tx3:
                    self.assertTrue(tx2 is tx3)

    def test_genesis(self):
        """Genesis must be 1 block + 2 txs, all verifiable and present in storage."""
        self.assertEqual(1, len(self.genesis_blocks))
        self.assertEqual(2, len(self.genesis_txs))
        for tx in self.genesis:
            tx.verify()

        for tx in self.genesis:
            tx2 = self.tx_storage.get_transaction(tx.hash)
            self.assertEqual(tx, tx2)
            self.assertTrue(self.tx_storage.transaction_exists(tx.hash))

    def test_get_empty_merklee_tree(self):
        """Requesting a merkle tree before any vertex exists must not blow up."""
        # We use `first_timestamp - 1` to ensure that the merkle tree will be empty.
        self.tx_storage.get_merkle_tree(self.tx_storage.first_timestamp - 1)

    def test_first_timestamp(self):
        """first_timestamp must equal the earliest genesis timestamp."""
        self.assertEqual(self.tx_storage.first_timestamp, min(x.timestamp for x in self.genesis))

    def test_storage_basic(self):
        """Counts and tip indexes must reflect exactly the genesis vertices."""
        self.assertEqual(1, self.tx_storage.get_block_count())
        self.assertEqual(2, self.tx_storage.get_tx_count())
        self.assertEqual(3, self.tx_storage.get_count_tx_blocks())

        block_parents_hash = [x.data for x in self.tx_storage.get_block_tips()]
        self.assertEqual(1, len(block_parents_hash))
        self.assertEqual(block_parents_hash, [self.genesis_blocks[0].hash])

        tx_parents_hash = [x.data for x in self.tx_storage.get_tx_tips()]
        self.assertEqual(2, len(tx_parents_hash))
        self.assertEqual(set(tx_parents_hash), {self.genesis_txs[0].hash, self.genesis_txs[1].hash})

    def test_storage_basic_v2(self):
        """Same as test_storage_basic but via best-block-tips and the manager API."""
        self.assertEqual(1, self.tx_storage.get_block_count())
        self.assertEqual(2, self.tx_storage.get_tx_count())
        self.assertEqual(3, self.tx_storage.get_count_tx_blocks())

        block_parents_hash = self.tx_storage.get_best_block_tips()
        self.assertEqual(1, len(block_parents_hash))
        self.assertEqual(block_parents_hash, [self.genesis_blocks[0].hash])

        tx_parents_hash = self.manager.get_new_tx_parents()
        self.assertEqual(2, len(tx_parents_hash))
        self.assertEqual(set(tx_parents_hash), {self.genesis_txs[0].hash, self.genesis_txs[1].hash})

    def validate_save(self, obj):
        """Save obj, reload it, and assert full serialization + index round-trips.

        Also exercises del_from_indexes/add_to_indexes to make sure the tip
        indexes stay consistent with membership.
        """
        self.tx_storage.save_transaction(obj, add_to_indexes=True)

        loaded_obj1 = self.tx_storage.get_transaction(obj.hash)

        self.assertTrue(self.tx_storage.transaction_exists(obj.hash))

        self.assertEqual(obj, loaded_obj1)
        self.assertEqual(len(obj.get_funds_struct()), len(loaded_obj1.get_funds_struct()))
        self.assertEqual(bytes(obj), bytes(loaded_obj1))
        self.assertEqual(obj.to_json(), loaded_obj1.to_json())
        self.assertEqual(obj.is_block, loaded_obj1.is_block)

        # Testing add and remove from cache
        if self.tx_storage.with_index:
            if obj.is_block:
                self.assertTrue(obj.hash in self.tx_storage.indexes.block_tips.tx_last_interval)
            else:
                self.assertTrue(obj.hash in self.tx_storage.indexes.tx_tips.tx_last_interval)

        self.tx_storage.del_from_indexes(obj)

        if self.tx_storage.with_index:
            if obj.is_block:
                self.assertFalse(obj.hash in self.tx_storage.indexes.block_tips.tx_last_interval)
            else:
                self.assertFalse(obj.hash in self.tx_storage.indexes.tx_tips.tx_last_interval)

        self.tx_storage.add_to_indexes(obj)

        if self.tx_storage.with_index:
            if obj.is_block:
                self.assertTrue(obj.hash in self.tx_storage.indexes.block_tips.tx_last_interval)
            else:
                self.assertTrue(obj.hash in self.tx_storage.indexes.tx_tips.tx_last_interval)

    def test_save_block(self):
        """The fixture block must round-trip through the storage."""
        self.validate_save(self.block)

    def test_save_tx(self):
        """The fixture transaction must round-trip through the storage."""
        self.validate_save(self.tx)

    def test_save_token_creation_tx(self):
        """A token-creation transaction must round-trip through the storage."""
        tx = create_tokens(self.manager, propagate=False)
        tx.get_metadata().validation = ValidationState.FULL
        self.validate_save(tx)

    def _validate_not_in_index(self, tx, index):
        """Assert tx is absent from the given tips/txs index pair."""
        tips = index.tips_index[self.tx.timestamp]
        self.assertNotIn(self.tx.hash, [x.data for x in tips])
        self.assertNotIn(self.tx.hash, index.tips_index.tx_last_interval)

        self.assertIsNone(index.txs_index.find_tx_index(tx))

    def _test_remove_tx_or_block(self, tx):
        """Save then remove tx, asserting it disappears from storage and all indexes."""
        self.validate_save(tx)

        self.tx_storage.remove_transaction(tx)
        with self.assertRaises(TransactionDoesNotExist):
            self.tx_storage.get_transaction(tx.hash)

        # hasattr guards: not every storage implementation exposes these indexes.
        if hasattr(self.tx_storage, 'all_index'):
            self._validate_not_in_index(tx, self.tx_storage.all_index)

        if tx.is_block:
            if hasattr(self.tx_storage, 'block_index'):
                self._validate_not_in_index(tx, self.tx_storage.block_index)
        else:
            if hasattr(self.tx_storage, 'tx_index'):
                self._validate_not_in_index(tx, self.tx_storage.tx_index)

        # Check wallet index.
        addresses_index = self.tx_storage.indexes.addresses
        addresses = tx.get_related_addresses()
        for address in addresses:
            self.assertNotIn(tx.hash, addresses_index.get_from_address(address))

        # TODO Check self.tx_storage.tokens_index

        # Try to remove twice. It is supposed to do nothing.
        self.tx_storage.remove_transaction(tx)

    def test_remove_tx(self):
        """Removal semantics for a transaction."""
        self._test_remove_tx_or_block(self.tx)

    def test_remove_block(self):
        """Removal semantics for a block."""
        self._test_remove_tx_or_block(self.block)

    def test_shared_memory(self):
        """With weakrefs enabled, two loads of the same hash share one object."""
        # Enable weakref to this test only.
        self.tx_storage._enable_weakref()
        self.validate_save(self.block)
        self.validate_save(self.tx)
        for tx in [self.tx, self.block]:
            # just making sure, if it is genesis the test is wrong
            self.assertFalse(tx.is_genesis)
            # load transactions twice
            tx1 = self.tx_storage.get_transaction(tx.hash)
            tx2 = self.tx_storage.get_transaction(tx.hash)
            # naturally they should be equal, but this time so do the objects
            self.assertTrue(tx1 == tx2)
            self.assertTrue(tx1 is tx2)
            meta1 = tx1.get_metadata()
            meta2 = tx2.get_metadata()
            # and naturally the metadata too
            self.assertTrue(meta1 == meta2)
            self.assertTrue(meta1 is meta2)

    def test_get_wrong_tx(self):
        """Looking up a hash that was never stored must raise TransactionDoesNotExist."""
        hex_error = bytes.fromhex('00001c5c0b69d13b05534c94a69b2c8272294e6b0c536660a3ac264820677024')
        with self.assertRaises(TransactionDoesNotExist):
            self.tx_storage.get_transaction(hex_error)

    def test_save_metadata(self):
        """Metadata saved with only_metadata=True must round-trip through the storage."""
        # Saving genesis metadata
        self.tx_storage.save_transaction(self.genesis_txs[0], only_metadata=True)

        tx = self.block
        # First we save to the storage
        self.tx_storage.save_transaction(tx)

        metadata = tx.get_metadata()
        metadata.spent_outputs[1].append(self.genesis_blocks[0].hash)
        random_tx = bytes.fromhex('0000222e64683b966b4268f387c269915cc61f6af5329823a93e3696cb0f2222')
        metadata.children.append(random_tx)

        self.tx_storage.save_transaction(tx, only_metadata=True)
        tx2 = self.tx_storage.get_transaction(tx.hash)
        metadata2 = tx2.get_metadata()
        self.assertEqual(metadata, metadata2)

        # 3 genesis vertices + self.block were saved.
        total = 0
        for tx in self.tx_storage.get_all_transactions():
            total += 1
        self.assertEqual(total, 4)

    def test_storage_new_blocks(self):
        """Block tips must track each newly added block, including a two-tip fork."""
        tip_blocks = [x.data for x in self.tx_storage.get_block_tips()]
        self.assertEqual(tip_blocks, [self.genesis_blocks[0].hash])

        block1 = self._add_new_block()
        tip_blocks = [x.data for x in self.tx_storage.get_block_tips()]
        self.assertEqual(tip_blocks, [block1.hash])

        block2 = self._add_new_block()
        tip_blocks = [x.data for x in self.tx_storage.get_block_tips()]
        self.assertEqual(tip_blocks, [block2.hash])

        # Block3 has the same parents as block2.
        block3 = self._add_new_block(parents=block2.parents)
        tip_blocks = [x.data for x in self.tx_storage.get_block_tips()]
        self.assertEqual(set(tip_blocks), {block2.hash, block3.hash})

        # Re-generate caches to test topological sort.
        self.tx_storage._manually_initialize()
        tip_blocks = [x.data for x in self.tx_storage.get_block_tips()]
        self.assertEqual(set(tip_blocks), {block2.hash, block3.hash})

    def test_token_list(self):
        """Saving must round-trip transactions with zero, one, and two token uids."""
        tx = self.tx
        self.validate_save(tx)
        # 2 token uids
        tx.tokens.append(bytes.fromhex('00001c5c0b69d13b05534c94a69b2c8272294e6b0c536660a3ac264820677024'))
        tx.resolve()
        self.validate_save(tx)
        # no tokens
        tx.tokens = []
        tx.resolve()
        self.validate_save(tx)

    def _add_new_block(self, parents=None):
        """Mine, verify and propagate one new block; advance the clock afterwards.

        :param parents: optional explicit parent list (defaults to the miner's choice)
        :return: the newly created block
        """
        block = self.manager.generate_mining_block()
        block.data = b'Testing, testing, 1, 2, 3... testing, testing...'
        if parents is not None:
            block.parents = parents
        block.weight = 10
        self.assertTrue(block.resolve())
        block.verify()
        self.manager.propagate_tx(block, fails_silently=False)
        self.reactor.advance(5)
        return block

    def test_topological_sort(self):
        """_topological_sort must visit every vertex exactly once."""
        _set_test_mode(TestMode.TEST_ALL_WEIGHT)
        _total = 0
        blocks = add_new_blocks(self.manager, 1, advance_clock=1)
        _total += len(blocks)
        blocks = add_blocks_unlock_reward(self.manager)
        _total += len(blocks)
        add_new_transactions(self.manager, 1, advance_clock=1)

        total = 0
        for tx in self.tx_storage._topological_sort():
            total += 1

        # added blocks + genesis txs + added tx
        self.assertEqual(total, _total + 3 + 1)

    def test_get_best_block_weight(self):
        """The best-block weight reported by the storage must match the newest block."""
        block = self._add_new_block()
        weight = self.tx_storage.get_weight_best_block()
        self.assertEqual(block.weight, weight)

    @inlineCallbacks
    def test_concurrent_access(self):
        """Five threads fetching the same tx must all resolve without error."""
        self.tx_storage.save_transaction(self.tx)
        self.tx_storage._enable_weakref()

        def handle_error(err):
            self.fail('Error resolving concurrent access deferred. {}'.format(err))

        deferreds = []
        for i in range(5):
            d = deferToThread(self.tx_storage.get_transaction, self.tx.hash)
            d.addErrback(handle_error)
            deferreds.append(d)

        self.reactor.advance(3)
        yield gatherResults(deferreds)
        self.tx_storage._disable_weakref()

    def test_full_verification_attribute(self):
        """start/finish_full_verification must toggle the running flag."""
        self.assertFalse(self.tx_storage.is_running_full_verification())
        self.tx_storage.start_full_verification()
        self.assertTrue(self.tx_storage.is_running_full_verification())
        self.tx_storage.finish_full_verification()
        self.assertFalse(self.tx_storage.is_running_full_verification())

    def test_key_value_attribute(self):
        """add_value/get_value/remove_value must behave like a simple key-value store."""
        attr = 'test'
        val = 'a'

        # Try to get a key that does not exist
        self.assertIsNone(self.tx_storage.get_value(attr))

        # Try to remove this key that does not exist
        self.tx_storage.remove_value(attr)

        # Add the key/value
        self.tx_storage.add_value(attr, val)

        # Get correct value
        self.assertEqual(self.tx_storage.get_value(attr), val)

        # Remove the key
        self.tx_storage.remove_value(attr)

        # Key should not exist again
        self.assertIsNone(self.tx_storage.get_value(attr))
limitations under the License. """ from typing import List, Optional from hathor.conf import HathorSettings from hathor.transaction import BaseTransaction, Block, Transaction, TxOutput from hathor.transaction.storage import TransactionStorage settings = HathorSettings() BLOCK_GENESIS = Block( hash=settings.GENESIS_BLOCK_HASH, nonce=settings.GENESIS_BLOCK_NONCE, timestamp=settings.GENESIS_TIMESTAMP, weight=settings.MIN_BLOCK_WEIGHT, outputs=[ TxOutput(settings.GENESIS_TOKENS, settings.GENESIS_OUTPUT_SCRIPT), ], ) TX_GENESIS1 = Transaction( hash=settings.GENESIS_TX1_HASH, nonce=settings.GENESIS_TX1_NONCE, timestamp=settings.GENESIS_TIMESTAMP + 1, weight=settings.MIN_TX_WEIGHT, ) TX_GENESIS2 = Transaction( hash=settings.GENESIS_TX2_HASH, nonce=settings.GENESIS_TX2_NONCE,
    def _make_block_template(self, parent_block: Block, parent_txs: 'ParentTxs', current_timestamp: int,
                             with_weight_decay: bool = False) -> BlockTemplate:
        """ Further implementation of making block template, used by make_block_template and make_custom_block_template

        Computes the valid timestamp window for the new block, the target weight
        via the DAA, and the parent set, then assembles a BlockTemplate.

        :param parent_block: block the template will extend (must have a hash)
        :param parent_txs: candidate transaction parents (must_include / can_include / max_timestamp)
        :param current_timestamp: caller's notion of "now"; clamped into the valid window
        :param with_weight_decay: when True (and decay is enabled in settings),
            cap timestamp_max at the next weight-decay boundary
        :return: a fully populated BlockTemplate
        """
        assert parent_block.hash is not None
        # the absolute minimum would be the previous timestamp + 1
        timestamp_abs_min = parent_block.timestamp + 1
        # and absolute maximum limited by max time between blocks
        if not parent_block.is_genesis:
            timestamp_abs_max = parent_block.timestamp + settings.MAX_DISTANCE_BETWEEN_BLOCKS - 1
        else:
            # genesis has no distance limit: allow any 32-bit timestamp
            timestamp_abs_max = 0xffffffff
        assert timestamp_abs_max > timestamp_abs_min
        # actual minimum depends on the timestamps of the parent txs
        # it has to be at least the max timestamp of parents + 1
        timestamp_min = max(timestamp_abs_min, parent_txs.max_timestamp + 1)
        assert timestamp_min <= timestamp_abs_max
        # when we have weight decay, the max timestamp will be when the next decay happens
        if with_weight_decay and settings.WEIGHT_DECAY_ENABLED:
            # we either have passed the first decay or not, the range will vary depending on that
            if timestamp_min > timestamp_abs_min + settings.WEIGHT_DECAY_ACTIVATE_DISTANCE:
                timestamp_max_decay = timestamp_min + settings.WEIGHT_DECAY_WINDOW_SIZE
            else:
                timestamp_max_decay = timestamp_abs_min + settings.WEIGHT_DECAY_ACTIVATE_DISTANCE
            timestamp_max = min(timestamp_abs_max, timestamp_max_decay)
        else:
            timestamp_max = timestamp_abs_max
        # clamp "now" into [timestamp_min, timestamp_max]
        timestamp = min(max(current_timestamp, timestamp_min), timestamp_max)
        weight = daa.calculate_next_weight(parent_block, timestamp)
        parent_block_metadata = parent_block.get_metadata()
        height = parent_block_metadata.height + 1
        parents = [parent_block.hash] + parent_txs.must_include
        parents_any = parent_txs.can_include
        # simplify representation when you only have one to choose from
        if len(parents) + len(parents_any) == 3:
            parents.extend(sorted(parents_any))
            parents_any = []
        assert len(parents) + len(parents_any) >= 3, 'There should be enough parents to choose from'
        assert 1 <= len(parents) <= 3, 'Impossible number of parents'
        if __debug__ and len(parents) == 3:
            assert len(parents_any) == 0, 'Extra parents to choose from that cannot be chosen'
        return BlockTemplate(
            versions={TxVersion.REGULAR_BLOCK.value, TxVersion.MERGE_MINED_BLOCK.value},
            reward=daa.get_tokens_issued_per_block(height),
            weight=weight,
            timestamp_now=current_timestamp,
            timestamp_min=timestamp_min,
            timestamp_max=timestamp_max,
            parents=parents,
            parents_any=parents_any,
            height=height,
            score=sum_weights(parent_block_metadata.score, weight),
        )
    def update_voided_info(self, block: Block) -> None:
        """ This method is called only once when a new block arrives.

        The blockchain part of the DAG is a tree with the genesis block as the root.
        I'll say the a block A is connected to a block B when A verifies B, i.e., B is a parent of A.

        A chain is a sequence of connected blocks starting in a leaf and ending in the root, i.e., any path from a leaf
        to the root is a chain. Given a chain, its head is a leaf in the tree, and its tail is the sub-chain without
        the head.

        The best chain is a chain that has the highest score of all chains.

        The score of a block is calculated as the sum of the weights of all transactions and blocks both directly and
        indirectly verified by the block. The score of a chain is defined as the score of its head.

        The side chains are the chains whose scores are smaller than the best chain's.
        The head of the side chains are always voided blocks.

        There are two possible states for the block chain:
        (i) It has a single best chain, i.e., one chain has the highest score
        (ii) It has multiple best chains, i.e., two or more chains have the same score (and this score is the highest
             among the chains)

        When there are multiple best chains, I'll call them best chain candidates.

        The arrived block can be connected in four possible ways:
        (i) To the head of a best chain
        (ii) To the tail of the best chain
        (iii) To the head of a side chain
        (iv) To the tail of a side chain

        Thus, there are eight cases to be handled when a new block arrives, which are:
        (i) Single best chain, connected to the head of the best chain
        (ii) Single best chain, connected to the tail of the best chain
        (iii) Single best chain, connected to the head of a side chain
        (iv) Single best chain, connected to the tail of a side chain
        (v) Multiple best chains, connected to the head of a best chain
        (vi) Multiple best chains, connected to the tail of a best chain
        (vii) Multiple best chains, connected to the head of a side chain
        (viii) Multiple best chains, connected to the tail of a side chain

        Case (i) is trivial because the single best chain will remain as the best chain. So, just calculate the new
        score and that's it.

        Case (v) is also trivial. As there are multiple best chains and the new block is connected to the head of one
        of them, this will be the new winner. So, the blockchain state will change to a single best chain again.

        In the other cases, we must calculate the score and compare with the best score.

        When there are multiple best chains, all their heads will be voided.
        """
        assert block.weight > 0, 'This algorithm assumes that block\'s weight is always greater than zero'
        if not block.parents:
            # Only the genesis block has no parents; it is trivially the best chain.
            assert block.is_genesis is True
            self.update_score_and_mark_as_the_best_chain(block)
            return

        assert block.storage is not None
        assert block.hash is not None

        storage = block.storage
        assert storage.indexes is not None

        # Union of voided_by of parents
        voided_by: Set[bytes] = self.union_voided_by_from_parents(block)

        # Update accumulated weight of the transactions voiding us.
        assert block.hash not in voided_by
        for h in voided_by:
            tx = storage.get_transaction(h)
            tx_meta = tx.get_metadata()
            tx_meta.accumulated_weight = sum_weights(tx_meta.accumulated_weight, block.weight)
            storage.save_transaction(tx, only_metadata=True)

        # Check conflicts of the transactions voiding us.
        for h in voided_by:
            tx = storage.get_transaction(h)
            if not tx.is_block:
                assert isinstance(tx, Transaction)
                self.consensus.transaction_algorithm.check_conflicts(tx)

        parent = block.get_block_parent()
        parent_meta = parent.get_metadata()
        assert block.hash in parent_meta.children

        # This method is called after the metadata of the parent is updated.
        # So, if the parent has only one child, it must be the current block.
        is_connected_to_the_head = bool(len(parent_meta.children) == 1)
        is_connected_to_the_best_chain = bool(not parent_meta.voided_by)

        if is_connected_to_the_head and is_connected_to_the_best_chain:
            # Case (i): Single best chain, connected to the head of the best chain
            self.update_score_and_mark_as_the_best_chain_if_possible(block)
            # As `update_score_and_mark_as_the_best_chain_if_possible` may affect `voided_by`,
            # we need to check that block is not voided.
            meta = block.get_metadata()
            if not meta.voided_by:
                storage.indexes.height.add_new(meta.height, block.hash, block.timestamp)
                storage.update_best_block_tips_cache([block.hash])
            # The following assert must be true, but it is commented out for performance reasons.
            if settings.SLOW_ASSERTS:
                assert len(storage.get_best_block_tips(skip_cache=True)) == 1
        else:
            # Resolve all other cases, but (i).
            log = self.log.new(block=block.hash_hex)
            log.debug('this block is not the head of the bestchain',
                      is_connected_to_the_head=is_connected_to_the_head,
                      is_connected_to_the_best_chain=is_connected_to_the_best_chain)

            # First, void this block.
            self.mark_as_voided(block, skip_remove_first_block_markers=True)

            # Get the score of the best chains.
            # We need to void this block first, because otherwise it would always be one of the heads.
            heads = [cast(Block, storage.get_transaction(h)) for h in storage.get_best_block_tips()]
            best_score = None
            for head in heads:
                head_meta = head.get_metadata(force_reload=True)
                if best_score is None:
                    best_score = head_meta.score
                else:
                    # All heads must have the same score.
                    assert abs(best_score - head_meta.score) < 1e-10
            assert isinstance(best_score, (int, float))

            # Calculate the score.
            # We cannot calculate score before getting the heads.
            score = self.calculate_score(block)

            # Finally, check who the winner is.
            if score <= best_score - settings.WEIGHT_TOL:
                # Just update voided_by from parents.
                self.update_voided_by_from_parents(block)

            else:
                # Either everyone has the same score or there is a winner.

                valid_heads = []
                for head in heads:
                    meta = head.get_metadata()
                    if not meta.voided_by:
                        valid_heads.append(head)

                # We must have at most one valid head.
                # Either we have a single best chain or all chains have already been voided.
                assert len(valid_heads) <= 1, 'We must never have more than one valid head'

                # Add voided_by to all heads.
                self.add_voided_by_to_multiple_chains(block, heads)

                if score >= best_score + settings.WEIGHT_TOL:
                    # We have a new winner candidate.
                    self.update_score_and_mark_as_the_best_chain_if_possible(block)
                    # As `update_score_and_mark_as_the_best_chain_if_possible` may affect `voided_by`,
                    # we need to check that block is not voided.
                    meta = block.get_metadata()
                    if not meta.voided_by:
                        self.log.debug('index new winner block', height=meta.height, block=block.hash_hex)
                        # We update the height cache index with the new winner chain
                        storage.indexes.height.update_new_chain(meta.height, block)
                        storage.update_best_block_tips_cache([block.hash])
                else:
                    storage.update_best_block_tips_cache([not_none(blk.hash) for blk in heads])