def test_recover_merkle_tree_from_txn_log_if_hash_store_lags_behind(create_ledger_callable, tempdir, txn_serializer, hash_serializer, genesis_txn_file):
    '''
    Check that tree can be recovered from txn log if recovering from hash
    store failed (we have one more txn in txn log than in hash store, so
    consistency verification fails).
    '''
    ledger = create_ledger_callable(
        txn_serializer, hash_serializer, tempdir, genesis_txn_file)
    for d in range(100):
        ledger.add(random_txn(d))

    # add to txn log only
    ledger._addToStore(ledger.serialize_for_txn_log(random_txn(50)),
                       serialized=True)
    ledger.stop()

    size_before = ledger.size
    tree_size_before = ledger.tree.tree_size

    restartedLedger = create_ledger_callable(txn_serializer, hash_serializer,
                                             tempdir, genesis_txn_file)

    # Root hashes will differ from before (recovery was based on the txn log).
    # The new size is 1 greater, since the txn log contained one extra txn.
    assert size_before + 1 == restartedLedger.size
    assert tree_size_before + 1 == restartedLedger.tree.tree_size
def test_recover_merkle_tree_from_txn_log_if_hash_store_runs_ahead(
        create_ledger_callable, tempdir, txn_serializer, hash_serializer,
        genesis_txn_file):
    '''
    Check that tree can be recovered from txn log if recovering from hash
    store failed (we have one more txn in hash store than in txn log, so
    consistency verification fails).
    '''
    ledger = create_ledger_callable(txn_serializer, hash_serializer,
                                    tempdir, genesis_txn_file)
    for d in range(5):
        ledger.add(random_txn(d))

    # Snapshot the expected state before corrupting the hash store.
    size_before = ledger.size
    tree_root_hash_before = ledger.tree.root_hash
    tree_size_before = ledger.tree.tree_size
    root_hash_before = ledger.root_hash
    hashes_before = ledger.tree.hashes

    # add to hash store only
    ledger._addToTree(ledger.serialize_for_tree(random_txn(50)),
                      serialized=True)
    ledger.stop()

    restartedLedger = create_ledger_callable(txn_serializer, hash_serializer,
                                             tempdir, genesis_txn_file)

    # Recovery from the txn log must reproduce the pre-corruption state.
    assert size_before == restartedLedger.size
    assert root_hash_before == restartedLedger.root_hash
    assert hashes_before == restartedLedger.tree.hashes
    assert tree_root_hash_before == restartedLedger.tree.root_hash
    assert tree_size_before == restartedLedger.tree.tree_size
def test_recover_merkle_tree_from_txn_log_if_hash_store_lags_behind(
        create_ledger_callable, tempdir, txn_serializer, hash_serializer,
        genesis_txn_file):
    '''
    Check that tree can be recovered from txn log if recovering from hash
    store failed (we have one more txn in txn log than in hash store, so
    consistency verification fails).
    '''
    ledger = create_ledger_callable(txn_serializer, hash_serializer,
                                    tempdir, genesis_txn_file)
    for d in range(100):
        ledger.add(random_txn(d))

    # add to txn log only
    ledger._addToStore(ledger.serialize_for_txn_log(random_txn(50)),
                       serialized=True)
    ledger.stop()

    size_before = ledger.size
    tree_size_before = ledger.tree.tree_size

    restartedLedger = create_ledger_callable(txn_serializer, hash_serializer,
                                             tempdir, genesis_txn_file)

    # Root hashes will differ from before (recovery was based on the txn log).
    # The new size is 1 greater, since the txn log contained one extra txn.
    assert size_before + 1 == restartedLedger.size
    assert tree_size_before + 1 == restartedLedger.tree.tree_size
def test_recover_merkle_tree_from_txn_log_if_hash_store_runs_ahead(create_ledger_callable, tempdir, txn_serializer, hash_serializer, genesis_txn_file):
    '''
    Check that tree can be recovered from txn log if recovering from hash
    store failed (we have one more txn in hash store than in txn log, so
    consistency verification fails).
    '''
    ledger = create_ledger_callable(
        txn_serializer, hash_serializer, tempdir, genesis_txn_file)
    for d in range(5):
        ledger.add(random_txn(d))

    # Remember the consistent state before the hash store runs ahead.
    size_before = ledger.size
    tree_root_hash_before = ledger.tree.root_hash
    tree_size_before = ledger.tree.tree_size
    root_hash_before = ledger.root_hash
    hashes_before = ledger.tree.hashes

    # add to hash store only
    ledger._addToTree(ledger.serialize_for_tree(random_txn(50)),
                      serialized=True)
    ledger.stop()

    restartedLedger = create_ledger_callable(txn_serializer, hash_serializer,
                                             tempdir, genesis_txn_file)

    # After restart the ledger must match the pre-corruption snapshot.
    assert size_before == restartedLedger.size
    assert root_hash_before == restartedLedger.root_hash
    assert hashes_before == restartedLedger.tree.hashes
    assert tree_root_hash_before == restartedLedger.tree.root_hash
    assert tree_size_before == restartedLedger.tree.tree_size
def test_add_txn(ledger, genesis_txns, genesis_txn_file):
    '''Adding txns grows the Merkle tree and stores the data retrievably.'''
    # Genesis txns shift all sequence numbers when a genesis file is used.
    offset = len(genesis_txns) if genesis_txn_file else 0

    txn1 = random_txn(1)
    txn2 = random_txn(2)
    ledger.add(txn1)
    ledger.add(txn2)

    # Check that the transaction is added to the Merkle Tree
    assert ledger.size == 2 + offset

    # Check that the data is appended to the immutable store
    assert sorted(txn1.items()) == sorted(ledger[1 + offset].items())
    assert sorted(txn2.items()) == sorted(ledger[2 + offset].items())

    check_ledger_generator(ledger)
def test_recover_merkle_tree_from_txn_log(create_ledger_callable, tempdir,
                                          txn_serializer, hash_serializer,
                                          genesis_txn_file):
    '''Tree is rebuilt from the txn log when the hash store is wiped.'''
    ledger = create_ledger_callable(txn_serializer, hash_serializer,
                                    tempdir, genesis_txn_file)
    for d in range(5):
        ledger.add(random_txn(d))

    # delete hash store, so that the only option for recovering is txn log
    ledger.tree.hashStore.reset()
    ledger.stop()

    size_before = ledger.size
    tree_root_hash_before = ledger.tree.root_hash
    tree_size_before = ledger.tree.tree_size
    root_hash_before = ledger.root_hash
    hashes_before = ledger.tree.hashes

    restartedLedger = create_ledger_callable(txn_serializer, hash_serializer,
                                             tempdir, genesis_txn_file)

    # The restarted ledger must fully reproduce the previous state.
    assert size_before == restartedLedger.size
    assert root_hash_before == restartedLedger.root_hash
    assert hashes_before == restartedLedger.tree.hashes
    assert tree_root_hash_before == restartedLedger.tree.root_hash
    assert tree_size_before == restartedLedger.tree.tree_size
def test_stop_start(ledger, genesis_txns, genesis_txn_file):
    '''A stopped ledger rejects writes; restarting it re-enables them.'''
    offset = len(genesis_txns) if genesis_txn_file else 0

    txn1 = random_txn(1)
    ledger.add(txn1)
    assert ledger.size == 1 + offset

    # stop the ledger
    ledger.stop()

    # Check that can not add new txn for stopped ledger
    txn2 = random_txn(2)
    with pytest.raises(Exception):
        ledger.add(txn2)

    # Check that the transaction is added to the Merkle Tree after re-start
    ledger.start()
    ledger.add(txn2)
    assert ledger.size == 2 + offset
    assert sorted(txn2.items()) == sorted(ledger[2 + offset].items())
def test_query_merkle_info(ledger, genesis_txns, genesis_txn_file):
    '''merkleInfo returned at add-time matches later queries by seqNo.'''
    offset = len(genesis_txns) if genesis_txn_file else 0

    merkleInfo = {}
    for i in range(100):
        mi = ledger.add(random_txn(i))
        # seqNo is 1-based and shifted by any genesis txns.
        seqNo = mi.pop(F.seqNo.name)
        assert i + 1 + offset == seqNo
        merkleInfo[seqNo] = mi

    for i in range(100):
        assert sorted(merkleInfo[i + 1 + offset].items()) == \
            sorted(ledger.merkleInfo(i + 1 + offset).items())
def test_consistency_verification_on_startup_case_2(tempdir):
    """
    One more transaction added to transactions file
    """
    ledger = create_default_ledger(tempdir)
    tranzNum = 10
    for d in range(tranzNum):
        ledger.add(random_txn(d))

    # Adding one more entry to transaction log without adding it to merkle tree
    badData = random_txn(50)
    value = ledger.serialize_for_txn_log(badData)
    key = str(tranzNum + 1)
    ledger._transactionLog.put(key=key, value=value)
    ledger.stop()

    # Hash-store-only recovery must detect the extra txn-log entry.
    with pytest.raises(ConsistencyVerificationFailed):
        tree = CompactMerkleTree(hashStore=ledger.tree.hashStore)
        ledger = NoTransactionRecoveryLedger(tree=tree, dataDir=tempdir)
        ledger.recoverTreeFromHashStore()
    ledger.stop()
def test_missing_txn_request(ledger_no_genesis):
    """
    Testing LedgerManager's `_missing_txns`
    """
    ledger = ledger_no_genesis
    for i in range(20):
        txn = random_txn(i)
        ledger.add(txn)

    service = create_fake_catchup_rep_service(ledger)
    assert service._num_missing_txns() == 0

    # Ledger is already ahead
    ct = CatchupTill(start_size=1,
                     final_size=10,
                     final_hash='Gv9AdSeib9EnBakfpgkU79dPMtjcnFWXvXeiCX4QAgAC',
                     view_no=0,
                     pp_seq_no=0)
    service._catchup_till = ct
    service._received_catchup_txns = [(i, {}) for i in range(1, 15)]
    assert service._num_missing_txns() == 0

    # Ledger is behind but catchup replies present
    ct = CatchupTill(start_size=1,
                     final_size=30,
                     final_hash='EEUnqHf2GWEpvmibiXDCZbNDSpuRgqdvCpJjgp3KFbNC',
                     view_no=0,
                     pp_seq_no=0)
    service._catchup_till = ct
    service._received_catchup_txns = [(i, {}) for i in range(21, 31)]
    assert service._num_missing_txns() == 0
    service._received_catchup_txns = [(i, {}) for i in range(21, 35)]
    assert service._num_missing_txns() == 0

    # Ledger is behind
    ct = CatchupTill(start_size=1,
                     final_size=30,
                     final_hash='EEUnqHf2GWEpvmibiXDCZbNDSpuRgqdvCpJjgp3KFbNC',
                     view_no=0,
                     pp_seq_no=0)
    service._catchup_till = ct
    service._received_catchup_txns = [(i, {}) for i in range(21, 26)]
    assert service._num_missing_txns() == 5
    service._received_catchup_txns = [(i, {}) for i in range(26, 31)]
    assert service._num_missing_txns() == 5
def test_recover_merkle_tree_from_hash_store(tempdir):
    '''Restarting a ledger restores the tree state from its hash store.'''
    ledger = create_default_ledger(tempdir)
    for d in range(100):
        ledger.add(random_txn(d))
    ledger.stop()

    size_before = ledger.size
    tree_root_hash_before = ledger.tree.root_hash
    tree_size_before = ledger.tree.tree_size
    root_hash_before = ledger.root_hash
    hashes_before = ledger.tree.hashes

    restartedLedger = create_default_ledger(tempdir)

    assert size_before == restartedLedger.size
    assert root_hash_before == restartedLedger.root_hash
    assert hashes_before == restartedLedger.tree.hashes
    assert tree_root_hash_before == restartedLedger.tree.root_hash
    assert tree_size_before == restartedLedger.tree.tree_size
def test_consistency_verification_on_startup_case_1(tempdir):
    """
    One more node was added to nodes file
    """
    ledger = create_default_ledger(tempdir)
    tranzNum = 10
    for d in range(tranzNum):
        ledger.add(random_txn(d))

    # Writing one more node without adding of it to leaf and transaction logs
    badNode = (None, None, ('X' * 32))
    ledger.tree.hashStore.writeNode(badNode)
    ledger.stop()

    # Hash-store-only recovery must detect the orphaned node.
    with pytest.raises(ConsistencyVerificationFailed):
        tree = CompactMerkleTree(hashStore=ledger.tree.hashStore)
        ledger = NoTransactionRecoveryLedger(tree=tree, dataDir=tempdir)
        ledger.recoverTreeFromHashStore()
    ledger.stop()
def test_recover_ledger_new_fields_to_txns_added(tempdir):
    '''Ledger recovery survives adding a new field to the txn schema.'''
    ledger = create_ledger_text_file_storage(
        CompactSerializer(orderedFields), None, tempdir)
    for d in range(5):
        ledger.add(random_txn(d))
    updatedTree = ledger.tree
    ledger.stop()

    # Same schema plus one extra field appended at the end.
    newOrderedFields = OrderedDict([
        ("identifier", (str, str)),
        ("reqId", (str, int)),
        ("op", (str, str)),
        ("newField", (str, str))])
    restartedLedger = create_ledger_text_file_storage(
        CompactSerializer(newOrderedFields), None, tempdir)

    assert restartedLedger.size == ledger.size
    assert restartedLedger.root_hash == ledger.root_hash
    assert restartedLedger.tree.hashes == updatedTree.hashes
    assert restartedLedger.tree.root_hash == updatedTree.root_hash
def test_missing_txn_request(ledger_no_genesis):
    """
    Testing LedgerManager's `_missing_txns`
    """
    ledger = ledger_no_genesis
    for i in range(20):
        txn = random_txn(i)
        ledger.add(txn)

    # Callbacks don't matter in this test
    ledger_info = LedgerInfo(0, ledger, *[None] * 6)

    assert ledger_info.catchupReplyTimer is None
    assert LedgerManager._missing_txns(ledger_info) == (False, 0)

    ledger_info.catchupReplyTimer = time.perf_counter()

    # Ledger is already ahead
    cp = ConsistencyProof(0, 1, 10, 1, 1,
                          'GJybBTHjzMzPWsE6n9qNQWAmhJP88dTcdbgkGLhYGFYn',
                          'Gv9AdSeib9EnBakfpgkU79dPMtjcnFWXvXeiCX4QAgAC', [])
    ledger_info.catchUpTill = cp
    ledger_info.receivedCatchUpReplies = [(i, {}) for i in range(1, 15)]
    assert not LedgerManager._missing_txns(ledger_info)[0]

    # Ledger is behind but catchup replies present
    cp = ConsistencyProof(0, 1, 30, 1, 1,
                          'Gv9AdSeib9EnBakfpgkU79dPMtjcnFWXvXeiCX4QAgAC',
                          'EEUnqHf2GWEpvmibiXDCZbNDSpuRgqdvCpJjgp3KFbNC', [])
    ledger_info.catchUpTill = cp
    ledger_info.receivedCatchUpReplies = [(i, {}) for i in range(21, 31)]
    assert not LedgerManager._missing_txns(ledger_info)[0]
    ledger_info.receivedCatchUpReplies = [(i, {}) for i in range(21, 35)]
    assert not LedgerManager._missing_txns(ledger_info)[0]

    # Ledger is behind
    cp = ConsistencyProof(0, 1, 30, 1, 1,
                          'Gv9AdSeib9EnBakfpgkU79dPMtjcnFWXvXeiCX4QAgAC',
                          'EEUnqHf2GWEpvmibiXDCZbNDSpuRgqdvCpJjgp3KFbNC', [])
    ledger_info.catchUpTill = cp
    ledger_info.receivedCatchUpReplies = [(i, {}) for i in range(21, 26)]
    assert LedgerManager._missing_txns(ledger_info) == (True, 5)
    ledger_info.receivedCatchUpReplies = [(i, {}) for i in range(26, 31)]
    assert LedgerManager._missing_txns(ledger_info) == (True, 5)
def test_recover_ledger_new_fields_to_txns_added(tempdir):
    '''Recovery works when the txn serializer gains an extra field.'''
    ledger = create_ledger_text_file_storage(
        CompactSerializer(orderedFields), None, tempdir)
    for d in range(5):
        ledger.add(random_txn(d))
    updatedTree = ledger.tree
    ledger.stop()

    # Original schema extended with one trailing field.
    newOrderedFields = OrderedDict([
        ("identifier", (str, str)),
        ("reqId", (str, int)),
        ("op", (str, str)),
        ("newField", (str, str)),
    ])
    restartedLedger = create_ledger_text_file_storage(
        CompactSerializer(newOrderedFields), None, tempdir)

    assert restartedLedger.size == ledger.size
    assert restartedLedger.root_hash == ledger.root_hash
    assert restartedLedger.tree.hashes == updatedTree.hashes
    assert restartedLedger.tree.root_hash == updatedTree.root_hash
def test_missing_txn_request(ledger_no_genesis):
    """
    Testing LedgerManager's `_missing_txns`
    """
    ledger = ledger_no_genesis
    for i in range(20):
        txn = random_txn(i)
        ledger.add(txn)

    service = create_fake_catchup_rep_service(ledger)
    assert service._num_missing_txns() == 0

    # Ledger is already ahead
    cp = ConsistencyProof(0, 1, 10, 1, 1,
                          'GJybBTHjzMzPWsE6n9qNQWAmhJP88dTcdbgkGLhYGFYn',
                          'Gv9AdSeib9EnBakfpgkU79dPMtjcnFWXvXeiCX4QAgAC', [])
    service._catchup_till = cp
    service._received_catchup_txns = [(i, {}) for i in range(1, 15)]
    assert service._num_missing_txns() == 0

    # Ledger is behind but catchup replies present
    cp = ConsistencyProof(0, 1, 30, 1, 1,
                          'Gv9AdSeib9EnBakfpgkU79dPMtjcnFWXvXeiCX4QAgAC',
                          'EEUnqHf2GWEpvmibiXDCZbNDSpuRgqdvCpJjgp3KFbNC', [])
    service._catchup_till = cp
    service._received_catchup_txns = [(i, {}) for i in range(21, 31)]
    assert service._num_missing_txns() == 0
    service._received_catchup_txns = [(i, {}) for i in range(21, 35)]
    assert service._num_missing_txns() == 0

    # Ledger is behind
    cp = ConsistencyProof(0, 1, 30, 1, 1,
                          'Gv9AdSeib9EnBakfpgkU79dPMtjcnFWXvXeiCX4QAgAC',
                          'EEUnqHf2GWEpvmibiXDCZbNDSpuRgqdvCpJjgp3KFbNC', [])
    service._catchup_till = cp
    service._received_catchup_txns = [(i, {}) for i in range(21, 26)]
    assert service._num_missing_txns() == 5
    service._received_catchup_txns = [(i, {}) for i in range(26, 31)]
    assert service._num_missing_txns() == 5
def test_recover_merkle_tree_from_hash_store(create_ledger_callable, tempdir,
                                             txn_serializer, hash_serializer,
                                             genesis_txn_file):
    '''A restarted ledger reconstructs its tree from the hash store.'''
    ledger = create_ledger_callable(
        txn_serializer, hash_serializer, tempdir, genesis_txn_file)
    for d in range(5):
        ledger.add(random_txn(d))
    ledger.stop()

    size_before = ledger.size
    tree_root_hash_before = ledger.tree.root_hash
    tree_size_before = ledger.tree.tree_size
    root_hash_before = ledger.root_hash
    hashes_before = ledger.tree.hashes

    restartedLedger = create_ledger_callable(txn_serializer, hash_serializer,
                                             tempdir, genesis_txn_file)

    assert size_before == restartedLedger.size
    assert root_hash_before == restartedLedger.root_hash
    assert hashes_before == restartedLedger.tree.hashes
    assert tree_root_hash_before == restartedLedger.tree.root_hash
    assert tree_size_before == restartedLedger.tree.tree_size
def test_recover_merkle_tree_from_txn_log(create_ledger_callable, tempdir,
                                          txn_serializer, hash_serializer,
                                          genesis_txn_file):
    '''With the hash store wiped, the tree is rebuilt from the txn log.'''
    ledger = create_ledger_callable(
        txn_serializer, hash_serializer, tempdir, genesis_txn_file)
    for d in range(5):
        ledger.add(random_txn(d))

    # delete hash store, so that the only option for recovering is txn log
    ledger.tree.hashStore.reset()
    ledger.stop()

    size_before = ledger.size
    tree_root_hash_before = ledger.tree.root_hash
    tree_size_before = ledger.tree.tree_size
    root_hash_before = ledger.root_hash
    hashes_before = ledger.tree.hashes

    restartedLedger = create_ledger_callable(txn_serializer, hash_serializer,
                                             tempdir, genesis_txn_file)

    assert size_before == restartedLedger.size
    assert root_hash_before == restartedLedger.root_hash
    assert hashes_before == restartedLedger.tree.hashes
    assert tree_root_hash_before == restartedLedger.tree.root_hash
    assert tree_size_before == restartedLedger.tree.tree_size