def testRecoverLedgerNewFieldsToTxnsAdded(tempdir):
    """A ledger must stay recoverable when the txn schema gains a new
    optional field: reopening with an extended serializer preserves size,
    root hash and merkle tree state."""
    file_hash_store = FileHashStore(tempdir)
    original_ledger = Ledger(
        tree=CompactMerkleTree(hashStore=file_hash_store),
        dataDir=tempdir,
        serializer=ledgerSerializer)
    for seq in range(10):
        original_ledger.add({
            "identifier": "i{}".format(seq),
            "reqId": seq,
            "op": "operation"
        })
    tree_before_restart = original_ledger.tree
    original_ledger.stop()

    # Reopen over the same hash store, but with a serializer that knows
    # one additional (new) field.
    extended_fields = OrderedDict([
        ("identifier", (str, str)),
        ("reqId", (str, int)),
        ("op", (str, str)),
        ("newField", (str, str)),
    ])
    reopened_ledger = Ledger(
        tree=CompactMerkleTree(hashStore=file_hash_store),
        dataDir=tempdir,
        serializer=CompactSerializer(extended_fields))

    assert reopened_ledger.size == original_ledger.size
    assert reopened_ledger.root_hash == original_ledger.root_hash
    assert reopened_ledger.tree.hashes == tree_before_restart.hashes
    assert reopened_ledger.tree.root_hash == tree_before_restart.root_hash
def __migrate_ledger(data_directory,
                     old_ledger_file, new_ledger_file,
                     serializer: MappingSerializer = None):
    """
    Test for the directory, open old and new ledger, migrate data,
    rename directories.

    Copies every transaction from ``new_ledger_file`` into a freshly
    created ledger under ``old_ledger_file + "_new"`` (serialized with
    ``serializer``), then removes ``new_ledger_file`` and moves the copy
    into place as ``old_ledger_file``.

    :param data_directory: directory containing both ledger folders
    :param old_ledger_file: name of the ledger folder to be replaced
    :param new_ledger_file: name of the ledger folder whose txns are read
    :param serializer: serializer for the migrated (old-format) ledger
    :raises Exception: if the old ledger directory does not exist
    """
    # we should have ChunkedFileStorage implementation of the Ledger
    if not os.path.isdir(os.path.join(data_directory, old_ledger_file)):
        msg = 'Could not find directory {} for migration.'.format(
            old_ledger_file)
        logger.error(msg)
        raise Exception(msg)

    # open the old ledger using the specified serializer
    old_ledger_file_backup = old_ledger_file + "_new"
    old_txn_log_store = ChunkedFileStore(data_directory,
                                         old_ledger_file_backup,
                                         isLineNoKey=True,
                                         storeContentHash=False)
    old_ledger = Ledger(CompactMerkleTree(),
                        dataDir=data_directory,
                        txn_serializer=serializer,
                        hash_serializer=serializer,
                        fileName=old_ledger_file_backup,
                        transactionLogStore=old_txn_log_store)

    # open the new ledger with new serialization
    new_ledger = Ledger(CompactMerkleTree(),
                        dataDir=data_directory,
                        fileName=new_ledger_file)
    # BUG FIX: the two log messages below previously had their file names
    # swapped — each ledger's size was reported against the other's name.
    logger.info("new size for {}: {}".format(
        new_ledger_file, str(new_ledger.size)))

    # add all txns into the old ledger
    for _, txn in new_ledger.getAllTxn():
        old_ledger.add(txn)
    logger.info("old size for {}: {}".format(
        old_ledger_file_backup, str(old_ledger.size)))

    old_ledger.stop()
    new_ledger.stop()

    # now that everything succeeded, remove the new files and move the old
    # files into place
    shutil.rmtree(
        os.path.join(data_directory, new_ledger_file))
    os.rename(
        os.path.join(data_directory, old_ledger_file_backup),
        os.path.join(data_directory, old_ledger_file))
def invalid_identifier_tdir(tdir_for_func):
    """Fixture: write one pool txn whose IDENTIFIER is not valid base58,
    to exercise the parser's error handling.

    FIX: removed the unused ``tree = CompactMerkleTree()`` local — the
    ledger is constructed with its own tree below.
    """
    ledger = Ledger(CompactMerkleTree(), dataDir=tdir_for_func)
    txn = {TXN_TYPE: '0',
           TARGET_NYM: base58.b58encode(b'whatever'),
           IDENTIFIER: "invalid====",  # deliberately not base58
           DATA: {
               NAME: str(2),
               ALIAS: 'test' + str(2),
               SERVICES: [VALIDATOR],
           }
           }
    ledger.add(txn)
    ledger.stop()
def testRecoverLedgerFromHashStore(tempdir):
    """A ledger reopened over the same FileHashStore must reproduce the
    original size, root hash and merkle tree state."""
    hash_store = FileHashStore(tempdir)
    ledger_before = Ledger(tree=CompactMerkleTree(hashStore=hash_store),
                           dataDir=tempdir)
    for value in range(10):
        ledger_before.add(str(value).encode())
    tree_before = ledger_before.tree
    ledger_before.stop()

    # Recover from the persisted hash store.
    ledger_after = Ledger(tree=CompactMerkleTree(hashStore=hash_store),
                          dataDir=tempdir)
    assert ledger_after.size == ledger_before.size
    assert ledger_after.root_hash == ledger_before.root_hash
    assert ledger_after.tree.hashes == tree_before.hashes
    assert ledger_after.tree.root_hash == tree_before.root_hash
def testCompactMerkleTree(hasherAndTree, verifier):
    """End-to-end exercise of CompactMerkleTree: append TXN_COUNT leaves,
    checking inclusion proofs, node-count invariants and root-hash
    recomputation per leaf; then verify tree-to-tree consistency proofs and
    that save/load/copy all reproduce the same root hash and hashes."""
    # h: hasher, m: the compact merkle tree under test
    h, m = hasherAndTree
    printEvery = 1000  # throttle the diagnostic prints
    count = TXN_COUNT
    for d in range(count):
        data = str(d + 1).encode()
        data_hex = hexlify(data)
        # append returns the audit path for the new leaf
        audit_path = m.append(data)
        # NOTE: the comprehension variable `h` does not leak in Python 3,
        # so the hasher bound above is unaffected.
        audit_path_hex = [hexlify(h) for h in audit_path]
        # the audit path must equal the inclusion proof for leaf d
        incl_proof = m.inclusion_proof(d, d + 1)
        assert audit_path == incl_proof
        assert m.nodeCount == m.get_expected_node_count(m.leafCount)
        assert m.hashStore.is_consistent
        if d % printEvery == 0:
            show(h, m, data_hex)
            print("audit path is {}".format(audit_path_hex))
            print("audit path length is {}".format(verifier.audit_path_length(
                d, d + 1)))
            print("audit path calculated length is {}".format(
                len(audit_path)))
        # recompute the root hash from the leaf hash and its audit path;
        # audit_path[:] passes a copy since the helper may consume it
        calculated_root_hash = verifier._calculate_root_hash_from_audit_path(
            h.hash_leaf(data), d, audit_path[:], d + 1)
        if d % printEvery == 0:
            print("calculated root hash is {}".format(calculated_root_hash))
        # verify inclusion against a signed tree head of the current size
        sth = STH(d + 1, m.root_hash)
        verifier.verify_leaf_inclusion(data, d, audit_path, sth)
    checkConsistency(m, verifier=verifier)
    # consistency proof between every adjacent pair of tree sizes
    for d in range(1, count):
        verifier.verify_tree_consistency(d, d + 1,
                                         m.merkle_tree_hash(0, d),
                                         m.merkle_tree_hash(0, d + 1),
                                         m.consistency_proof(d, d + 1))
    # save() into a fresh tree must preserve root hash and hashes
    newTree = CompactMerkleTree(hasher=h)
    m.save(newTree)
    assert m.root_hash == newTree.root_hash
    assert m.hashes == newTree.hashes
    # load() from the original must do the same
    newTree = CompactMerkleTree(hasher=h)
    newTree.load(m)
    assert m.root_hash == newTree.root_hash
    assert m.hashes == newTree.hashes
    # and so must a shallow copy
    newTree = copy(m)
    assert m.root_hash == newTree.root_hash
    assert m.hashes == newTree.hashes
def testRecoverLedgerFromHashStore(hashStore, tconf, tdir):
    """After cleanup, a ledger persisted through ``hashStore`` must be
    recoverable with identical size, root hash and tree state."""
    cleanup(hashStore)
    first = Ledger(tree=CompactMerkleTree(hashStore=hashStore),
                   dataDir=tdir)
    for value in range(10):
        first.add(str(value).encode())
    saved_tree = first.tree
    first.stop()

    second = Ledger(tree=CompactMerkleTree(hashStore=hashStore),
                    dataDir=tdir)
    assert second.size == first.size
    assert second.root_hash == first.root_hash
    assert second.tree.hashes == saved_tree.hashes
    assert second.tree.root_hash == saved_tree.root_hash
    second.stop()
def getPrimaryStorage(self):
    """
    This is usually an implementation of Ledger.

    Returns the configured primary storage: when a custom
    ``primaryStorage`` class is configured, delegates to ``initStorage``;
    otherwise builds a file-backed domain Ledger, seeded from the default
    domain transactions file when that file exists.
    """
    if self.config.primaryStorage is not None:
        # A custom storage implementation was configured — delegate to it.
        return initStorage(self.config.primaryStorage,
                           name=self.name + NODE_PRIMARY_STORAGE_SUFFIX,
                           dataDir=self.dataLocation,
                           config=self.config)

    fields = getTxnOrderedFields()
    defaultTxnFile = os.path.join(self.basedirpath,
                                  self.config.domainTransactionsFile)
    if not os.path.exists(defaultTxnFile):
        logger.debug(
            "Not using default initialization file for "
            "domain ledger, since it does not exist: {}".format(
                defaultTxnFile))
        defaultTxnFile = None
    return Ledger(CompactMerkleTree(hashStore=self.hashStore),
                  dataDir=self.dataLocation,
                  serializer=CompactSerializer(fields=fields),
                  fileName=self.config.domainTransactionsFile,
                  ensureDurability=self.config.EnsureLedgerDurability,
                  defaultFile=defaultTxnFile)
def tdirWithPoolTxns(poolTxnData, tdir, tconf):
    """Fixture: populate the pool transactions file in ``tdir`` with every
    txn from ``poolTxnData`` and return the directory."""
    pool_ledger = Ledger(CompactMerkleTree(),
                         dataDir=tdir,
                         fileName=tconf.poolTransactionsFile)
    for txn in poolTxnData["txns"]:
        pool_ledger.add(txn)
    return tdir
def testRecoverMerkleTreeFromLedger(tempdir):
    """Opening a ledger over an existing data dir must rebuild a merkle
    tree with a non-empty root hash; reset() must leave it usable."""
    recovered = Ledger(CompactMerkleTree(),
                       dataDir=tempdir,
                       serializer=ledgerSerializer)
    assert recovered.tree.root_hash is not None
    recovered.reset()
    recovered.stop()
def getConfigLedger(self):
    """Build the config ledger backed by a LevelDB hash store located in
    this node's data directory."""
    config_hash_store = LevelDbHashStore(dataDir=self.dataLocation,
                                         fileNamePrefix='config')
    merkle_tree = CompactMerkleTree(hashStore=config_hash_store)
    return Ledger(merkle_tree,
                  dataDir=self.dataLocation,
                  fileName=self.config.configTransactionsFile,
                  ensureDurability=self.config.EnsureLedgerDurability)
def is_consistent(self) -> bool:
    """
    Returns True if number of nodes are consistent with number of leaves
    """
    # Imported locally to avoid a circular import at module load time.
    from ledger.compact_merkle_tree import CompactMerkleTree
    expected = CompactMerkleTree.get_expected_node_count(self.leafCount)
    return expected == self.nodeCount
def ledger(tempdir):
    """Fixture: a freshly reset ledger backed by a FileHashStore in
    ``tempdir``."""
    store = FileHashStore(dataDir=tempdir)
    fresh = Ledger(CompactMerkleTree(hashStore=store),
                   dataDir=tempdir,
                   serializer=ledgerSerializer)
    fresh.reset()
    return fresh
def test_parse_non_base58_txn_type_field_raises_descriptive_error(
        tdirWithLedger, tdir):
    """Parsing a ledger that contains a non-base58 verkey must raise a
    ValueError whose message mentions 'verkey'."""
    with pytest.raises(ValueError) as excinfo:
        ledger = Ledger(CompactMerkleTree(), dataDir=tdir)
        _, _, nodeKeys = TxnStackManager.parseLedgerForHaAndKeys(ledger)
    assert "verkey" in str(excinfo.value)
    ledger.stop()
def get_graphchain_ledger(data_dir, name, hash_store, config):
    """Create the LEI (graphchain) ledger file named ``name`` under
    ``data_dir``, backed by ``hash_store``."""
    logger.info(
        "Creating LEI ledger store with '{}' name in the '{}' dir.".format(
            name, data_dir))
    merkle_tree = CompactMerkleTree(hashStore=hash_store)
    return Ledger(merkle_tree,
                  dataDir=data_dir,
                  fileName=name,
                  ensureDurability=config.EnsureLedgerDurability)
def is_consistent(self) -> bool:
    """
    Returns True if number of nodes are consistent with number of leaves
    """
    # Local import avoids a circular dependency at import time.
    from ledger.compact_merkle_tree import CompactMerkleTree
    return CompactMerkleTree.get_expected_node_count(
        self.leafCount) == self.nodeCount
def createGenesisTxnFile(genesisTxns, targetDir, fileName, fieldOrdering,
                         reset=True):
    """Write ``genesisTxns`` into a compact-serialized ledger file under
    ``targetDir``, assigning each txn a per-identifier sequential request
    id; optionally reset the ledger first."""
    genesis_ledger = Ledger(CompactMerkleTree(),
                            dataDir=targetDir,
                            serializer=CompactSerializer(fields=fieldOrdering),
                            fileName=fileName)
    if reset:
        genesis_ledger.reset()

    # Request ids start at 1 and count up separately per identifier.
    req_counters = {}
    for txn in genesisTxns:
        identifier = txn.get(f.IDENTIFIER.nm, "")
        req_counters[identifier] = req_counters.get(identifier, 0) + 1
        txn.update({
            f.REQ_ID.nm: req_counters[identifier],
            f.IDENTIFIER.nm: identifier
        })
        genesis_ledger.add(txn)

    genesis_ledger.stop()
def updateGenesisPoolTxnFile(genesisTxnDir, genesisTxnFile, txn):
    """
    Append ``txn`` to the genesis pool transaction file, but only when its
    sequence number is exactly one past the current ledger size; otherwise
    the txn is considered already applied and skipped.

    :param genesisTxnDir: directory containing the genesis txn file
    :param genesisTxnFile: genesis pool transaction file name
    :param txn: transaction dict including an ``F.seqNo`` entry
    """
    # The lock is an advisory lock, it might not work on linux filesystems
    # not mounted with option `-o mand`, another approach can be to use a .lock
    # file to indicate presence or absence of .lock
    try:
        # Exclusively lock file in a non blocking manner. Locking is neccessary
        # since there might be multiple clients running on a machine so genesis
        # files should be updated safely.
        # TODO: There is no automated test in the codebase that confirms it.
        # It has only been manaully tested in the python terminal. Add a test
        # for it using multiple processes writing concurrently
        with portalocker.Lock(os.path.join(genesisTxnDir, genesisTxnFile),
                              truncate=None,
                              flags=portalocker.LOCK_EX | portalocker.LOCK_NB):
            seqNo = txn[F.seqNo.name]
            ledger = Ledger(CompactMerkleTree(hashStore=FileHashStore(
                dataDir=genesisTxnDir)), dataDir=genesisTxnDir,
                fileName=genesisTxnFile)
            ledgerSize = len(ledger)
            if seqNo - ledgerSize == 1:
                # Strip the seqNo field: the ledger assigns it itself.
                ledger.add({k: v for k, v in txn.items()
                            if k != F.seqNo.name})
                logger.debug('Adding transaction with sequence number {} in'
                             ' genesis pool transaction file'.format(seqNo))
            else:
                logger.debug('Already {} genesis pool transactions present so '
                             'transaction with sequence number {} '
                             'not applicable'.format(ledgerSize, seqNo))
    # BUG FIX: the except tuple previously listed portalocker.LockException
    # twice (and bound an unused `ex`). Another process holds the lock, so
    # this best-effort update simply gives up.
    except portalocker.LockException:
        return
def updatedDomainTxnFile(tdir, tdirWithDomainTxns, genesisTxns,
                         domainTxnOrderedFields, tconf):
    """Fixture: append ``genesisTxns`` to the domain transactions file.

    FIX: the ledger is now stopped after writing, matching the other
    ledger-writing helpers in this codebase (e.g. addTxnToFile,
    createGenesisTxnFile) and releasing the underlying file stores.
    """
    ledger = Ledger(CompactMerkleTree(), dataDir=tdir,
                    serializer=CompactSerializer(
                        fields=domainTxnOrderedFields),
                    fileName=tconf.domainTransactionsFile)
    for txn in genesisTxns:
        ledger.add(txn)
    ledger.stop()
def init_pool_ledger(cls, appendToLedgers, baseDir, config, envName):
    """Open the pool ledger for ``envName`` under ``baseDir``; unless
    appending to existing ledgers, reset it first."""
    pool_txn_file = cls.pool_ledger_file_name(config, envName)
    pool_ledger = Ledger(CompactMerkleTree(),
                         dataDir=baseDir,
                         fileName=pool_txn_file)
    if not appendToLedgers:
        pool_ledger.reset()
    return pool_ledger
def create_default_ledger(tempdir, init_genesis_txn_file=None):
    """Create a file-hash-store-backed ledger in ``tempdir``; when
    ``init_genesis_txn_file`` is given, initialize the ledger from that
    genesis txn file."""
    if init_genesis_txn_file:
        genesis_txn_initiator = GenesisTxnInitiatorFromFile(
            tempdir, init_genesis_txn_file)
    else:
        genesis_txn_initiator = None
    hash_store = FileHashStore(dataDir=tempdir)
    return Ledger(CompactMerkleTree(hashStore=hash_store),
                  dataDir=tempdir,
                  genesis_txn_initiator=genesis_txn_initiator)
def tdirWithLedger(tdir):
    """Fixture: a pool ledger with three node txns, the second of which
    carries an invalid (non-base58) TARGET_NYM.

    FIX: removed the unused ``tree = CompactMerkleTree()`` local — the
    ledger is constructed with its own tree.
    """
    ledger = Ledger(CompactMerkleTree(), dataDir=tdir)
    for d in range(3):
        txn = {
            TXN_TYPE: '0',
            TARGET_NYM: base58.b58encode(b'whatever'),
            DATA: {
                NAME: str(d),
                ALIAS: 'test' + str(d),
                SERVICES: [VALIDATOR],
            }
        }
        if d == 1:
            txn[TARGET_NYM] = "invalid===="  # deliberately corrupt one txn
        ledger.add(txn)
    return ledger
def addTxnToFile(dir, file, txns, fields=None):
    """
    Append ``txns`` to the ledger file ``file`` under directory ``dir``.

    FIX: the previous default ``fields=getTxnOrderedFields()`` was
    evaluated once at function-definition time, sharing a single mutable
    mapping across all calls; it is now resolved per call (passing nothing
    keeps the same effective default).

    :param dir: data directory for the ledger
    :param file: ledger file name
    :param txns: iterable of transaction dicts to add
    :param fields: field ordering for CompactSerializer; defaults to
        ``getTxnOrderedFields()`` evaluated at call time
    """
    if fields is None:
        fields = getTxnOrderedFields()
    ledger = Ledger(CompactMerkleTree(),
                    dataDir=dir,
                    serializer=CompactSerializer(fields=fields),
                    fileName=file)
    for txn in txns:
        ledger.add(txn)
    ledger.stop()
def invalid_verkey_tdir(tdir_for_func):
    """Fixture: write three node txns, corrupting the second one's
    TARGET_NYM so it is not valid base58.

    FIX: removed the unused ``tree = CompactMerkleTree()`` local — the
    ledger builds its own tree.
    """
    ledger = Ledger(CompactMerkleTree(), dataDir=tdir_for_func)
    for d in range(3):
        txn = {TXN_TYPE: '0',
               TARGET_NYM: base58.b58encode(b'whatever'),
               IDENTIFIER: "Th7MpTaRZVRYnPiabds81Y",
               DATA: {
                   NAME: str(d),
                   ALIAS: 'test' + str(d),
                   SERVICES: [VALIDATOR],
               }
               }
        if d == 1:
            txn[TARGET_NYM] = "invalid===="  # corrupt one txn on purpose
        ledger.add(txn)
    ledger.stop()
def tdirWithPoolTxns(poolTxnData, tdir, tconf):
    """Fixture: write only node-related pool txns (NEW_NODE, CHANGE_HA,
    CHANGE_KEYS) into the pool transactions file and return ``tdir``."""
    node_txn_types = (NEW_NODE, CHANGE_HA, CHANGE_KEYS)
    pool_ledger = Ledger(CompactMerkleTree(),
                         dataDir=tdir,
                         fileName=tconf.poolTransactionsFile)
    for candidate in poolTxnData["txns"]:
        if candidate.get(TXN_TYPE) in node_txn_types:
            pool_ledger.add(candidate)
    return tdir
def tdirWithPoolTxns(poolTxnData, tdir, tconf):
    """Fixture: write the NODE txns from ``poolTxnData`` into the pool
    transactions file, close the ledger, and return ``tdir``."""
    pool_ledger = Ledger(CompactMerkleTree(),
                         dataDir=tdir,
                         fileName=tconf.poolTransactionsFile)
    node_txns = [txn for txn in poolTxnData["txns"]
                 if txn.get(TXN_TYPE) == NODE]
    for txn in node_txns:
        pool_ledger.add(txn)
    pool_ledger.stop()
    return tdir
def init_pool_ledger(self):
    """Open this node's pool ledger, initialized from the genesis pool
    transactions file via a GenesisTxnInitiatorFromFile."""
    config = self.node.config
    genesis_initiator = GenesisTxnInitiatorFromFile(
        self.node.genesis_dir, config.poolTransactionsFile)
    pool_tree = CompactMerkleTree(hashStore=self.node.getHashStore('pool'))
    return Ledger(pool_tree,
                  dataDir=self.node.dataLocation,
                  fileName=config.poolTransactionsFile,
                  ensureDurability=config.EnsureLedgerDurability,
                  genesis_txn_initiator=genesis_initiator)
def nodeSetLedger(nodeSet, tdir):
    """
    Overrides the fixture from conftest.py

    Gives every node in ``nodeSet`` a file-backed txn store under its own
    temp directory.

    FIX: replaced the racy check-then-create (``os.path.exists`` followed
    by ``os.makedirs``) with ``os.makedirs(..., exist_ok=True)``, which is
    atomic with respect to concurrent creators.
    """
    for n in nodeSet:
        dirPath = os.path.join(tdir, n.name, "temp")
        os.makedirs(dirPath, exist_ok=True)
        n.txnStore = Ledger(CompactMerkleTree(), dirPath)
    yield nodeSet
def test_parse_verkey_non_base58_txn_type_field_raises_SystemExit_has_descriptive_error(
        invalid_verkey_tdir, tdir_for_func):
    """
    Test that invalid base58 TARGET_NYM in pool_transaction raises the
    proper exception (INDY-150)
    """
    with pytest.raises(SystemExit) as excinfo:
        ledger = Ledger(CompactMerkleTree(), dataDir=tdir_for_func)
        _, _, nodeKeys = TxnStackManager.parseLedgerForHaAndKeys(ledger)
    expected_code = 'Invalid verkey. Rebuild pool transactions.'
    assert excinfo.value.code == expected_code
    ledger.stop()
def _open_new_ledger(data_directory, new_ledger_file, hash_store_name):
    # open new Ledger with leveldb hash store (to re-init it)
    logger.info("Open new ledger folder: {}".format(
        os.path.join(data_directory, new_ledger_file)))
    level_db_store = LevelDbHashStore(dataDir=data_directory,
                                      fileNamePrefix=hash_store_name)
    fresh_ledger = Ledger(CompactMerkleTree(hashStore=level_db_store),
                          dataDir=data_directory,
                          fileName=new_ledger_file)
    # Opening the ledger is all that is needed; close it right away.
    fresh_ledger.stop()
def tdirWithDomainTxns(poolTxnData, tdir, tconf, domainTxnOrderedFields):
    """Fixture: write the NYM txns from ``poolTxnData`` into the domain
    transactions file and return ``tdir``."""
    domain_ledger = Ledger(
        CompactMerkleTree(),
        dataDir=tdir,
        serializer=CompactSerializer(fields=domainTxnOrderedFields),
        fileName=tconf.domainTransactionsFile)
    for txn in poolTxnData["txns"]:
        if txn.get(TXN_TYPE) == NYM:
            domain_ledger.add(txn)
    return tdir
def test_parse_identifier_non_base58_txn_type_field_raises_SystemExit_has_descriptive_error(
        invalid_identifier_tdir, tdir_for_func):
    """
    Test that invalid base58 IDENTIFIER in pool_transaction raises the
    proper exception (INDY-150)
    """
    with pytest.raises(SystemExit) as excinfo:
        ledger = Ledger(CompactMerkleTree(), dataDir=tdir_for_func)
        _, _, nodeKeys = TxnStackManager.parseLedgerForHaAndKeys(ledger)
    assert excinfo.value.code == errMsg2
    ledger.stop()
def test_pool_file_is_invalid_raises_SystemExit_has_descriptive_error(
        tdir_for_func):
    """
    Test that an invalid pool_transaction file raises the proper
    exception (INDY-150)
    """
    broken_ledger = DummyLedger(CompactMerkleTree(), dataDir=tdir_for_func)
    with pytest.raises(SystemExit) as excinfo:
        _, _, nodeKeys = TxnStackManager.parseLedgerForHaAndKeys(
            broken_ledger)
    assert excinfo.value.code == errMsg
    broken_ledger.stop()