def verifyMerkleProof(*replies: Tuple[Reply]) -> bool:
    """
    Verifies the correctness of the merkle proof provided in the reply from
    the node. Returns True if verified to be correct, throws an exception
    otherwise.

    :param replies: One or more replies for which Merkle Proofs have to be
    verified
    :raises ProofError: The proof is invalid
    :return: True
    """
    verifier = MerkleVerifier()
    fields = getTxnOrderedFields()
    serializer = CompactSerializer(fields=fields)
    for r in replies:
        seqNo = r[f.RESULT.nm][F.seqNo.name]
        rootHash = base64.b64decode(
            r[f.RESULT.nm][F.rootHash.name].encode())
        auditPath = [base64.b64decode(a.encode())
                     for a in r[f.RESULT.nm][F.auditPath.name]]
        # BUG FIX: dict.iteritems() is Python 2 only and raises
        # AttributeError under Python 3 (this module already uses Python 3
        # type hints); use items() instead.
        filtered = ((k, v) for (k, v) in r[f.RESULT.nm].items()
                    if k not in [F.auditPath.name, F.seqNo.name,
                                 F.rootHash.name])
        result = serializer.serialize(dict(filtered))
        # Leaves are 0-indexed in the proof while seqNo is 1-indexed,
        # hence seqNo - 1 for the leaf index and seqNo for the tree size.
        verifier.verify_leaf_inclusion(
            result, seqNo - 1, auditPath,
            STH(tree_size=seqNo, sha256_root_hash=rootHash))
    return True
def verifyMerkleProof(*replies: Tuple[Reply]) -> bool:
    """
    Verifies the correctness of the merkle proof provided in the reply from
    the node. Returns True if verified to be correct, throws an exception
    otherwise.

    :param replies: One or more replies for which Merkle Proofs have to be
    verified
    :raises ProofError: The proof is invalid
    :return: True
    """
    verifier = MerkleVerifier()
    fields = getTxnOrderedFields()
    serializer = CompactSerializer(fields=fields)
    for r in replies:
        seqNo = r[f.RESULT.nm][F.seqNo.name]
        rootHash = base64.b64decode(
            r[f.RESULT.nm][F.rootHash.name].encode())
        auditPath = [base64.b64decode(a.encode())
                     for a in r[f.RESULT.nm][F.auditPath.name]]
        # BUG FIX: dict.iteritems() is Python 2 only and raises
        # AttributeError under Python 3 (this module already uses Python 3
        # type hints); use items() instead.
        filtered = ((k, v) for (k, v) in r[f.RESULT.nm].items()
                    if k not in [F.auditPath.name, F.seqNo.name,
                                 F.rootHash.name])
        result = serializer.serialize(dict(filtered))
        # Leaves are 0-indexed in the proof while seqNo is 1-indexed,
        # hence seqNo - 1 for the leaf index and seqNo for the tree size.
        verifier.verify_leaf_inclusion(
            result, seqNo - 1, auditPath,
            STH(tree_size=seqNo, sha256_root_hash=rootHash))
    return True
def __init__(self, name, baseDir=None):
    """Set up on-disk storage for this client's transaction log.

    :param name: name of the client owning this log
    :param baseDir: optional base directory for storage; passed through
        to HasFileStorage
    """
    self.dataDir = "data/clients"
    self.name = name
    # HasFileStorage.__init__ must run before self.dataLocation is read.
    HasFileStorage.__init__(self, name, baseDir=baseDir,
                            dataDir=self.dataDir)
    self.clientDataLocation = self.dataLocation
    if not os.path.exists(self.clientDataLocation):
        os.makedirs(self.clientDataLocation)
    # Key-value file store holding the serialized transactions.
    self.transactionLog = TextFileStore(self.clientDataLocation,
                                        "transactions")
    # txnFieldOrdering is presumably a property on the enclosing class
    # (not visible in this chunk) — confirm against the full file.
    self.serializer = CompactSerializer(fields=self.txnFieldOrdering)
def testRecoverLedgerNewFieldsToTxnsAdded(tempdir):
    """A ledger written with one field ordering must still be recoverable
    after a new optional field is appended to the serializer's ordering."""
    hashStore = FileHashStore(tempdir)
    ledger = Ledger(tree=CompactMerkleTree(hashStore=hashStore),
                    dataDir=tempdir, serializer=ledgerSerializer)
    for i in range(10):
        ledger.add({"identifier": "i{}".format(i),
                    "reqId": i,
                    "op": "operation"})
    updatedTree = ledger.tree
    ledger.stop()

    # Same ordering as before, plus one trailing field.
    extendedFields = OrderedDict([
        ("identifier", (str, str)),
        ("reqId", (str, int)),
        ("op", (str, str)),
        ("newField", (str, str)),
    ])
    restartedLedger = Ledger(
        tree=CompactMerkleTree(hashStore=hashStore),
        dataDir=tempdir,
        serializer=CompactSerializer(extendedFields))

    assert restartedLedger.size == ledger.size
    assert restartedLedger.root_hash == ledger.root_hash
    assert restartedLedger.tree.hashes == updatedTree.hashes
    assert restartedLedger.tree.root_hash == updatedTree.root_hash
def createGenesisTxnFile(genesisTxns, targetDir, fileName, fieldOrdering,
                         reset=True):
    """Write the given genesis transactions into a ledger file.

    Each transaction gets a per-identifier request id (counting from 1)
    and its identifier normalised (missing identifier becomes "").

    :param genesisTxns: transactions to write (mutated in place)
    :param targetDir: directory the ledger file is created in
    :param fileName: name of the ledger file
    :param fieldOrdering: field ordering for the CompactSerializer
    :param reset: when True, wipe any existing ledger content first
    """
    ledger = Ledger(CompactMerkleTree(),
                    dataDir=targetDir,
                    serializer=CompactSerializer(fields=fieldOrdering),
                    fileName=fileName)
    if reset:
        ledger.reset()

    reqIds = {}
    for txn in genesisTxns:
        identifier = txn.get(f.IDENTIFIER.nm, "")
        reqIds[identifier] = reqIds.get(identifier, 0) + 1
        txn.update({f.REQ_ID.nm: reqIds[identifier],
                    f.IDENTIFIER.nm: identifier})
        ledger.add(txn)

    ledger.stop()
class ClientTxnLog(HasFileStorage):
    """
    An immutable log of transactions made by the client.
    """

    def __init__(self, name, baseDir=None):
        self.dataDir = "data/clients"
        self.name = name
        HasFileStorage.__init__(self, name, baseDir=baseDir,
                                dataDir=self.dataDir)
        self.clientDataLocation = self.dataLocation
        if not os.path.exists(self.clientDataLocation):
            os.makedirs(self.clientDataLocation)
        self.transactionLog = TextFileStore(self.clientDataLocation,
                                            "transactions")
        self.serializer = CompactSerializer(fields=self.txnFieldOrdering)

    @property
    def txnFieldOrdering(self):
        """Transaction field ordering extended with the seqNo column."""
        return updateFieldsWithSeqNo(getTxnOrderedFields())

    def append(self, reqId, txn):
        """Store txn under its request id, serialized as text."""
        serialized = self.serializer.serialize(
            txn, fields=self.txnFieldOrdering, toBytes=False)
        self.transactionLog.put(key=str(reqId), value=serialized)

    def hasTxnWithReqId(self, reqId) -> bool:
        """Return True when a transaction with this request id is logged."""
        target = str(reqId)
        return any(storedKey == target
                   for storedKey in self.transactionLog.iterator(
                       includeKey=True, includeValue=False))
def getPrimaryStorage(self):
    """
    This is usually an implementation of Ledger
    """
    # An explicitly configured primary storage takes precedence.
    if self.config.primaryStorage is not None:
        return initStorage(self.config.primaryStorage,
                           name=self.name + NODE_PRIMARY_STORAGE_SUFFIX,
                           dataDir=self.dataLocation,
                           config=self.config)

    # Fall back to a file-backed Ledger, bootstrapped from the default
    # domain transactions file when one exists.
    defaultTxnFile = os.path.join(self.basedirpath,
                                  self.config.domainTransactionsFile)
    if not os.path.exists(defaultTxnFile):
        logger.debug(
            "Not using default initialization file for "
            "domain ledger, since it does not exist: {}".format(
                defaultTxnFile))
        defaultTxnFile = None

    fields = getTxnOrderedFields()
    return Ledger(CompactMerkleTree(hashStore=self.hashStore),
                  dataDir=self.dataLocation,
                  serializer=CompactSerializer(fields=fields),
                  fileName=self.config.domainTransactionsFile,
                  ensureDurability=self.config.EnsureLedgerDurability,
                  defaultFile=defaultTxnFile)
class ClientTxnLog(HasFileStorage):
    """
    An immutable log of transactions made by the client.
    """

    def __init__(self, name, baseDir=None):
        self.dataDir = "data/clients"
        self.name = name
        HasFileStorage.__init__(self, name, baseDir=baseDir,
                                dataDir=self.dataDir)
        self.clientDataLocation = self.dataLocation
        if not os.path.exists(self.clientDataLocation):
            os.makedirs(self.clientDataLocation)
        self.transactionLog = TextFileStore(self.clientDataLocation,
                                            "transactions")
        self.serializer = CompactSerializer(fields=self.txnFieldOrdering)

    @property
    def txnFieldOrdering(self):
        """Transaction field ordering extended with the seqNo column."""
        fields = getTxnOrderedFields()
        return updateFieldsWithSeqNo(fields)

    def append(self, identifier: str, reqId, txn):
        """Store txn keyed by identifier+reqId, serialized as text."""
        key = '{}{}'.format(identifier, reqId)
        self.transactionLog.put(key=key,
                                value=self.serializer.serialize(
                                    txn,
                                    fields=self.txnFieldOrdering,
                                    toBytes=False))

    def hasTxn(self, identifier, reqId) -> bool:
        """Return True when a txn for (identifier, reqId) is logged.

        BUG FIX: the stored key is identifier+reqId (see append), but the
        original compared each stored key against str(reqId) — and shadowed
        the composed key with the loop variable — so lookups could never
        match entries written by append.
        """
        key = '{}{}'.format(identifier, reqId)
        for storedKey in self.transactionLog.iterator(includeKey=True,
                                                      includeValue=False):
            if storedKey == key:
                return True
        return False
def testDeserializeSubfields():
    """Dotted field names ("f1.a") must deserialize into nested dicts."""
    fields = OrderedDict([
        ("f1.a", (str, str)),
        ("f1.b", (str, int)),
        ("f1.c", (str, float)),
        ("f2.d", (str, str)),
        ("f2.e", (str, int)),
        ("f2.f", (str, float)),
    ])
    serializer = CompactSerializer(fields)
    expected = {
        "f1": {"a": "v1", "b": 2, "c": 3.0},
        "f2": {"d": "v1", "e": 3, "f": 4.0},
    }
    assert serializer.deserialize(b"v1|2|3.0|v1|3|4.0") == expected
def updatedDomainTxnFile(tdir, tdirWithDomainTxns, genesisTxns,
                         domainTxnOrderedFields, tconf):
    """Append the genesis transactions to the domain transactions file
    already present in tdir (set up by the tdirWithDomainTxns fixture)."""
    domainSerializer = CompactSerializer(fields=domainTxnOrderedFields)
    ledger = Ledger(CompactMerkleTree(),
                    dataDir=tdir,
                    serializer=domainSerializer,
                    fileName=tconf.domainTransactionsFile)
    for txn in genesisTxns:
        ledger.add(txn)
def addTxnToFile(dir, file, txns, fields=None):
    """Append the given transactions to the ledger file `file` in `dir`.

    :param dir: directory containing the ledger file (name kept for
        caller compatibility although it shadows the builtin)
    :param file: ledger file name (likewise shadows a legacy builtin name)
    :param txns: iterable of transactions to add
    :param fields: field ordering for the serializer; defaults to
        getTxnOrderedFields()
    """
    # BUG FIX: the original default `fields=getTxnOrderedFields()` was
    # evaluated once at import time, sharing a single (mutable) ordering
    # across all calls; evaluate it per call instead.
    if fields is None:
        fields = getTxnOrderedFields()
    ledger = Ledger(CompactMerkleTree(),
                    dataDir=dir,
                    serializer=CompactSerializer(fields=fields),
                    fileName=file)
    for txn in txns:
        ledger.add(txn)
    ledger.stop()
def tdirWithDomainTxns(poolTxnData, tdir, tconf, domainTxnOrderedFields):
    """Populate tdir's domain transactions file with the NYM transactions
    from poolTxnData and return the directory."""
    ledger = Ledger(
        CompactMerkleTree(),
        dataDir=tdir,
        serializer=CompactSerializer(fields=domainTxnOrderedFields),
        fileName=tconf.domainTransactionsFile)

    nymTxns = (t for t in poolTxnData["txns"] if t.get(TXN_TYPE) == NYM)
    for txn in nymTxns:
        ledger.add(txn)

    return tdir
def init_domain_ledger(cls, appendToLedgers, baseDir, config, envName,
                       domainTxnFieldOrder):
    """Create the domain ledger for the given environment, wiping any
    existing content unless appendToLedgers is set."""
    fileName = cls.domain_ledger_file_name(config, envName)
    serializer = CompactSerializer(fields=domainTxnFieldOrder)
    ledger = Ledger(CompactMerkleTree(),
                    serializer=serializer,
                    dataDir=baseDir,
                    fileName=fileName)
    if not appendToLedgers:
        ledger.reset()
    return ledger
def __init__(self, name, baseDir=None):
    """Set up on-disk storage for this client's transaction log.

    :param name: name of the client owning this log
    :param baseDir: optional base directory for storage; passed through
        to HasFileStorage
    """
    self.dataDir = "data/clients"
    self.name = name
    # HasFileStorage.__init__ must run before self.dataLocation is read.
    HasFileStorage.__init__(self, name, baseDir=baseDir,
                            dataDir=self.dataDir)
    self.clientDataLocation = self.dataLocation
    if not os.path.exists(self.clientDataLocation):
        os.makedirs(self.clientDataLocation)
    # Key-value file store holding the serialized transactions.
    self.transactionLog = TextFileStore(self.clientDataLocation,
                                        "transactions")
    # txnFieldOrdering is presumably a property on the enclosing class
    # (not visible in this chunk) — confirm against the full file.
    self.serializer = CompactSerializer(fields=self.txnFieldOrdering)
def getPrimaryStorage(self):
    """
    This is usually an implementation of Ledger
    """
    # An explicitly configured primary storage takes precedence.
    if self.config.primaryStorage is not None:
        return initStorage(self.config.primaryStorage,
                           name=self.name + NODE_PRIMARY_STORAGE_SUFFIX,
                           dataDir=self.dataLocation,
                           config=self.config)

    txnFields = getTxnOrderedFields()
    return Ledger(CompactMerkleTree(hashStore=self.hashStore),
                  dataDir=self.dataLocation,
                  serializer=CompactSerializer(fields=txnFields),
                  fileName=self.config.domainTransactionsFile)
def testTxnPersistence(tempdir):
    """An appended reply must persist in the ledger and round-trip intact
    (modulo the seqNo the ledger adds)."""
    # NOTE(review): the event loop is created and closed but never run;
    # preserved as-is since closing the default loop is observable state.
    loop = asyncio.get_event_loop()
    fields = OrderedDict([
        ("identifier", (str, str)),
        ("reqId", (str, int)),
        ("txnId", (str, str)),
        ("txnTime", (str, float)),
        ("txnType", (str, str)),
    ])
    ldb = Ledger(CompactMerkleTree(), tempdir,
                 serializer=CompactSerializer(fields=fields))

    def go():
        identifier = "testClientId"
        txnId = "txnId"
        reply = Reply(result={
            "identifier": identifier,
            "reqId": 1,
            "txnId": txnId,
            "txnTime": time.time(),
            "txnType": "buy",
        })
        sizeBefore = ldb.size
        ldb.append(reply.result)
        stored = ldb.get(identifier=identifier,
                         reqId=reply.result['reqId'])
        stored.pop(F.seqNo.name)
        assert stored == reply.result
        assert ldb.size == sizeBefore + 1
        ldb.reset()
        ldb.stop()

    go()
    loop.close()
def testInitCompactSerializerNoFileds():
    """Constructing a CompactSerializer without any field ordering
    must not raise."""
    CompactSerializer()
def testInitCompactSerializerWithCorrectFileds():
    """Constructing a CompactSerializer with the module-level field
    ordering must not raise."""
    CompactSerializer(fields)
def txnSerializer(self):
    """Return a CompactSerializer built from this object's transaction
    field ordering.

    A fresh serializer is constructed on every access; the commented-out
    caching experiment that used to live here was dead code and has been
    removed.
    """
    return CompactSerializer(fields=self.txnFieldOrdering)
def serializer():
    """Return a CompactSerializer over the module-level field ordering."""
    return CompactSerializer(fields)
def testInitCompactSerializerEmptyFileds():
    """An empty field ordering must be accepted by CompactSerializer."""
    emptyFields = OrderedDict([])
    CompactSerializer(emptyFields)
def b64e(raw):
    """base64-encode bytes and return the result as a UTF-8 string."""
    return base64.b64encode(raw).decode("utf-8")


def b64d(encoded):
    """Decode a base64 value back to bytes."""
    return base64.b64decode(encoded)


def lst2str(items):
    """Join a list of strings with commas."""
    return ",".join(items)


orderedFields = OrderedDict([
    ("identifier", (str, str)),
    ("reqId", (str, int)),
    ("op", (str, str)),
])

ledgerSerializer = CompactSerializer(orderedFields)
leafSerializer = JsonSerializer()


@pytest.fixture(scope="function")
def ledger(tempdir):
    ledger = Ledger(
        CompactMerkleTree(hashStore=FileHashStore(dataDir=tempdir)),
        dataDir=tempdir,
        serializer=ledgerSerializer)
    ledger.reset()
    # NOTE(review): no yield/return is visible here, so the fixture would
    # hand pytest None — this chunk may be truncated; confirm against the
    # full file before relying on this fixture.
def bootstrapTestNodesCore(config, envName, appendToLedgers,
                           domainTxnFieldOrder, ips, nodeCount, clientCount,
                           nodeNum, startingPort):
    """Generate pool and domain genesis ledgers for a test network.

    Writes one steward NYM and one node txn per node, plus one NYM per
    client, into freshly created (or appended) ledgers under
    config.baseDir.

    :param config: config object providing baseDir and ledger file names
    :param envName: optional environment key into config.ENVS
    :param appendToLedgers: when False, reset both ledgers first
    :param domainTxnFieldOrder: field ordering for the domain serializer
    :param ips: comma-separated node IPs, or falsy for all-localhost
    :param nodeCount: number of nodes (and stewards) to generate
    :param clientCount: number of clients to generate
    :param nodeNum: index (1-based) of the node whose local keys are
        initialized on this machine
    :param startingPort: base port; node i uses startingPort+2i-1 and
        startingPort+2i
    """
    baseDir = config.baseDir
    if not os.path.exists(baseDir):
        os.makedirs(baseDir, exist_ok=True)

    # Default every node to localhost; otherwise pad/trim the supplied
    # IP list to exactly nodeCount entries.
    if not ips:
        ips = ['127.0.0.1'] * nodeCount
    else:
        ips = ips.split(",")
        if len(ips) != nodeCount:
            if len(ips) > nodeCount:
                ips = ips[:nodeCount]
            else:
                ips += ['127.0.0.1'] * (nodeCount - len(ips))

    # Environment-specific ledger file names when the config declares ENVS.
    if hasattr(config, "ENVS") and envName:
        poolTxnFile = config.ENVS[envName].poolLedger
        domainTxnFile = config.ENVS[envName].domainLedger
    else:
        poolTxnFile = config.poolTransactionsFile
        domainTxnFile = config.domainTransactionsFile

    poolLedger = Ledger(CompactMerkleTree(),
                        dataDir=baseDir,
                        fileName=poolTxnFile)

    domainLedger = Ledger(CompactMerkleTree(),
                          serializer=CompactSerializer(
                              fields=domainTxnFieldOrder),
                          dataDir=baseDir,
                          fileName=domainTxnFile)

    if not appendToLedgers:
        poolLedger.reset()
        domainLedger.reset()

    steward1Nym = None
    for num in range(1, nodeCount + 1):
        # One steward per node; deterministic keys derived from the name.
        stewardName = "Steward" + str(num)
        sigseed = TestNetworkSetup.getSigningSeed(stewardName)
        verkey = Signer(sigseed).verhex
        stewardNym = TestNetworkSetup.getNymFromVerkey(verkey)
        txn = {
            TARGET_NYM: stewardNym,
            TXN_TYPE: NYM,
            ROLE: STEWARD,
            ALIAS: stewardName,
            TXN_ID: sha256(stewardName.encode()).hexdigest()
        }
        if num == 1:
            steward1Nym = stewardNym
        else:
            # The first steward adds every steward
            txn[f.IDENTIFIER.nm] = steward1Nym
        domainLedger.add(txn)

        nodeName = "Node" + str(num)
        # Node i gets ports startingPort+2i-1 (nodestack) and
        # startingPort+2i (clientstack).
        nodePort, clientPort = startingPort + (num * 2 - 1), startingPort \
            + (num * 2)
        ip = ips[num - 1]
        sigseed = TestNetworkSetup.getSigningSeed(nodeName)
        if nodeNum == num:
            # This is the node being bootstrapped locally: create its
            # local keys instead of deriving the verkey from the seed.
            _, verkey = initLocalKeep(nodeName, baseDir, sigseed, True)
            verkey = verkey.encode()
            print("This node with name {} will use ports {} and {} for "
                  "nodestack and clientstack respectively".format(
                      nodeName, nodePort, clientPort))
        else:
            verkey = Signer(sigseed).verhex
        txn = {
            TARGET_NYM: TestNetworkSetup.getNymFromVerkey(verkey),
            TXN_TYPE: NODE,
            f.IDENTIFIER.nm: stewardNym,
            DATA: {
                CLIENT_IP: ip,
                ALIAS: nodeName,
                CLIENT_PORT: clientPort,
                NODE_IP: ip,
                NODE_PORT: nodePort,
                SERVICES: [VALIDATOR]
            },
            TXN_ID: sha256(nodeName.encode()).hexdigest()
        }
        poolLedger.add(txn)

    # All client NYMs are added on behalf of the first steward.
    for num in range(1, clientCount + 1):
        clientName = "Client" + str(num)
        sigseed = TestNetworkSetup.getSigningSeed(clientName)
        verkey = Signer(sigseed).verhex
        txn = {
            f.IDENTIFIER.nm: steward1Nym,
            TARGET_NYM: TestNetworkSetup.getNymFromVerkey(verkey),
            TXN_TYPE: NYM,
            ALIAS: clientName,
            TXN_ID: sha256(clientName.encode()).hexdigest()
        }
        domainLedger.add(txn)

    poolLedger.stop()
    domainLedger.stop()
def txnSerializer(self):
    """Return a CompactSerializer built from this object's transaction
    field ordering."""
    ordering = self.txnFieldOrdering
    return CompactSerializer(fields=ordering)