def verifyMerkleProof(*replies: Tuple[Reply]) -> bool:
    """
    Verifies the correctness of the merkle proof provided in the reply from
    the node. Returns True if verified to be correct,
    throws an exception otherwise.

    :param replies: One or more replies for which Merkle Proofs have to be
    verified
    :raises ProofError: The proof is invalid
    :return: True
    """
    verifier = MerkleVerifier()
    serializer = ledger_txn_serializer
    # Proof metadata must not be part of the serialized leaf content
    ignored = {F.auditPath.name, F.seqNo.name, F.rootHash.name}
    for r in replies:
        seqNo = r[f.RESULT.nm][F.seqNo.name]
        rootHash = Ledger.strToHash(
            r[f.RESULT.nm][F.rootHash.name])
        auditPath = [Ledger.strToHash(a) for a in
                     r[f.RESULT.nm][F.auditPath.name]]
        # Serialize only the txn payload fields; this must match how the
        # node serialized the leaf when building the tree
        filtered = dict((k, v) for (k, v) in r[f.RESULT.nm].items()
                        if k not in ignored)
        result = serializer.serialize(filtered)
        # Leaf indices are 0-based while seqNo is 1-based, hence seqNo - 1
        verifier.verify_leaf_inclusion(result, seqNo - 1, auditPath,
                                       STH(tree_size=seqNo,
                                           sha256_root_hash=rootHash))
    return True
def on_batch_rejected(utxo_cache, tracker: LedgerUncommittedTracker,
                      state: PruningState, ledger: Ledger):
    """
    Roll back the most recent uncommitted batch: revert the state trie,
    discard the uncommitted ledger txns and reject the cached UTXO batch.

    :return: number of transactions that were reverted (0 if the batch
             was empty)
    """
    prev_root, reverted_count = tracker.reject_batch()
    if reverted_count == 0:
        # Nothing was applied in this batch, nothing to undo
        return 0
    state.revertToHead(prev_root)
    ledger.discardTxns(reverted_count)
    utxo_cache.reject_batch()
    return reverted_count
def _has_valid_catchup_replies( self, seq_no: int, txns_to_process: List[Tuple[int, Any]]) -> Tuple[bool, str, int]: """ Transforms transactions for ledger! Returns: Whether catchup reply corresponding to seq_no Name of node from which txns came Number of transactions ready to be processed """ # TODO: Remove after stop passing seqNo here assert seq_no == txns_to_process[0][0] # Here seqNo has to be the seqNo of first transaction of # `catchupReplies` # Get the transactions in the catchup reply which has sequence # number `seqNo` node_name, catchup_rep = self._find_catchup_reply_for_seq_no(seq_no) txns = catchup_rep.txns # Add only those transaction in the temporary tree from the above # batch which are not present in the ledger # Integer keys being converted to strings when marshaled to JSON txns = [ self._provider.transform_txn_for_ledger(txn) for s, txn in txns_to_process[:len(txns)] if str(s) in txns ] # Creating a temporary tree which will be used to verify consistency # proof, by inserting transactions. Duplicating a merkle tree is not # expensive since we are using a compact merkle tree. temp_tree = self._ledger.treeWithAppliedTxns(txns) proof = catchup_rep.consProof final_size = self._catchup_till.seqNoEnd final_hash = self._catchup_till.newMerkleRoot try: logger.info("{} verifying proof for {}, {}, {}, {}, {}".format( self, temp_tree.tree_size, final_size, temp_tree.root_hash, Ledger.strToHash(final_hash), [Ledger.strToHash(p) for p in proof])) verified = self._provider.verifier( self._ledger_id).verify_tree_consistency( temp_tree.tree_size, final_size, temp_tree.root_hash, Ledger.strToHash(final_hash), [Ledger.strToHash(p) for p in proof]) except Exception as ex: logger.info("{} could not verify catchup reply {} since {}".format( self, catchup_rep, ex)) verified = False return bool(verified), node_name, len(txns)
def test_multiple_ledgers_in_second_batch_apply_first_time(txnPoolNodeSet):
    # First txn
    node = txnPoolNodeSet[0]
    audit_batch_handler = node.write_manager.audit_b_handler
    op = {
        TXN_TYPE: PlenumTransactions.NYM.value,
        TARGET_NYM: "000000000000000000000000Trustee4",
        ROLE: None
    }
    nym_req = sdk_gen_request(op, signatures={"sig1": "111"})
    node.write_manager.apply_request(nym_req, 10000)
    op2 = {TXN_TYPE: TXN_AUTHOR_AGREEMENT_AML, AML_VERSION: "version2"}
    pool_config_req = sdk_gen_request(op2, signatures={"sig1": "111"})
    node.write_manager.apply_request(pool_config_req, 10000)
    domain_root_hash = Ledger.hashToStr(node.domainLedger.uncommittedRootHash)
    batch = get_3PC_batch(domain_root_hash)
    txn_data = audit_batch_handler._create_audit_txn_data(
        batch, audit_batch_handler.ledger.get_last_txn())
    append_txn_to_ledger(txn_data, node.auditLedger, 2)

    # Checking rare case -- batch from two ledgers, that were never audited before
    op2 = {
        TXN_TYPE: PlenumTransactions.NODE.value,
        TARGET_NYM: "000000000000000000000000Trustee1",
        DATA: {
            ALIAS: "Node100"
        }
    }
    node_req = sdk_gen_request(op2, signatures={"sig1": "111"})
    node.write_manager.apply_request(node_req, 10000)
    op2 = {TXN_TYPE: TXN_AUTHOR_AGREEMENT_AML, AML_VERSION: "version2"}
    pool_config_req = sdk_gen_request(op2, signatures={"sig1": "111"})
    node.write_manager.apply_request(pool_config_req, 10000)
    pool_root_hash = Ledger.hashToStr(node.poolLedger.uncommittedRootHash)
    pool_state_root = Ledger.hashToStr(node.states[0].headHash)
    config_root_hash = Ledger.hashToStr(node.configLedger.uncommittedRootHash)
    config_state_root = Ledger.hashToStr(node.states[2].headHash)
    batch = get_3PC_batch(pool_root_hash, ledger_id=0)
    txn_data = audit_batch_handler._create_audit_txn_data(
        batch, audit_batch_handler.ledger.get_last_txn())
    assert txn_data[AUDIT_TXN_LEDGER_ROOT][0] == pool_root_hash
    assert txn_data[AUDIT_TXN_STATE_ROOT][0] == pool_state_root
    # NOTE(review): the integer 1 here appears to be a back-reference
    # (distance in audit txns) rather than a hash, since the domain ledger
    # did not change in this batch -- confirm against the audit handler
    assert txn_data[AUDIT_TXN_LEDGER_ROOT][1] == 1
    assert 1 not in txn_data[AUDIT_TXN_STATE_ROOT].keys()
    assert txn_data[AUDIT_TXN_LEDGER_ROOT][2] == config_root_hash
    assert txn_data[AUDIT_TXN_STATE_ROOT][2] == config_state_root
def from_ordered(ordered):
    """Build a ThreePcBatch out of an Ordered message."""
    return ThreePcBatch(
        ledger_id=ordered.ledgerId,
        inst_id=ordered.instId,
        view_no=ordered.viewNo,
        pp_seq_no=ordered.ppSeqNo,
        pp_time=ordered.ppTime,
        valid_txn_count=len(ordered.valid_reqIdr),
        state_root=Ledger.strToHash(ordered.stateRootHash),
        txn_root=Ledger.strToHash(ordered.txnRootHash),
        # An audit txn exists only when the field is present AND non-None
        has_audit_txn=(f.AUDIT_TXN_ROOT_HASH.nm in ordered
                       and ordered.auditTxnRootHash is not None))
def from_batch_committed_dict(batch_comitted):
    """Build a ThreePcBatch out of a BatchCommitted message dict."""
    seq_start = batch_comitted[f.SEQ_NO_START.nm]
    seq_end = batch_comitted[f.SEQ_NO_END.nm]
    return ThreePcBatch(
        ledger_id=batch_comitted[f.LEDGER_ID.nm],
        inst_id=batch_comitted[f.INST_ID.nm],
        view_no=batch_comitted[f.VIEW_NO.nm],
        pp_seq_no=batch_comitted[f.PP_SEQ_NO.nm],
        pp_time=batch_comitted[f.PP_TIME.nm],
        # seq-no range is inclusive on both ends
        valid_txn_count=seq_end - seq_start + 1,
        state_root=Ledger.strToHash(batch_comitted[f.STATE_ROOT.nm]),
        txn_root=Ledger.strToHash(batch_comitted[f.TXN_ROOT.nm]),
        has_audit_txn=(f.AUDIT_TXN_ROOT_HASH.nm in batch_comitted
                       and batch_comitted[f.AUDIT_TXN_ROOT_HASH.nm] is not None))
def from_ordered(ordered):
    """Build a ThreePcBatch (with primaries and digests) from an Ordered message."""
    return ThreePcBatch(
        ledger_id=ordered.ledgerId,
        inst_id=ordered.instId,
        view_no=ordered.viewNo,
        pp_seq_no=ordered.ppSeqNo,
        pp_time=ordered.ppTime,
        state_root=Ledger.strToHash(ordered.stateRootHash),
        txn_root=Ledger.strToHash(ordered.txnRootHash),
        primaries=ordered.primaries,
        valid_digests=ordered.valid_reqIdr,
        # An audit txn exists only when the field is present AND non-None
        has_audit_txn=(f.AUDIT_TXN_ROOT_HASH.nm in ordered
                       and ordered.auditTxnRootHash is not None),
        original_view_no=ordered.originalViewNo)
def test_audit_ledger_multiple_ledgers_in_one_batch(txnPoolNodeSet):
    # Checking first case -- first audit txn
    node = txnPoolNodeSet[0]
    audit_batch_handler = node.write_manager.audit_b_handler
    op = {
        TXN_TYPE: PlenumTransactions.NYM.value,
        TARGET_NYM: "000000000000000000000000Trustee4"
    }
    nym_req = sdk_gen_request(op, signatures={"sig1": "111"})
    node.write_manager.apply_request(nym_req, 10000)
    op2 = {TXN_TYPE: TXN_AUTHOR_AGREEMENT_AML, AML_VERSION: "version1"}
    pool_config_req = sdk_gen_request(op2, signatures={"sig1": "111"})
    node.write_manager.apply_request(pool_config_req, 10000)
    domain_root_hash = Ledger.hashToStr(node.domainLedger.uncommittedRootHash)
    config_root_hash = Ledger.hashToStr(node.configLedger.uncommittedRootHash)
    domain_state_root = Ledger.hashToStr(node.states[1].headHash)
    config_state_root = Ledger.hashToStr(node.states[2].headHash)
    batch = get_3PC_batch(domain_root_hash)
    txn_data = audit_batch_handler._create_audit_txn_data(
        batch, audit_batch_handler.ledger.get_last_txn())
    append_txn_to_ledger(txn_data, node.auditLedger, 1)
    # The very first audit txn stores full roots for both touched ledgers
    assert txn_data[AUDIT_TXN_LEDGER_ROOT][1] == domain_root_hash
    assert txn_data[AUDIT_TXN_LEDGER_ROOT][2] == config_root_hash
    assert txn_data[AUDIT_TXN_STATE_ROOT][1] == domain_state_root
    assert txn_data[AUDIT_TXN_STATE_ROOT][2] == config_state_root

    # Checking usual case -- double update not in a first transaction
    op = {
        TXN_TYPE: PlenumTransactions.NYM.value,
        TARGET_NYM: "000000000000000000000000Trustee5"
    }
    nym_req = sdk_gen_request(op, signatures={"sig1": "111"})
    node.write_manager.apply_request(nym_req, 10000)
    op2 = {TXN_TYPE: TXN_AUTHOR_AGREEMENT_AML, AML_VERSION: "version2"}
    pool_config_req = sdk_gen_request(op2, signatures={"sig1": "111"})
    node.write_manager.apply_request(pool_config_req, 10000)

    # Checking second batch created
    domain_root_hash_2 = Ledger.hashToStr(node.domainLedger.uncommittedRootHash)
    config_root_hash_2 = Ledger.hashToStr(node.configLedger.uncommittedRootHash)
    domain_state_root_2 = Ledger.hashToStr(node.states[1].headHash)
    config_state_root_2 = Ledger.hashToStr(node.states[2].headHash)
    batch = get_3PC_batch(domain_root_hash_2)
    txn_data = audit_batch_handler._create_audit_txn_data(
        batch, audit_batch_handler.ledger.get_last_txn())

    # Checking first batch created
    assert txn_data[AUDIT_TXN_LEDGER_ROOT][1] == domain_root_hash_2
    assert txn_data[AUDIT_TXN_LEDGER_ROOT][2] == config_root_hash_2
    assert txn_data[AUDIT_TXN_STATE_ROOT][1] == domain_state_root_2
    assert txn_data[AUDIT_TXN_STATE_ROOT][2] == config_state_root_2
def _build_consistency_proof(self,
                             ledger_id: int,
                             seq_no_start: int,
                             seq_no_end: int) -> Optional[ConsistencyProof]:
    """
    Build a ConsistencyProof message showing that ledger `ledger_id` grew
    from `seq_no_start` to `seq_no_end` transactions.

    Returns None (after logging an error) when the range is inverted or
    exceeds the current ledger size.
    """
    ledger = self._provider.ledger(ledger_id)

    if seq_no_end < seq_no_start:
        logger.error(
            "{} cannot build consistency proof: end {} is less than start {}"
            .format(self, seq_no_end, seq_no_start))
        return

    if seq_no_start > ledger.size:
        logger.error(
            "{} cannot build consistency proof: start {} is more than ledger size {}"
            .format(self, seq_no_start, ledger.size))
        return

    if seq_no_end > ledger.size:
        logger.error(
            "{} cannot build consistency proof: end {} is more than ledger size {}"
            .format(self, seq_no_end, ledger.size))
        return

    if seq_no_start == 0:
        # Consistency proof for an empty tree cannot exist. Using the root
        # hash now so that the node which is behind can verify that
        # TODO: Make this an empty list
        old_root = ledger.tree.root_hash
        old_root = Ledger.hashToStr(old_root)
        proof = [old_root, ]
    else:
        proof = self._make_consistency_proof(ledger, seq_no_start, seq_no_end)
        old_root = ledger.tree.merkle_tree_hash(0, seq_no_start)
        old_root = Ledger.hashToStr(old_root)

    new_root = ledger.tree.merkle_tree_hash(0, seq_no_end)
    new_root = Ledger.hashToStr(new_root)

    # TODO: Delete when INDY-1946 gets implemented
    three_pc_key = self._provider.three_phase_key_for_txn_seq_no(
        ledger_id, seq_no_end)
    # (0, 0) is the sentinel 3PC key used when no key is known for the txn
    view_no, pp_seq_no = three_pc_key if three_pc_key else (0, 0)

    return ConsistencyProof(ledger_id,
                            seq_no_start,
                            seq_no_end,
                            view_no,
                            pp_seq_no,
                            old_root,
                            new_root,
                            proof)
def getConfigLedger(self):
    """Create the config ledger backed by a LevelDB hash store."""
    hash_store = LevelDbHashStore(dataDir=self.dataLocation,
                                  fileNamePrefix='config')
    merkle_tree = CompactMerkleTree(hashStore=hash_store)
    return Ledger(merkle_tree,
                  dataDir=self.dataLocation,
                  fileName=self.config.configTransactionsFile,
                  ensureDurability=self.config.EnsureLedgerDurability)
def get_graphchain_ledger(data_dir, name, hash_store, config):
    """Create the LEI (graphchain) ledger named `name` under `data_dir`."""
    logger.info("Creating LEI ledger store with '{}' name in the '{}' dir.".format(name, data_dir))
    merkle_tree = CompactMerkleTree(hashStore=hash_store)
    return Ledger(merkle_tree,
                  dataDir=data_dir,
                  fileName=name,
                  ensureDurability=config.EnsureLedgerDurability)
def _make_consistency_proof(self, ledger, end, catchup_till):
    """Return the merkle consistency proof from `end` to `catchup_till` as strings."""
    # TODO: make catchup_till optional
    # if catchup_till is None:
    #     catchup_till = ledger.size
    raw_proof = ledger.tree.consistency_proof(end, catchup_till)
    return [Ledger.hashToStr(node_hash) for node_hash in raw_proof]
def test_msg_len_limit_large_enough_for_preprepare():
    """A PrePrepare for a maximum-size 3PC batch must fit within MSG_LEN_LIMIT."""
    config = getConfig()
    batch_size = config.Max3PCBatchSize
    requests = [Request(signatures={})] * batch_size
    req_idr = [req.digest for req in requests]
    digest = Replica.batchDigest(requests)
    state_root = Base58Serializer().serialize(BLANK_ROOT)
    txn_root = Ledger.hashToStr(CompactMerkleTree().root_hash)

    pp = PrePrepare(
        0,
        0,
        0,
        get_utc_epoch(),
        req_idr,
        init_discarded(),
        digest,
        0,
        state_root,
        txn_root,
        0,
        True)

    assert len(ZStack.serializeMsg(pp)) <= config.MSG_LEN_LIMIT
def _add_txns_to_ledger(node, looper, sdk_wallet_client, num_txns_in_reply, reply_count):
    '''
    Add txn_count transactions to node's ledger and return ConsistencyProof for
    all new transactions and list of CatchupReplies
    :return: ConsistencyProof, list of CatchupReplies
    '''
    txn_count = num_txns_in_reply * reply_count
    ledger_manager = node.ledgerManager
    # NOTE(review): `ledger_id` is not a parameter -- it appears to come
    # from module scope; confirm it is defined in the enclosing test module
    ledger = ledger_manager.ledgerRegistry[ledger_id].ledger
    catchup_rep_service = ledger_manager._node_leecher._leechers[ledger_id]._catchup_rep_service
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, txn_count)

    # add transactions to ledger
    for req in reqs:
        txn = append_txn_metadata(reqToTxn(req), txn_time=12345678)
        catchup_rep_service._add_txn(txn)

    # generate CatchupReps
    replies = []
    for i in range(ledger.seqNo - txn_count + 1, ledger.seqNo + 1, num_txns_in_reply):
        start = i
        end = i + num_txns_in_reply - 1
        cons_proof = ledger_manager._node_seeder._make_consistency_proof(ledger, end, ledger.size)
        txns = {}
        for seq_no, txn in ledger.getAllTxn(start, end):
            # keys are strings to mimic what JSON marshalling does on the wire
            txns[str(seq_no)] = ledger_manager.owner.update_txn_with_extra_data(txn)
        replies.append(CatchupRep(ledger_id, SortedDict(txns), cons_proof))

    three_pc_key = node.three_phase_key_for_txn_seq_no(ledger_id, ledger.seqNo)
    # (0, 0) is the sentinel 3PC key when no key is known
    view_no, pp_seq_no = three_pc_key if three_pc_key else (0, 0)
    return CatchupTill(start_size=ledger.seqNo - txn_count,
                       final_size=ledger.seqNo,
                       final_hash=Ledger.hashToStr(ledger.tree.merkle_tree_hash(0, ledger.seqNo)),
                       view_no=view_no,
                       pp_seq_no=pp_seq_no), replies
def getPrimaryStorage(self):
    """
    This is usually an implementation of Ledger
    """
    if self.config.primaryStorage is None:
        fields = getTxnOrderedFields()
        defaultTxnFile = os.path.join(self.basedirpath,
                                      self.config.domainTransactionsFile)
        if not os.path.exists(defaultTxnFile):
            # A missing bootstrap file is fine -- the ledger just starts empty
            logger.debug(
                "Not using default initialization file for "
                "domain ledger, since it does not exist: {}".format(defaultTxnFile))
            defaultTxnFile = None

        return Ledger(CompactMerkleTree(hashStore=self.hashStore),
                      dataDir=self.dataLocation,
                      serializer=CompactSerializer(fields=fields),
                      fileName=self.config.domainTransactionsFile,
                      ensureDurability=self.config.EnsureLedgerDurability,
                      defaultFile=defaultTxnFile)
    else:
        # A custom primary storage plugin was configured
        return initStorage(self.config.primaryStorage,
                           name=self.name + NODE_PRIMARY_STORAGE_SUFFIX,
                           dataDir=self.dataLocation,
                           config=self.config)
def _buildConsistencyProof(self, ledgerId, seqNoStart, seqNoEnd):
    """
    Build a ConsistencyProof message showing that ledger `ledgerId` grew
    from `seqNoStart` to `seqNoEnd` transactions.

    Returns None (after logging an error) when the range is inverted or
    exceeds the current ledger size.
    """
    ledger = self.getLedgerInfoByType(ledgerId).ledger

    ledgerSize = ledger.size
    if seqNoStart > ledgerSize:
        logger.error("{} cannot build consistency proof from {} "
                     "since its ledger size is {}".format(self, seqNoStart, ledgerSize))
        return
    if seqNoEnd > ledgerSize:
        logger.error("{} cannot build consistency "
                     "proof till {} since its ledger size is {}".format(self, seqNoEnd, ledgerSize))
        return
    if seqNoEnd < seqNoStart:
        # BUGFIX: was `self.error(...)`, which is not defined here and is
        # inconsistent with the logger.error calls in the branches above;
        # it would raise AttributeError instead of logging.
        logger.error('{} cannot build consistency proof since end {} is '
                     'lesser than start {}'.format(self, seqNoEnd, seqNoStart))
        return

    if seqNoStart == 0:
        # Consistency proof for an empty tree cannot exist. Using the root
        # hash now so that the node which is behind can verify that
        # TODO: Make this an empty list
        oldRoot = ledger.tree.root_hash
        proof = [oldRoot, ]
    else:
        proof = ledger.tree.consistency_proof(seqNoStart, seqNoEnd)
        oldRoot = ledger.tree.merkle_tree_hash(0, seqNoStart)

    newRoot = ledger.tree.merkle_tree_hash(0, seqNoEnd)
    key = self.owner.three_phase_key_for_txn_seq_no(ledgerId, seqNoEnd)
    logger.debug('{} found 3 phase key {} for ledger {} seqNo {}'.format(
        self, key, ledgerId, seqNoEnd))
    if key is None:
        # The node receiving consistency proof should check if it has
        # received this sentinel 3 phase key (0, 0) in spite of seeing a
        # non-zero txn seq no
        key = (0, 0)

    return ConsistencyProof(ledgerId,
                            seqNoStart,
                            seqNoEnd,
                            *key,
                            Ledger.hashToStr(oldRoot),
                            Ledger.hashToStr(newRoot),
                            [Ledger.hashToStr(p) for p in proof])
def from_batch_committed_dict(batch_comitted):
    """Build a ThreePcBatch (with primaries and request keys) from a BatchCommitted dict."""
    valid_req_keys = [Request(**req_dict).key
                      for req_dict in batch_comitted[f.REQUESTS.nm]]
    return ThreePcBatch(
        ledger_id=batch_comitted[f.LEDGER_ID.nm],
        inst_id=batch_comitted[f.INST_ID.nm],
        view_no=batch_comitted[f.VIEW_NO.nm],
        pp_seq_no=batch_comitted[f.PP_SEQ_NO.nm],
        pp_time=batch_comitted[f.PP_TIME.nm],
        state_root=Ledger.strToHash(batch_comitted[f.STATE_ROOT.nm]),
        txn_root=Ledger.strToHash(batch_comitted[f.TXN_ROOT.nm]),
        primaries=batch_comitted[f.PRIMARIES.nm],
        valid_digests=valid_req_keys,
        # An audit txn exists only when the field is present AND non-None
        has_audit_txn=(f.AUDIT_TXN_ROOT_HASH.nm in batch_comitted
                       and batch_comitted[f.AUDIT_TXN_ROOT_HASH.nm] is not None))
def _calc_catchup_till(self) -> Dict[int, CatchupTill]:
    """
    Compute, per ledger, the target size and merkle root to catch up to,
    based on the last committed audit ledger transaction.

    :return: {ledger_id: CatchupTill}; empty dict when there is no audit
             txn yet or the audit ledger is detected to be corrupted
    """
    audit_ledger = self._provider.ledger(AUDIT_LEDGER_ID)
    last_audit_txn = audit_ledger.get_last_committed_txn()
    if last_audit_txn is None:
        return {}

    catchup_till = {}
    last_audit_txn = get_payload_data(last_audit_txn)
    for ledger_id, final_size in last_audit_txn[AUDIT_TXN_LEDGERS_SIZE].items():
        ledger = self._provider.ledger(ledger_id)
        if ledger is None:
            logger.debug("{} has audit ledger with references to nonexistent "
                         "ledger with ID {}. Maybe it was frozen.".format(self, ledger_id))
            continue
        start_size = ledger.size

        final_hash = last_audit_txn[AUDIT_TXN_LEDGER_ROOT].get(ledger_id)
        if final_hash is None:
            # No root recorded for this ledger -- then its size must not
            # have changed since the audit txn, otherwise the audit ledger
            # is inconsistent
            if final_size != ledger.size:
                logger.error("{} has corrupted audit ledger: "
                             "it indicates that ledger {} has new transactions but doesn't have new txn root"
                             .format(self, ledger_id))
                return {}
            final_hash = Ledger.hashToStr(ledger.tree.root_hash) if final_size > 0 else None

        if isinstance(final_hash, int):
            # An int root is a back-reference: the real root is stored in
            # the audit txn `final_hash` entries earlier
            audit_txn = audit_ledger.getBySeqNo(audit_ledger.size - final_hash)
            if audit_txn is None:
                logger.error("{} has corrupted audit ledger: "
                             "its txn root for ledger {} references nonexistent txn with seq_no {} - {} = {}"
                             .format(self, ledger_id, audit_ledger.size, final_hash,
                                     audit_ledger.size - final_hash))
                return {}

            audit_txn = get_payload_data(audit_txn)
            final_hash = audit_txn[AUDIT_TXN_LEDGER_ROOT].get(ledger_id)
            if not isinstance(final_hash, str):
                logger.error("{} has corrupted audit ledger: "
                             "its txn root for ledger {} references txn with seq_no {} - {} = {} "
                             "which doesn't contain txn root".format(
                                 self, ledger_id, audit_ledger.size, final_hash,
                                 audit_ledger.size - final_hash))
                return {}

        catchup_till[ledger_id] = CatchupTill(start_size=start_size,
                                              final_size=final_size,
                                              final_hash=final_hash)

    return catchup_till
def init_pool_ledger(self):
    """Create the pool ledger, seeded from the genesis pool transactions file."""
    initiator = GenesisTxnInitiatorFromFile(
        self.node.genesis_dir, self.node.config.poolTransactionsFile)
    merkle_tree = CompactMerkleTree(hashStore=self.node.getHashStore('pool'))
    return Ledger(merkle_tree,
                  dataDir=self.node.dataLocation,
                  fileName=self.node.config.poolTransactionsFile,
                  ensureDurability=self.node.config.EnsureLedgerDurability,
                  genesis_txn_initiator=initiator)
def hasValidCatchupReplies(self, ledgerId, ledger, seqNo, catchUpReplies):
    """
    Check that the catchup reply containing txn `seqNo` is consistent with
    the merkle root this ledger is catching up to.

    :return: (verified, name of node the reply came from, number of txns used)
    """
    # Here seqNo has to be the seqNo of first transaction of
    # `catchupReplies`

    # Get the batch of transactions in the catchup reply which has sequence
    # number `seqNo`
    nodeName, catchupReply = self._getCatchupReplyForSeqNo(ledgerId, seqNo)
    txns = getattr(catchupReply, f.TXNS.nm)

    # Add only those transaction in the temporary tree from the above
    # batch
    # Integer keys being converted to strings when marshaled to JSON
    txns = [self._transform(txn)
            for s, txn in catchUpReplies[:len(txns)]
            if str(s) in txns]

    # Creating a temporary tree which will be used to verify consistency
    # proof, by inserting transactions. Duplicating a merkle tree is not
    # expensive since we are using a compact merkle tree.
    tempTree = ledger.treeWithAppliedTxns(txns)

    proof = getattr(catchupReply, f.CONS_PROOF.nm)
    ledgerInfo = self.getLedgerInfoByType(ledgerId)
    verifier = ledgerInfo.verifier
    cp = ledgerInfo.catchUpTill
    finalSize = getattr(cp, f.SEQ_NO_END.nm)
    finalMTH = getattr(cp, f.NEW_MERKLE_ROOT.nm)
    try:
        logger.debug("{} verifying proof for {}, {}, {}, {}, {}".format(
            self, tempTree.tree_size, finalSize, tempTree.root_hash,
            Ledger.strToHash(finalMTH),
            [Ledger.strToHash(p) for p in proof]))
        verified = verifier.verify_tree_consistency(
            tempTree.tree_size,
            finalSize,
            tempTree.root_hash,
            Ledger.strToHash(finalMTH),
            [Ledger.strToHash(p) for p in proof])
    except Exception as ex:
        # Any verification failure is treated as an unverified reply
        # rather than propagated
        logger.info("{} could not verify catchup reply {} since {}".format(
            self, catchupReply, ex))
        verified = False
    return bool(verified), nodeName, len(txns)
def processCatchupReq(self, req: CatchupReq, frm: str):
    """
    Serve a catchup request from node `frm`: reply with the requested txns
    and a consistency proof. Unserviceable requests are discarded with a
    logged reason.
    """
    logger.debug("{} received catchup request: {} from {}".format(
        self, req, frm))
    if not self.ownedByNode:
        self.discard(req, reason="Only node can serve catchup requests",
                     logMethod=logger.warning)
        return

    start = getattr(req, f.SEQ_NO_START.nm)
    end = getattr(req, f.SEQ_NO_END.nm)
    ledger = self.getLedgerForMsg(req)
    if end < start:
        self.discard(req, reason="Invalid range", logMethod=logger.warning)
        return

    ledger_size = ledger.size
    if start > ledger_size:
        self.discard(req, reason="{} not able to service since "
                                 "ledger size is {} and start is {}"
                     .format(self, ledger_size, start),
                     logMethod=logger.debug)
        return
    if req.catchupTill > ledger_size:
        self.discard(req, reason="{} not able to service since "
                                 "ledger size is {} and catchupTill is {}"
                     .format(self, ledger_size, req.catchupTill),
                     logMethod=logger.debug)
        return

    # Adjusting for end greater than ledger size
    if end > ledger_size:
        logger.debug("{} does not have transactions till {} "
                     "so sending only till {}".format(self, end, ledger_size))
        end = ledger_size

    logger.debug("node {} requested catchup for {} from {} to {}".format(
        frm, end - start + 1, start, end))
    logger.debug("{} generating consistency proof: {} from {}".format(
        self, end, req.catchupTill))
    consProof = [Ledger.hashToStr(p) for p in
                 ledger.tree.consistency_proof(end, req.catchupTill)]

    txns = {}
    for seq_no, txn in ledger.getAllTxn(start, end):
        txns[seq_no] = self.owner.update_txn_with_extra_data(txn)
    self.sendTo(msg=CatchupRep(getattr(req, f.LEDGER_ID.nm),
                               txns,
                               consProof), to=frm)
def process_catchup_req(self, req: CatchupReq, frm: str):
    """
    Serve a catchup request from node `frm`: reply with the requested txns
    and a consistency proof, splitting the reply if it is too large.
    Unserviceable requests are discarded with a logged reason.
    """
    logger.info("{} received catchup request: {} from {}".format(
        self, req, frm))

    ledger_id, ledger = self._get_ledger_and_id(req)
    if ledger is None:
        self._provider.discard(req,
                               reason="it references invalid ledger",
                               logMethod=logger.warning)
        return

    start = req.seqNoStart
    end = req.seqNoEnd

    if start > end:
        self._provider.discard(
            req,
            reason="not able to service since start = {} greater than end = {}".format(start, end),
            logMethod=logger.debug)
        return
    if end > req.catchupTill:
        self._provider.discard(
            req,
            reason="not able to service since end = {} greater than catchupTill = {}".format(end, req.catchupTill),
            logMethod=logger.debug)
        return
    if req.catchupTill > ledger.size:
        self._provider.discard(
            req,
            reason="not able to service since catchupTill = {} greater than ledger size = {}".format(req.catchupTill, ledger.size),
            logMethod=logger.debug)
        return

    cons_proof = ledger.tree.consistency_proof(end, req.catchupTill)
    cons_proof = [Ledger.hashToStr(p) for p in cons_proof]

    txns = {}
    for seq_no, txn in ledger.getAllTxn(start, end):
        txns[seq_no] = self._provider.update_txn_with_extra_data(txn)
    txns = SortedDict(txns)  # TODO: Do we really need them sorted on the sending side?

    rep = CatchupRep(ledger_id, txns, cons_proof)
    # The splitter breaks the reply into multiple messages if it would
    # exceed the transport message size limit
    message_splitter = self._make_splitter_for_catchup_rep(
        ledger, req.catchupTill)
    self._provider.send_to(rep, frm, message_splitter)
def database_manager(tdir_for_func):
    """Build a DatabaseManager with one ledger/state pair and a timestamp store."""
    manager = DatabaseManager()
    manager.register_new_database(
        LEDGER_ID,
        Ledger(CompactMerkleTree(), dataDir=tdir_for_func),
        PruningState(KeyValueStorageRocksdb(tdir_for_func, 'kv1')))
    ts_storage = StateTsDbStorage(
        'test', {1: KeyValueStorageRocksdb(tdir_for_func, 'kv2')})
    manager.register_new_store(TS_LABEL, ts_storage)
    return manager
def init_state_from_ledger(self, state: State, ledger: Ledger):
    """
    If the trie is empty then initialize it by applying txns from ledger.
    """
    if not state.isEmpty:
        return
    logger.info('{} found state to be empty, recreating from '
                'ledger'.format(self))
    for seq_no, txn in ledger.getAllTxn():
        txn = self.node.update_txn_with_extra_data(txn)
        self.node.write_manager.update_state(txn, isCommitted=True)
    # Commit at the current head so the rebuilt state becomes committed
    state.commit(rootHash=state.headHash)
def _audit_txn_by_pp_seq_no(audit_ledger: Ledger, pp_seq_no: int) -> (dict, int):
    """
    Scan the audit ledger backwards for the latest txn whose ppSeqNo equals
    `pp_seq_no`.

    :return: (txn, seq_no) of the match, or (None, 0) when no audit txn
             matches
    """
    # TODO: Should we put it into some common code?
    seq_no = audit_ledger.size
    while seq_no > 0:
        txn = audit_ledger.getBySeqNo(seq_no)
        txn_data = get_payload_data(txn)
        audit_pp_seq_no = txn_data[AUDIT_TXN_PP_SEQ_NO]
        if audit_pp_seq_no == pp_seq_no:
            return txn, seq_no
        seq_no -= 1
    # BUGFIX: previously the txn inspected last (seq_no 1) leaked out
    # alongside seq_no == 0 when nothing matched; return an explicit miss
    # so callers cannot mistake a stale txn for a hit.
    return None, 0
def get_pool_ledger(node_name):
    """Open the pool ledger for `node_name` using its on-disk configuration."""
    config = getConfig()
    config_helper = NodeConfigHelper(node_name, config)
    initiator = GenesisTxnInitiatorFromFile(config_helper.genesis_dir,
                                            config.poolTransactionsFile)
    store = initHashStore(config_helper.ledger_dir, "pool", config)
    return Ledger(CompactMerkleTree(hashStore=store),
                  dataDir=config_helper.ledger_dir,
                  fileName=config.poolTransactionsFile,
                  ensureDurability=config.EnsureLedgerDurability,
                  genesis_txn_initiator=initiator)
def ledger(self):
    """Create the ledger, seeded from the genesis file in `genesis_dir`."""
    initiator = GenesisTxnInitiatorFromFile(self.genesis_dir,
                                            self.ledgerFile)
    merkle_tree = CompactMerkleTree(hashStore=self.hashStore)
    return Ledger(merkle_tree,
                  dataDir=self.ledgerLocation,
                  fileName=self.ledgerFile,
                  ensureDurability=self.config.EnsureLedgerDurability,
                  genesis_txn_initiator=initiator)
def _audit_seq_no_from_3pc_key(audit_ledger: Ledger, view_no: int, pp_seq_no: int) -> int:
    """
    Walk the audit ledger backwards looking for the txn with the given
    (view_no, pp_seq_no) 3PC key; return its seq_no, or 0 if not found.
    """
    # TODO: Should we put it into some common code?
    for candidate in range(audit_ledger.size, 0, -1):
        payload = get_payload_data(audit_ledger.getBySeqNo(candidate))
        if (payload[AUDIT_TXN_VIEW_NO] == view_no
                and payload[AUDIT_TXN_PP_SEQ_NO] == pp_seq_no):
            return candidate
    return 0
def test_msg_len_limit_large_enough_for_preprepare():
    """A PrePrepare for a maximum-size 3PC batch must fit within MSG_LEN_LIMIT."""
    config = getConfig()
    batch_size = config.Max3PCBatchSize
    requests = [Request(signatures={})] * batch_size
    req_idr = [req.digest for req in requests]
    digest = Replica.batchDigest(requests)
    state_root = Base58Serializer().serialize(BLANK_ROOT)
    txn_root = Ledger.hashToStr(CompactMerkleTree().root_hash)

    pp = PrePrepare(0, 0, 0, get_utc_epoch(), req_idr, init_discarded(),
                    digest, 0, state_root, txn_root, 0, True)

    assert len(ZStack.serializeMsg(pp)) <= config.MSG_LEN_LIMIT
def ledger(self):
    """Lazily create and cache the pool ledger (also caches its hash store)."""
    if self._ledger is None:
        initiator = GenesisTxnInitiatorFromFile(self.basedirpath,
                                                self.ledgerFile)
        data_dir = self.ledgerLocation
        self.hashStore = LevelDbHashStore(dataDir=data_dir,
                                          fileNamePrefix='pool')
        merkle_tree = CompactMerkleTree(hashStore=self.hashStore)
        self._ledger = Ledger(
            merkle_tree,
            dataDir=data_dir,
            fileName=self.ledgerFile,
            ensureDurability=self.config.EnsureLedgerDurability,
            genesis_txn_initiator=initiator)
    return self._ledger
def _create_ledger(self, name: str,
                   genesis: Optional[GenesisTxnInitiator] = None) -> Ledger:
    """
    Create the ledger called `name`, optionally seeded from a genesis
    initiator. When there is no data location, both the hash store and the
    txn log are kept purely in memory.
    """
    hs_type = HS_MEMORY if self.data_location is None else None
    hash_store = initHashStore(self.data_location, name, self.config,
                               hs_type=hs_type)
    # The txn file name is looked up from config by convention:
    # "<name>TransactionsFile"
    txn_file_name = getattr(self.config, "{}TransactionsFile".format(name))
    txn_log_storage = None
    if self.data_location is None:
        txn_log_storage = KeyValueStorageInMemory()

    return Ledger(CompactMerkleTree(hashStore=hash_store),
                  dataDir=self.data_location,
                  fileName=txn_file_name,
                  transactionLogStore=txn_log_storage,
                  ensureDurability=self.config.EnsureLedgerDurability,
                  genesis_txn_initiator=genesis)
def build_broken_ledger_status(self, ledger_id): nonlocal next_size if ledger_id != DOMAIN_LEDGER_ID: return origMethod(ledger_id) size = self.domainLedger.size next_size = next_size + 1 if next_size < size else 1 print("new size {}".format(next_size)) newRootHash = Ledger.hashToStr( self.domainLedger.tree.merkle_tree_hash(0, next_size)) three_pc_key = self.three_phase_key_for_txn_seq_no(ledger_id, next_size) v, p = three_pc_key if three_pc_key else None, None ledgerStatus = LedgerStatus(1, next_size, v, p, newRootHash, CURRENT_PROTOCOL_VERSION) print("dl status {}".format(ledgerStatus)) return ledgerStatus
def _sendIncorrectTxns(self, req, frm):
    """
    Malicious test behaviour: answer catchup requests for the domain ledger
    with deliberately corrupted transactions; serve other ledgers normally.
    """
    ledgerId = getattr(req, f.LEDGER_ID.nm)
    if ledgerId == DOMAIN_LEDGER_ID:
        logger.info("{} being malicious and sending incorrect transactions"
                    " for catchup request {} from {}".
                    format(self, req, frm))
        start, end = getattr(req, f.SEQ_NO_START.nm), \
            getattr(req, f.SEQ_NO_END.nm)
        ledger = self.getLedgerForMsg(req)
        txns = {}
        for seqNo, txn in ledger.getAllTxn(start, end):
            # Since the type of random request is `buy`
            if get_type(txn) == "buy":
                # Corrupt the txn type so the receiver's merkle check fails
                set_type(txn, "randombuy")
            txns[seqNo] = txn
        consProof = [Ledger.hashToStr(p) for p in
                     ledger.tree.consistency_proof(end, ledger.size)]
        self.sendTo(msg=CatchupRep(getattr(req, f.LEDGER_ID.nm), txns,
                                   consProof), to=frm)
    else:
        self.processCatchupReq(req, frm)