def test_multiple_ledgers_in_second_batch_apply_first_time(txnPoolNodeSet):
    """
    An audit txn must carry full ledger/state roots for every ledger that was
    never audited before, even when that ledger first changes only in the
    second (or later) audited batch.
    """
    # First txn
    node = txnPoolNodeSet[0]
    audit_batch_handler = node.write_manager.audit_b_handler
    # Apply one domain txn and one config txn so the first audit entry
    # references the domain (and config) ledgers only.
    op = {
        TXN_TYPE: PlenumTransactions.NYM.value,
        TARGET_NYM: "000000000000000000000000Trustee4",
        ROLE: None
    }
    nym_req = sdk_gen_request(op, signatures={"sig1": "111"})
    node.write_manager.apply_request(nym_req, 10000)
    op2 = {TXN_TYPE: TXN_AUTHOR_AGREEMENT_AML, AML_VERSION: "version2"}
    pool_config_req = sdk_gen_request(op2, signatures={"sig1": "111"})
    node.write_manager.apply_request(pool_config_req, 10000)
    domain_root_hash = Ledger.hashToStr(node.domainLedger.uncommittedRootHash)
    batch = get_3PC_batch(domain_root_hash)
    txn_data = audit_batch_handler._create_audit_txn_data(
        batch, audit_batch_handler.ledger.get_last_txn())
    append_txn_to_ledger(txn_data, node.auditLedger, 2)

    # Checking rare case -- batch from two ledgers, that were never audited before
    op2 = {
        TXN_TYPE: PlenumTransactions.NODE.value,
        TARGET_NYM: "000000000000000000000000Trustee1",
        DATA: {ALIAS: "Node100"}
    }
    node_req = sdk_gen_request(op2, signatures={"sig1": "111"})
    node.write_manager.apply_request(node_req, 10000)
    op2 = {TXN_TYPE: TXN_AUTHOR_AGREEMENT_AML, AML_VERSION: "version2"}
    pool_config_req = sdk_gen_request(op2, signatures={"sig1": "111"})
    node.write_manager.apply_request(pool_config_req, 10000)
    pool_root_hash = Ledger.hashToStr(node.poolLedger.uncommittedRootHash)
    pool_state_root = Ledger.hashToStr(node.states[0].headHash)
    config_root_hash = Ledger.hashToStr(node.configLedger.uncommittedRootHash)
    config_state_root = Ledger.hashToStr(node.states[2].headHash)
    batch = get_3PC_batch(pool_root_hash, ledger_id=0)
    txn_data = audit_batch_handler._create_audit_txn_data(
        batch, audit_batch_handler.ledger.get_last_txn())
    # Pool (0) and config (2) ledgers were never audited before, so the
    # audit data must contain their full ledger and state roots.
    assert txn_data[AUDIT_TXN_LEDGER_ROOT][0] == pool_root_hash
    assert txn_data[AUDIT_TXN_STATE_ROOT][0] == pool_state_root
    # Domain ledger (1) did not change in this batch: its root is recorded
    # as an int delta ("1 audit txn back") and no state root is stored.
    assert txn_data[AUDIT_TXN_LEDGER_ROOT][1] == 1
    assert 1 not in txn_data[AUDIT_TXN_STATE_ROOT].keys()
    assert txn_data[AUDIT_TXN_LEDGER_ROOT][2] == config_root_hash
    assert txn_data[AUDIT_TXN_STATE_ROOT][2] == config_state_root
def test_audit_ledger_multiple_ledgers_in_one_batch(txnPoolNodeSet):
    """
    Audit txn data must record ledger and state roots both for the batch's
    own ledger (domain) and for any other ledger changed in the same 3PC
    batch (config) -- for the very first audit txn and for later ones alike.
    """
    # Checking first case -- first audit txn
    node = txnPoolNodeSet[0]
    audit_batch_handler = node.write_manager.audit_b_handler
    # Change domain and config ledgers in the same batch.
    op = {
        TXN_TYPE: PlenumTransactions.NYM.value,
        TARGET_NYM: "000000000000000000000000Trustee4"
    }
    nym_req = sdk_gen_request(op, signatures={"sig1": "111"})
    node.write_manager.apply_request(nym_req, 10000)
    op2 = {TXN_TYPE: TXN_AUTHOR_AGREEMENT_AML, AML_VERSION: "version1"}
    pool_config_req = sdk_gen_request(op2, signatures={"sig1": "111"})
    node.write_manager.apply_request(pool_config_req, 10000)
    domain_root_hash = Ledger.hashToStr(node.domainLedger.uncommittedRootHash)
    config_root_hash = Ledger.hashToStr(node.configLedger.uncommittedRootHash)
    domain_state_root = Ledger.hashToStr(node.states[1].headHash)
    config_state_root = Ledger.hashToStr(node.states[2].headHash)
    batch = get_3PC_batch(domain_root_hash)
    txn_data = audit_batch_handler._create_audit_txn_data(
        batch, audit_batch_handler.ledger.get_last_txn())
    append_txn_to_ledger(txn_data, node.auditLedger, 1)
    # First audit txn: full roots for both changed ledgers.
    assert txn_data[AUDIT_TXN_LEDGER_ROOT][1] == domain_root_hash
    assert txn_data[AUDIT_TXN_LEDGER_ROOT][2] == config_root_hash
    assert txn_data[AUDIT_TXN_STATE_ROOT][1] == domain_state_root
    assert txn_data[AUDIT_TXN_STATE_ROOT][2] == config_state_root

    # Checking usual case -- double update not in a first transaction
    op = {
        TXN_TYPE: PlenumTransactions.NYM.value,
        TARGET_NYM: "000000000000000000000000Trustee5"
    }
    nym_req = sdk_gen_request(op, signatures={"sig1": "111"})
    node.write_manager.apply_request(nym_req, 10000)
    op2 = {TXN_TYPE: TXN_AUTHOR_AGREEMENT_AML, AML_VERSION: "version2"}
    pool_config_req = sdk_gen_request(op2, signatures={"sig1": "111"})
    node.write_manager.apply_request(pool_config_req, 10000)
    # Checking second batch created
    domain_root_hash_2 = Ledger.hashToStr(
        node.domainLedger.uncommittedRootHash)
    config_root_hash_2 = Ledger.hashToStr(
        node.configLedger.uncommittedRootHash)
    domain_state_root_2 = Ledger.hashToStr(node.states[1].headHash)
    config_state_root_2 = Ledger.hashToStr(node.states[2].headHash)
    batch = get_3PC_batch(domain_root_hash_2)
    txn_data = audit_batch_handler._create_audit_txn_data(
        batch, audit_batch_handler.ledger.get_last_txn())
    # Checking first batch created
    assert txn_data[AUDIT_TXN_LEDGER_ROOT][1] == domain_root_hash_2
    assert txn_data[AUDIT_TXN_LEDGER_ROOT][2] == config_root_hash_2
    assert txn_data[AUDIT_TXN_STATE_ROOT][1] == domain_state_root_2
    assert txn_data[AUDIT_TXN_STATE_ROOT][2] == config_state_root_2
def _build_consistency_proof(self,
                             ledger_id: int,
                             seq_no_start: int,
                             seq_no_end: int) -> Optional[ConsistencyProof]:
    """
    Build a ConsistencyProof for the given ledger covering the range
    (seq_no_start, seq_no_end], or return None if the range is invalid.
    """
    ledger = self._provider.ledger(ledger_id)

    # Reject inverted ranges and ranges extending past the ledger.
    if seq_no_end < seq_no_start:
        logger.error(
            "{} cannot build consistency proof: end {} is less than start {}"
            .format(self, seq_no_end, seq_no_start))
        return
    if seq_no_start > ledger.size:
        logger.error(
            "{} cannot build consistency proof: start {} is more than ledger size {}"
            .format(self, seq_no_start, ledger.size))
        return
    if seq_no_end > ledger.size:
        logger.error(
            "{} cannot build consistency proof: end {} is more than ledger size {}"
            .format(self, seq_no_end, ledger.size))
        return

    if seq_no_start == 0:
        # Consistency proof for an empty tree cannot exist. Using the root
        # hash now so that the node which is behind can verify that
        # TODO: Make this an empty list
        old_root = Ledger.hashToStr(ledger.tree.root_hash)
        proof = [old_root]
    else:
        proof = self._make_consistency_proof(ledger, seq_no_start, seq_no_end)
        old_root = Ledger.hashToStr(
            ledger.tree.merkle_tree_hash(0, seq_no_start))

    new_root = Ledger.hashToStr(ledger.tree.merkle_tree_hash(0, seq_no_end))

    # TODO: Delete when INDY-1946 gets implemented
    three_pc_key = self._provider.three_phase_key_for_txn_seq_no(
        ledger_id, seq_no_end)
    view_no, pp_seq_no = three_pc_key if three_pc_key else (0, 0)

    return ConsistencyProof(ledger_id, seq_no_start, seq_no_end,
                            view_no, pp_seq_no, old_root, new_root, proof)
def test_msg_len_limit_large_enough_for_preprepare():
    """A maximally-sized PrePrepare must fit within the stack's MSG_LEN_LIMIT."""
    config = getConfig()
    # Build a PrePrepare carrying a full batch of requests.
    requests = [Request(signatures={})] * config.Max3PCBatchSize
    pre_prepare = PrePrepare(
        0, 0, 0,
        get_utc_epoch(),
        [r.digest for r in requests],
        init_discarded(),
        Replica.batchDigest(requests),
        0,
        Base58Serializer().serialize(BLANK_ROOT),
        Ledger.hashToStr(CompactMerkleTree().root_hash),
        0,
        True)
    serialized = ZStack.serializeMsg(pre_prepare)
    assert len(serialized) <= config.MSG_LEN_LIMIT
def _make_consistency_proof(self, ledger, end, catchup_till):
    """Return the Merkle consistency proof from `end` to `catchup_till`
    as a list of base58-encoded node hashes."""
    # TODO: make catchup_till optional
    # if catchup_till is None:
    #     catchup_till = ledger.size
    return [Ledger.hashToStr(node_hash)
            for node_hash in ledger.tree.consistency_proof(end, catchup_till)]
def _add_txns_to_ledger(node, looper, sdk_wallet_client, num_txns_in_reply, reply_count):
    '''
    Add txn_count transactions to node's ledger and return a CatchupTill
    covering all new transactions and a list of CatchupReps

    :return: CatchupTill, list of CatchupReps
    '''
    txn_count = num_txns_in_reply * reply_count
    ledger_manager = node.ledgerManager
    # NOTE(review): `ledger_id` is not a parameter or local here -- presumably
    # a module-level constant in this test file; confirm before reusing.
    ledger = ledger_manager.ledgerRegistry[ledger_id].ledger
    catchup_rep_service = ledger_manager._node_leecher._leechers[ledger_id]._catchup_rep_service
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, txn_count)
    # add transactions to ledger
    for req in reqs:
        txn = append_txn_metadata(reqToTxn(req), txn_time=12345678)
        catchup_rep_service._add_txn(txn)
    # generate CatchupReps: one reply per chunk of num_txns_in_reply txns
    replies = []
    for i in range(ledger.seqNo - txn_count + 1, ledger.seqNo + 1, num_txns_in_reply):
        start = i
        end = i + num_txns_in_reply - 1
        # Proof ties this chunk's end to the full ledger size.
        cons_proof = ledger_manager._node_seeder._make_consistency_proof(ledger, end, ledger.size)
        txns = {}
        for seq_no, txn in ledger.getAllTxn(start, end):
            # Keys are stringified seq_nos, matching the CatchupRep wire format.
            txns[str(seq_no)] = ledger_manager.owner.update_txn_with_extra_data(txn)
        replies.append(CatchupRep(ledger_id, SortedDict(txns), cons_proof))
    three_pc_key = node.three_phase_key_for_txn_seq_no(ledger_id, ledger.seqNo)
    view_no, pp_seq_no = three_pc_key if three_pc_key else (0, 0)
    return CatchupTill(start_size=ledger.seqNo - txn_count,
                       final_size=ledger.seqNo,
                       final_hash=Ledger.hashToStr(ledger.tree.merkle_tree_hash(0, ledger.seqNo)),
                       view_no=view_no,
                       pp_seq_no=pp_seq_no), replies
def _buildConsistencyProof(self, ledgerId, seqNoStart, seqNoEnd):
    """
    Build a ConsistencyProof message for the given ledger proving that the
    section (seqNoStart, seqNoEnd] is consistent with the tree of size
    seqNoStart.

    Returns None (after logging an error) when the requested range is out
    of the ledger's bounds or inverted.
    """
    ledger = self.getLedgerInfoByType(ledgerId).ledger
    ledgerSize = ledger.size
    if seqNoStart > ledgerSize:
        logger.error("{} cannot build consistency proof from {} "
                     "since its ledger size is {}".format(
                         self, seqNoStart, ledgerSize))
        return
    if seqNoEnd > ledgerSize:
        logger.error("{} cannot build consistency "
                     "proof till {} since its ledger size is {}".format(
                         self, seqNoEnd, ledgerSize))
        return
    if seqNoEnd < seqNoStart:
        # FIX: this branch previously called `self.error(...)`, which is not
        # defined on this class; use the module-level logger like the two
        # sibling branches above.
        logger.error('{} cannot build consistency proof since end {} is '
                     'lesser than start {}'.format(self, seqNoEnd, seqNoStart))
        return
    if seqNoStart == 0:
        # Consistency proof for an empty tree cannot exist. Using the root
        # hash now so that the node which is behind can verify that
        # TODO: Make this an empty list
        oldRoot = ledger.tree.root_hash
        proof = [oldRoot, ]
    else:
        proof = ledger.tree.consistency_proof(seqNoStart, seqNoEnd)
        oldRoot = ledger.tree.merkle_tree_hash(0, seqNoStart)
    newRoot = ledger.tree.merkle_tree_hash(0, seqNoEnd)
    key = self.owner.three_phase_key_for_txn_seq_no(ledgerId, seqNoEnd)
    logger.debug('{} found 3 phase key {} for ledger {} seqNo {}'.format(
        self, key, ledgerId, seqNoEnd))
    if key is None:
        # The node receiving consistency proof should check if it has
        # received this sentinel 3 phase key (0, 0) in spite of seeing a
        # non-zero txn seq no
        key = (0, 0)
    return ConsistencyProof(ledgerId, seqNoStart, seqNoEnd, *key,
                            Ledger.hashToStr(oldRoot),
                            Ledger.hashToStr(newRoot),
                            [Ledger.hashToStr(p) for p in proof])
def _calc_catchup_till(self) -> Dict[int, CatchupTill]:
    """
    Derive per-ledger catchup targets from the last committed audit txn.

    :return: dict mapping ledger_id -> CatchupTill(start_size, final_size,
             final_hash); empty dict when the audit ledger is empty or
             the audit data is corrupted.
    """
    audit_ledger = self._provider.ledger(AUDIT_LEDGER_ID)
    last_audit_txn = audit_ledger.get_last_committed_txn()
    if last_audit_txn is None:
        # No audit txns yet -- nothing to catch up to.
        return {}
    catchup_till = {}
    last_audit_txn = get_payload_data(last_audit_txn)
    for ledger_id, final_size in last_audit_txn[
            AUDIT_TXN_LEDGERS_SIZE].items():
        ledger = self._provider.ledger(ledger_id)
        if ledger is None:
            logger.debug(
                "{} has audit ledger with references to nonexistent "
                "ledger with ID {}. Maybe it was frozen.".format(
                    self, ledger_id))
            continue
        start_size = ledger.size
        final_hash = last_audit_txn[AUDIT_TXN_LEDGER_ROOT].get(ledger_id)
        if final_hash is None:
            # No root recorded for this ledger: only consistent when the
            # ledger hasn't grown since the audit txn. Use the current
            # tree root (or None for an empty ledger).
            if final_size != ledger.size:
                logger.error(
                    "{} has corrupted audit ledger: "
                    "it indicates that ledger {} has new transactions but doesn't have new txn root"
                    .format(self, ledger_id))
                return {}
            final_hash = Ledger.hashToStr(
                ledger.tree.root_hash) if final_size > 0 else None
        if isinstance(final_hash, int):
            # An int root is a delta reference: the real root lives in the
            # audit txn `final_hash` entries back -- dereference it.
            audit_txn = audit_ledger.getBySeqNo(audit_ledger.size - final_hash)
            if audit_txn is None:
                logger.error(
                    "{} has corrupted audit ledger: "
                    "its txn root for ledger {} references nonexistent txn with seq_no {} - {} = {}"
                    .format(self, ledger_id, audit_ledger.size, final_hash,
                            audit_ledger.size - final_hash))
                return {}
            audit_txn = get_payload_data(audit_txn)
            final_hash = audit_txn[AUDIT_TXN_LEDGER_ROOT].get(ledger_id)
            # The referenced txn must hold a real (string) root, not
            # another delta.
            if not isinstance(final_hash, str):
                logger.error(
                    "{} has corrupted audit ledger: "
                    "its txn root for ledger {} references txn with seq_no {} - {} = {} "
                    "which doesn't contain txn root".format(
                        self, ledger_id, audit_ledger.size, final_hash,
                        audit_ledger.size - final_hash))
                return {}
        catchup_till[ledger_id] = CatchupTill(start_size=start_size,
                                              final_size=final_size,
                                              final_hash=final_hash)
    return catchup_till
def processCatchupReq(self, req: CatchupReq, frm: str):
    """
    Serve a catchup request: send `frm` a CatchupRep with the requested txn
    range and a consistency proof from the (possibly clamped) range end up
    to req.catchupTill. Invalid requests are discarded with a logged reason.
    """
    logger.debug("{} received catchup request: {} from {}".format(
        self, req, frm))
    if not self.ownedByNode:
        self.discard(req, reason="Only node can serve catchup requests",
                     logMethod=logger.warning)
        return
    start = getattr(req, f.SEQ_NO_START.nm)
    end = getattr(req, f.SEQ_NO_END.nm)
    ledger = self.getLedgerForMsg(req)
    if end < start:
        self.discard(req, reason="Invalid range", logMethod=logger.warning)
        return
    ledger_size = ledger.size
    if start > ledger_size:
        self.discard(req, reason="{} not able to service since "
                                 "ledger size is {} and start is {}".format(
                                     self, ledger_size, start),
                     logMethod=logger.debug)
        return
    if req.catchupTill > ledger_size:
        self.discard(req, reason="{} not able to service since "
                                 "ledger size is {} and catchupTill is {}".format(
                                     self, ledger_size, req.catchupTill),
                     logMethod=logger.debug)
        return
    # Adjusting for end greater than ledger size
    if end > ledger_size:
        logger.debug("{} does not have transactions till {} "
                     "so sending only till {}".format(
                         self, end, ledger_size))
        end = ledger_size
    logger.debug("node {} requested catchup for {} from {} to {}".format(
        frm, end - start + 1, start, end))
    logger.debug("{} generating consistency proof: {} from {}".format(
        self, end, req.catchupTill))
    consProof = [
        Ledger.hashToStr(p)
        for p in ledger.tree.consistency_proof(end, req.catchupTill)
    ]
    txns = {}
    for seq_no, txn in ledger.getAllTxn(start, end):
        # Keys stay as int seq_nos here (cf. the str keys used elsewhere).
        txns[seq_no] = self.owner.update_txn_with_extra_data(txn)
    self.sendTo(msg=CatchupRep(getattr(req, f.LEDGER_ID.nm), txns,
                               consProof), to=frm)
def process_catchup_req(self, req: CatchupReq, frm: str):
    """
    Serve a catchup request: reply to `frm` with the requested txn range
    and a consistency proof from seqNoEnd to catchupTill, splitting the
    reply if it exceeds message size limits. Requests referencing an
    invalid ledger or an inconsistent range are discarded with a reason.
    """
    logger.info("{} received catchup request: {} from {}".format(
        self, req, frm))
    ledger_id, ledger = self._get_ledger_and_id(req)
    if ledger is None:
        self._provider.discard(req,
                               reason="it references invalid ledger",
                               logMethod=logger.warning)
        return
    start = req.seqNoStart
    end = req.seqNoEnd
    # Enforce start <= end <= catchupTill <= ledger.size.
    if start > end:
        self._provider.discard(
            req,
            reason=
            "not able to service since start = {} greater than end = {}".
            format(start, end),
            logMethod=logger.debug)
        return
    if end > req.catchupTill:
        self._provider.discard(
            req,
            reason=
            "not able to service since end = {} greater than catchupTill = {}"
            .format(end, req.catchupTill),
            logMethod=logger.debug)
        return
    if req.catchupTill > ledger.size:
        self._provider.discard(
            req,
            reason=
            "not able to service since catchupTill = {} greater than ledger size = {}"
            .format(req.catchupTill, ledger.size),
            logMethod=logger.debug)
        return
    cons_proof = ledger.tree.consistency_proof(end, req.catchupTill)
    cons_proof = [Ledger.hashToStr(p) for p in cons_proof]
    txns = {}
    for seq_no, txn in ledger.getAllTxn(start, end):
        txns[seq_no] = self._provider.update_txn_with_extra_data(txn)
    txns = SortedDict(
        txns)  # TODO: Do we really need them sorted on the sending side?
    rep = CatchupRep(ledger_id, txns, cons_proof)
    # Replies larger than the message limit are split into chunks.
    message_splitter = self._make_splitter_for_catchup_rep(
        ledger, req.catchupTill)
    self._provider.send_to(rep, frm, message_splitter)
def test_msg_len_limit_large_enough_for_preprepare():
    """Serialized PrePrepare for a full batch must not exceed MSG_LEN_LIMIT."""
    config = getConfig()
    batch = [Request(signatures={})] * config.Max3PCBatchSize
    digests = [request.digest for request in batch]
    batch_digest = Replica.batchDigest(batch)
    empty_state_root = Base58Serializer().serialize(BLANK_ROOT)
    empty_txn_root = Ledger.hashToStr(CompactMerkleTree().root_hash)
    msg = PrePrepare(0, 0, 0,
                     get_utc_epoch(),
                     digests,
                     init_discarded(),
                     batch_digest,
                     0,
                     empty_state_root,
                     empty_txn_root,
                     0,
                     True)
    assert len(ZStack.serializeMsg(msg)) <= config.MSG_LEN_LIMIT
def build_broken_ledger_status(self, ledger_id): nonlocal next_size if ledger_id != DOMAIN_LEDGER_ID: return origMethod(ledger_id) size = self.primaryStorage.size next_size = next_size + 1 if next_size < size else 1 print("new size {}".format(next_size)) newRootHash = Ledger.hashToStr( self.domainLedger.tree.merkle_tree_hash(0, next_size)) three_pc_key = self.three_phase_key_for_txn_seq_no( ledger_id, next_size) v, p = three_pc_key if three_pc_key else None, None ledgerStatus = LedgerStatus(1, next_size, v, p, newRootHash) print("dl status {}".format(ledgerStatus)) return ledgerStatus
def build_broken_ledger_status(self, ledger_id): nonlocal next_size if ledger_id != DOMAIN_LEDGER_ID: return origMethod(ledger_id) size = self.domainLedger.size next_size = next_size + 1 if next_size < size else 1 print("new size {}".format(next_size)) newRootHash = Ledger.hashToStr( self.domainLedger.tree.merkle_tree_hash(0, next_size)) three_pc_key = self.three_phase_key_for_txn_seq_no(ledger_id, next_size) v, p = three_pc_key if three_pc_key else None, None ledgerStatus = LedgerStatus(1, next_size, v, p, newRootHash, CURRENT_PROTOCOL_VERSION) print("dl status {}".format(ledgerStatus)) return ledgerStatus
def build_broken_ledger_status(ledger_id: int, provider: CatchupDataProvider): nonlocal next_size if provider.node_name() != lagging_node.name: return orig_method(ledger_id, provider) if ledger_id != AUDIT_LEDGER_ID: return orig_method(ledger_id, provider) audit_ledger = provider.ledger(AUDIT_LEDGER_ID) size = audit_ledger.size next_size = next_size + 1 if next_size < size else 1 print("new size {}".format(next_size)) newRootHash = Ledger.hashToStr( audit_ledger.tree.merkle_tree_hash(0, next_size)) ledgerStatus = LedgerStatus(AUDIT_LEDGER_ID, next_size, 0, 0, newRootHash, CURRENT_PROTOCOL_VERSION) logger.info("audit status {}".format(ledgerStatus)) return ledgerStatus
def _sendIncorrectTxns(self, req, frm):
    """Maliciously answer domain-ledger catchup requests with corrupted
    txn types; serve all other ledgers honestly."""
    ledgerId = getattr(req, f.LEDGER_ID.nm)
    if ledgerId != DOMAIN_LEDGER_ID:
        self.processCatchupReq(req, frm)
        return

    logger.info("{} being malicious and sending incorrect transactions"
                " for catchup request {} from {}".format(self, req, frm))
    start = getattr(req, f.SEQ_NO_START.nm)
    end = getattr(req, f.SEQ_NO_END.nm)
    ledger = self.getLedgerForMsg(req)
    corrupted = {}
    for seqNo, txn in ledger.getAllTxn(start, end):
        # Since the type of random request is `buy`
        if txn.get(TXN_TYPE) == "buy":
            txn[TXN_TYPE] = "randomtype"
        corrupted[seqNo] = txn
    consProof = [Ledger.hashToStr(p)
                 for p in ledger.tree.consistency_proof(end, ledger.size)]
    self.sendTo(msg=CatchupRep(getattr(req, f.LEDGER_ID.nm),
                               corrupted, consProof),
                to=frm)
def _sendIncorrectTxns(self, req, frm):
    """Maliciously answer domain-ledger catchup requests with corrupted
    txn types; serve all other ledgers honestly."""
    ledgerId = getattr(req, f.LEDGER_ID.nm)
    if ledgerId != DOMAIN_LEDGER_ID:
        self.processCatchupReq(req, frm)
        return

    logger.info("{} being malicious and sending incorrect transactions"
                " for catchup request {} from {}".format(self, req, frm))
    start = getattr(req, f.SEQ_NO_START.nm)
    end = getattr(req, f.SEQ_NO_END.nm)
    ledger = self.getLedgerForMsg(req)
    corrupted = {}
    for seqNo, txn in ledger.getAllTxn(start, end):
        # Since the type of random request is `buy`
        if get_type(txn) == "buy":
            set_type(txn, "randombuy")
        corrupted[seqNo] = txn
    consProof = [Ledger.hashToStr(p)
                 for p in ledger.tree.consistency_proof(end, ledger.size)]
    self.sendTo(msg=CatchupRep(getattr(req, f.LEDGER_ID.nm),
                               corrupted, consProof),
                to=frm)
def build_broken_ledger_status(ledger_id: int, provider: CatchupDataProvider): nonlocal next_size if provider.node_name() != new_node.name: return origMethod(ledger_id, provider) if ledger_id != DOMAIN_LEDGER_ID: return origMethod(ledger_id, provider) domain_ledger = provider.ledger(DOMAIN_LEDGER_ID) size = domain_ledger.size next_size = next_size + 1 if next_size < size else 1 print("new size {}".format(next_size)) newRootHash = Ledger.hashToStr( domain_ledger.tree.merkle_tree_hash(0, next_size)) three_pc_key = provider.three_phase_key_for_txn_seq_no( ledger_id, next_size) v, p = three_pc_key if three_pc_key else None, None ledgerStatus = LedgerStatus(1, next_size, v, p, newRootHash, CURRENT_PROTOCOL_VERSION) print("dl status {}".format(ledgerStatus)) return ledgerStatus
def _make_consistency_proof(ledger: Ledger, seq_no_start: int, seq_no_end: int):
    """Return the Merkle consistency proof between the two ledger sizes
    as a list of base58-encoded node hashes."""
    raw_proof = ledger.tree.consistency_proof(seq_no_start, seq_no_end)
    return [Ledger.hashToStr(node_hash) for node_hash in raw_proof]