def testPrePrepareWithHighSeqNo(looper, txnPoolNodeSet, propagated1):
    """
    A PRE-PREPARE whose ppSeqNo jumps ahead of the expected next sequence
    number must cause every non-primary replica to raise a WRONG_PPSEQ_NO
    suspicion against the primary.
    """
    def chk():
        # Every non-primary node must have recorded exactly one suspicion
        # about the bogus sequence number.
        for r in getNonPrimaryReplicas(txnPoolNodeSet, instId):
            nodeSuspicions = len(getNodeSuspicions(
                r.node, Suspicions.WRONG_PPSEQ_NO.code))
            assert nodeSuspicions == 1

    def checkPreprepare(replica, viewNo, ppSeqNo, req, numOfPrePrepares):
        # The PRE-PREPARE stored under (viewNo, ppSeqNo) must carry this
        # request's (identifier, reqId, digest) triple.
        assert (replica.prePrepares[viewNo, ppSeqNo][0]) == \
            (req.identifier, req.reqId, req.digest)

    # NOTE(review): `instId` is not defined in this function; presumably a
    # module-level constant in this file — confirm.
    primary = getPrimaryReplica(txnPoolNodeSet, instId)
    nonPrimaryReplicas = getNonPrimaryReplicas(txnPoolNodeSet, instId)
    req = propagated1.reqDigest
    # Send one legitimate PRE-PREPARE first so replicas have a baseline.
    primary.doPrePrepare(req)
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet))
    for np in nonPrimaryReplicas:
        looper.run(
            eventually(checkPreprepare, np, primary.viewNo,
                       primary.lastPrePrepareSeqNo - 1, req, 1,
                       retryWait=.5, timeout=timeout))

    # Craft a PRE-PREPARE whose ppSeqNo skips ahead by 2; replicas expect
    # lastPrePrepareSeqNo + 1, so this must be flagged as suspicious.
    newReqDigest = (req.identifier, req.reqId + 1, req.digest)
    incorrectPrePrepareReq = PrePrepare(instId,
                                        primary.viewNo,
                                        primary.lastPrePrepareSeqNo + 2,
                                        *newReqDigest,
                                        get_utc_epoch())
    primary.send(incorrectPrePrepareReq, TPCStat.PrePrepareSent)

    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet))
    looper.run(eventually(chk, retryWait=1, timeout=timeout))
def test_belated_request_not_processed_if_already_in_3pc_process(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    A client request delayed on its way to one node must not be processed a
    second time by that node after the pool has ordered it: every node's
    domain ledger grows by exactly one txn.
    """
    lagging = txnPoolNodeSet[3]
    size_before = lagging.domainLedger.size
    pool_size = len(txnPoolNodeSet)

    # Time budget for one full 3PC round:
    # propagate + pre-prepare + prepare + commit.
    three_pc_wait = sum(f(pool_size) for f in (waits.expectedPropagateTime,
                                               waits.expectedPrePrepareTime,
                                               waits.expectedPrepareTime,
                                               waits.expectedCommittedTime))

    # Hold back the client request on one node and COMMITs on all nodes.
    lagging.clientIbStasher.delay(req_delay(300))
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(cDelay(300))

    signed_reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    sdk_send_signed_requests(sdk_pool_handle, signed_reqs)
    looper.runFor(three_pc_wait)

    # Release the belated client request first...
    lagging.clientIbStasher.reset_delays_and_process_delayeds()
    looper.runFor(three_pc_wait)

    # ...then let the delayed COMMITs through so ordering completes.
    for node in txnPoolNodeSet:
        node.nodeIbStasher.reset_delays_and_process_delayeds()
    looper.runFor(waits.expectedOrderingTime(lagging.replicas.num_replicas))

    for node in txnPoolNodeSet:
        assert node.domainLedger.size - size_before == 1
def preprepared1WithDelay(looper, txnPoolNodeSet, propagated1, faultyNodes):
    """
    Wait until the propagated request has been pre-prepared on every protocol
    instance, allowing extra time for the configured PRE-PREPARE delay.
    """
    pool_size = len(txnPoolNodeSet)
    total_timeout = waits.expectedPrePrepareTime(pool_size) + delayPrePrepareSec
    instance_ids = range(getNoInstances(pool_size))
    checkPrePrepared(looper,
                     txnPoolNodeSet,
                     propagated1,
                     instance_ids,
                     faultyNodes,
                     timeout=total_timeout)
def testOrderingWhenPrePrepareNotReceived(looper, txnPoolNodeSet,
                                          sdk_wallet_client, sdk_pool_handle):
    """
    Send commits but delay pre-prepare and prepares such that enough
    commits are received, now the request should not be ordered until
    pre-prepare is received and ordering should just happen once,
    """
    delay = 10
    non_prim_reps = getNonPrimaryReplicas(txnPoolNodeSet, 0)

    # Pick one slow non-primary: delay its PRE-PREPAREs and PREPAREs so it
    # receives COMMITs first.
    slow_rep = non_prim_reps[0]
    slow_node = slow_rep.node
    slow_node.nodeIbStasher.delay(ppDelay(delay, 0))
    slow_node.nodeIbStasher.delay(pDelay(delay, 0))

    # Additionally stash any PRE-PREPARE/PREPARE that does arrive so they can
    # be replayed manually after commits have piled up.
    stash_pp = []
    stash_p = []
    orig_pp_method = slow_rep.processPrePrepare
    orig_p_method = slow_rep.processPrepare

    def patched_pp(self, msg, sender):
        # Swallow the message into the stash instead of processing it.
        stash_pp.append((msg, sender))

    def patched_p(self, msg, sender):
        stash_p.append((msg, sender))

    slow_rep.processPrePrepare = \
        types.MethodType(patched_pp, slow_rep)
    slow_rep.processPrepare = \
        types.MethodType(patched_p, slow_rep)

    def chk1():
        # Commits must be queueing up while the prepare phase is blocked.
        assert len(slow_rep.commitsWaitingForPrepare) > 0

    sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet)) + delay
    looper.run(eventually(chk1, retryWait=1, timeout=timeout))

    # Replay the stashed messages through the original (still bound) handlers.
    for m, s in stash_pp:
        orig_pp_method(m, s)

    for m, s in stash_p:
        orig_p_method(m, s)

    def chk2():
        # Queued commits get consumed and the request is ordered exactly once.
        assert len(slow_rep.commitsWaitingForPrepare) == 0
        assert slow_rep.spylog.count(slow_rep.doOrder.__name__) == 1

    timeout = waits.expectedOrderingTime(len(non_prim_reps) + 1) + 2 * delay
    looper.run(eventually(chk2, retryWait=1, timeout=timeout))
def test_revert_works_for_fees_after_view_change(
        looper, helpers, nodeSetWithIntegratedTokenPlugin, sdk_pool_handle,
        fees_set, mint_tokens, addresses, fees):
    """
    One node misses ordering (COMMITs delayed) while fee-paying txns are sent
    and a view change happens; that node must revert its uncommitted batches
    and converge with the rest of the pool afterwards.
    """
    node_set = nodeSetWithIntegratedTokenPlugin

    current_amount = get_amount_from_token_txn(mint_tokens)
    seq_no = get_seq_no(mint_tokens)
    reverted_node = nodeSetWithIntegratedTokenPlugin[-1]

    # Baseline: one fee-paying NYM and one transfer ordered by the whole pool.
    current_amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, addresses, current_amount)
    current_amount, seq_no, _ = send_and_check_transfer(
        helpers, addresses, fees, looper, current_amount, seq_no)

    with delay_rules_without_processing(reverted_node.nodeIbStasher, cDelay()):
        len_batches_before = len(reverted_node.master_replica.batches)
        # These two txns reach reverted_node but cannot be ordered there.
        current_amount, seq_no, _ = send_and_check_transfer(
            helpers, addresses, fees, looper, current_amount, seq_no)
        current_amount, seq_no, _ = send_and_check_nym_with_fees(
            helpers, fees_set, seq_no, looper, addresses, current_amount)
        looper.runFor(
            waits.expectedPrePrepareTime(
                len(nodeSetWithIntegratedTokenPlugin)))
        len_batches_after = len(reverted_node.master_replica.batches)

        """
        Checks, that we have a 2 new batches
        """
        assert len_batches_after - len_batches_before == 2
        # Force a view change while the delayed node still holds the
        # uncommitted batches.
        for n in node_set:
            n.view_changer.on_master_degradation()
        ensure_view_change(looper, nodeSetWithIntegratedTokenPlugin)

        looper.run(
            eventually(
                lambda: assertExp(reverted_node.mode == Mode.participating)))
        ensure_all_nodes_have_same_data(looper, node_set)

    # Pool must keep working after the revert.
    send_and_check_nym_with_fees(helpers, fees_set, seq_no, looper,
                                 addresses, current_amount)
    ensure_all_nodes_have_same_data(looper, node_set)
def test_revert_nym_with_fees_before_catchup(looper, helpers,
                                             nodeSetWithIntegratedTokenPlugin,
                                             fees_set, fees,
                                             xfer_mint_tokens, xfer_addresses):
    """
    A node that cannot order (PREPAREs and COMMITs delayed) must revert its
    uncommitted fee-paying NYM on catch-up and then converge with the pool.
    """
    pool = nodeSetWithIntegratedTokenPlugin
    lagging_node = pool[-1]
    amount = get_amount_from_token_txn(xfer_mint_tokens)
    seq_no = get_seq_no(xfer_mint_tokens)

    # Baseline: one fee-paying NYM ordered by the whole pool.
    amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, xfer_addresses, amount)

    with delay_rules_without_processing(lagging_node.nodeIbStasher,
                                        cDelay(), pDelay()):
        # The lagging node receives this request but can never order it.
        amount, seq_no, _ = send_and_check_nym_with_fees(
            helpers, fees_set, seq_no, looper, xfer_addresses, amount)
        looper.runFor(waits.expectedPrePrepareTime(len(pool)))

        # Catch-up should discard the uncommitted state on the lagging node.
        lagging_node.start_catchup()
        for node in pool:
            looper.run(eventually(
                lambda node=node: assertExp(node.mode == Mode.participating)))
        for node in pool:
            looper.run(eventually(check_state, node, True,
                                  retryWait=0.2, timeout=15))

    ensure_all_nodes_have_same_data(looper, pool)

    # The pool keeps working after the revert.
    amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, xfer_addresses, amount)
    ensure_all_nodes_have_same_data(looper, pool)
def test_unordered_request_freed_on_replica_removal(looper, txnPoolNodeSet,
                                                    sdk_pool_handle,
                                                    sdk_wallet_client,
                                                    chkFreqPatched,
                                                    view_change):
    """
    Removing a backup replica while a request is stuck (COMMITs delayed) must
    decrement that request's forwardedTo counter; once delays are lifted the
    request is ordered and finally freed when a checkpoint stabilises.
    """
    node = txnPoolNodeSet[0]
    pool_size = len(txnPoolNodeSet)
    stashers = [n.nodeIbStasher for n in txnPoolNodeSet]

    # Time budget for one full (blocked) 3PC round.
    stuck_3pc_wait = (waits.expectedPropagateTime(pool_size) +
                      waits.expectedPrePrepareTime(pool_size) +
                      waits.expectedPrepareTime(pool_size) +
                      waits.expectedCommittedTime(pool_size))

    with delay_rules(stashers, cDelay(delay=sys.maxsize)):
        req = sdk_send_random_requests(looper, sdk_pool_handle,
                                       sdk_wallet_client, 1)
        looper.runFor(stuck_3pc_wait)

        assert len(node.requests) == 1
        forwarded_before = next(iter(node.requests.values())).forwardedTo

        # Drop the last backup replica while the request is still unordered.
        node.replicas.remove_replica(node.replicas.num_replicas - 1)

        assert len(node.requests) == 1
        forwarded_after = next(iter(node.requests.values())).forwardedTo
        assert forwarded_after == forwarded_before - 1
        chkChkpoints(txnPoolNodeSet, 0)

    sdk_get_replies(looper, req)
    chkChkpoints(txnPoolNodeSet, 1)

    # Send one more request to stabilize checkpoint
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    looper.run(eventually(chkChkpoints, txnPoolNodeSet, 1, 0))
    assert len(node.requests) == 0
def testReplicasRejectSamePrePrepareMsg(looper, nodeSet, client1, wallet1):
    """
    Replicas should not accept PRE-PREPARE for view "v" and prepare sequence
    number "n" if it has already accepted a request with view number "v" and
    sequence number "n"
    """
    numOfNodes = 4
    fValue = getMaxFailures(numOfNodes)
    primaryRepl = getPrimaryReplica(nodeSet, 1)
    logger.debug("Primary Replica: {}".format(primaryRepl))
    nonPrimaryReplicas = getNonPrimaryReplicas(nodeSet, 1)
    logger.debug("Non Primary Replicas: " + str(nonPrimaryReplicas))

    # Delay COMMITs so request is not ordered and checks can be made
    c_delay = 10
    for node in nodeSet:
        node.nodeIbStasher.delay(cDelay(delay=c_delay, instId=1))

    # First request goes through the normal pre-prepare/prepare path.
    request1 = sendRandomRequest(wallet1, client1)
    for npr in nonPrimaryReplicas:
        looper.run(
            eventually(checkPrepareReqSent, npr, request1.identifier,
                       request1.reqId, primaryRepl.viewNo,
                       retryWait=1))
    prePrepareReq = primaryRepl.sentPrePrepares[
        primaryRepl.viewNo, primaryRepl.lastPrePrepareSeqNo]
    looper.run(
        eventually(checkPrePrepareReqRecvd, nonPrimaryReplicas,
                   prePrepareReq, retryWait=1))

    # logger.debug("Patching the primary replica's pre-prepare sending method ")
    # orig_method = primaryRepl.sendPrePrepare

    # def patched(self, ppReq):
    #     self.sentPrePrepares[ppReq.viewNo, ppReq.ppSeqNo] = ppReq
    #     ppReq = updateNamedTuple(ppReq, **{f.PP_SEQ_NO.nm: 1})
    #     self.send(ppReq, TPCStat.PrePrepareSent)
    #
    # primaryRepl.sendPrePrepare = types.MethodType(patched, primaryRepl)

    # Make the primary malicious: its next PRE-PREPARE reuses an already
    # accepted (viewNo, ppSeqNo) pair.
    logger.debug(
        "Decrementing the primary replica's pre-prepare sequence number by "
        "one...")
    primaryRepl._lastPrePrepareSeqNo -= 1
    view_no = primaryRepl.viewNo
    request2 = sendRandomRequest(wallet1, client1)
    timeout = waits.expectedPrePrepareTime(len(nodeSet))
    looper.run(
        eventually(checkPrePrepareReqSent, primaryRepl, request2,
                   retryWait=1, timeout=timeout))

    # Since the node is malicious, it will not be able to process requests due
    # to conflicts in PRE-PREPARE
    primaryRepl.node.stop()
    looper.removeProdable(primaryRepl.node)

    # Re-create the conflicting PRE-PREPARE the (now stopped) primary sent,
    # so its receipt by the non-primaries can be asserted.
    reqIdr = [(request2.identifier, request2.reqId)]
    prePrepareReq = PrePrepare(
        primaryRepl.instId,
        primaryRepl.viewNo,
        primaryRepl.lastPrePrepareSeqNo,
        time.time(),
        reqIdr,
        1,
        primaryRepl.batchDigest([request2]),
        DOMAIN_LEDGER_ID,
        primaryRepl.stateRootHash(DOMAIN_LEDGER_ID),
        primaryRepl.txnRootHash(DOMAIN_LEDGER_ID))

    logger.debug("""Checking whether all the non primary replicas have received
                the pre-prepare request with same sequence number""")
    timeout = waits.expectedPrePrepareTime(len(nodeSet))
    looper.run(
        eventually(checkPrePrepareReqRecvd, nonPrimaryReplicas,
                   prePrepareReq, retryWait=1, timeout=timeout))
    logger.debug("""Check that none of the non primary replicas didn't send
    any prepare message " in response to the pre-prepare message""")
    timeout = waits.expectedPrepareTime(len(nodeSet))
    looper.runFor(timeout)  # expect prepare processing timeout

    # check if prepares have not been sent
    for npr in nonPrimaryReplicas:
        with pytest.raises(AssertionError):
            looper.run(
                eventually(checkPrepareReqSent, npr, request2.identifier,
                           request2.reqId, view_no,
                           retryWait=1, timeout=timeout))

    # The first (legitimate) request must still be ordered once the delayed
    # COMMITs get through.
    timeout = waits.expectedTransactionExecutionTime(len(nodeSet)) + c_delay
    result1 = looper.run(
        eventually(checkSufficientRepliesReceived, client1.inBox,
                   request1.reqId, fValue,
                   retryWait=1, timeout=timeout))
    logger.debug("request {} gives result {}".format(request1, result1))
def testPrePrepareWhenPrimaryStatusIsUnknown(tdir_for_func):
    """
    A node (D) that does not yet know whether it is primary should stash
    incoming 3-phase messages and process them once the primary is known.

    NOTE(review): this test references `request` and `delayD`, but both are
    only created in commented-out code below, so as written the body raises
    NameError when run — the client setup and the PRIMARY-delay need to be
    restored (see the TODO below).
    """
    nodeNames = genNodeNames(4)
    nodeReg = genNodeReg(names=nodeNames)
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            prepareNodeSet(looper, nodeSet)
            nodeA, nodeB, nodeC, nodeD = tuple(
                addNodeBack(nodeSet, looper, nodeNames[i])
                for i in range(0, 4))

            # Since primary selection is round robin, A and B will be primaries

            # Nodes C and D delays self nomination so A and B can become
            # primaries
            # nodeC.delaySelfNomination(10)
            # nodeD.delaySelfNomination(10)

            # Node D delays receiving PRIMARY messages from all nodes so it
            # will not know whether it is primary or not
            # delayD = 5
            # nodeD.nodeIbStasher.delay(delayerMsgTuple(delayD, Primary))

            checkPoolReady(looper=looper, nodes=nodeSet)

            # client1, wal = setupClient(looper, nodeSet, tmpdir=tdir_for_func)
            # request = sendRandomRequest(wal, client1)

            # TODO Rethink this
            instNo = 0

            timeout = waits.expectedClientRequestPropagationTime(len(nodeSet))
            for i in range(3):
                node = nodeSet.getNode(nodeNames[i])
                # Nodes A, B and C should have received PROPAGATE request
                # from Node D
                # FIXME(review): `request` is undefined here — its creation is
                # commented out above.
                looper.run(
                    eventually(checkIfPropagateRecvdFromNode, node, nodeD,
                               request.identifier,
                               request.reqId, retryWait=1, timeout=timeout))

            def assert_msg_count(typ, count):
                # Count of 3PC messages of `typ` stashed by D's replica.
                assert len(getPendingRequestsForReplica(
                    nodeD.replicas[instNo], typ)) == count

            # Node D should have 1 pending PRE-PREPARE request
            timeout = waits.expectedPrePrepareTime(len(nodeSet))
            looper.run(eventually(assert_msg_count, PrePrepare, 1,
                                  retryWait=1, timeout=timeout))

            # Node D should have 2 pending PREPARE requests(from node B and C)
            timeout = waits.expectedPrepareTime(len(nodeSet))
            looper.run(eventually(assert_msg_count, Prepare, 2,
                                  retryWait=1, timeout=timeout))

            # Its been checked above that replica stashes 3 phase messages in
            # lack of primary, now avoid delay (fix the network)
            nodeD.nodeIbStasher.reset_delays_and_process_delayeds()

            # Node D should have no pending PRE-PREPARE, PREPARE or COMMIT
            # requests
            # FIXME(review): `delayD` is undefined (commented out above).
            # The lambda's late binding of `reqType` is harmless only because
            # looper.run blocks within each loop iteration.
            for reqType in [PrePrepare, Prepare, Commit]:
                looper.run(
                    eventually(lambda: assertLength(
                        getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                     reqType),
                        0),
                        retryWait=1,
                        timeout=delayD))  # wait little more than delay
def test_ordering_with_fees_and_without_fees(looper, helpers,
                                             nodeSetWithIntegratedTokenPlugin,
                                             sdk_pool_handle,
                                             sdk_wallet_steward,
                                             fees_set, address_main,
                                             mint_tokens, fees):
    """
    Interleave NYM txns with and without fees (toggling the pool's fee
    schedule in between) while COMMITs are delayed; after releasing the
    delays all txns must be ordered and the domain/token ledgers must grow
    by the expected counts.
    """
    node_set = nodeSetWithIntegratedTokenPlugin
    node_stashers = [n.nodeIbStasher for n in nodeSetWithIntegratedTokenPlugin]
    committed_tokens_before = get_committed_txns_count_for_pool(
        node_set, TOKEN_LEDGER_ID)
    committed_domain_before = get_committed_txns_count_for_pool(
        node_set, DOMAIN_LEDGER_ID)
    """
    We will try to send a 1 NYM txn with fees and 1 NYM without fees and 1 with fees
    In that case we expect, that we will have 3 domain txn and 2 token txn in ledgers
    """
    expected_domain_txns_count = 3
    expected_token_txns_count = 2
    # Delay COMMITs pool-wide so all five requests pile up unordered.
    with delay_rules(node_stashers, cDelay()):
        amount = get_amount_from_token_txn(mint_tokens)
        init_seq_no = 1
        request_1, request_2 = nyms_with_fees(2,
                                              helpers,
                                              fees_set,
                                              address_main,
                                              amount,
                                              init_seq_no=init_seq_no)
        """
        Sending 1 NYM txn with fees
        """
        r_with_1 = sdk_sign_and_submit_req_obj(looper,
                                               sdk_pool_handle,
                                               helpers.request._steward_wallet,
                                               request_1)
        looper.runFor(waits.expectedPrePrepareTime(len(node_set)))
        """
        Unset fees for pool
        """
        # Zero out every fee type so the next NYM is fee-free.
        r_unset_fees = helpers.general.set_fees_without_waiting(
            {k: 0 for (k, v) in fees.items()})
        looper.runFor(waits.expectedPrePrepareTime(len(node_set)))
        """
        Sending 1 NYM txn without fees
        """
        r_without = sdk_send_new_nym(looper, sdk_pool_handle,
                                     sdk_wallet_steward)
        looper.runFor(waits.expectedPrePrepareTime(len(node_set)))
        """
        Set fees for pool
        """
        r_set_fees = helpers.general.set_fees_without_waiting(fees)
        looper.runFor(waits.expectedPrePrepareTime(len(node_set)))
        """
        Send another NYM txn with fees
        """
        r_with_2 = sdk_sign_and_submit_req_obj(looper,
                                               sdk_pool_handle,
                                               helpers.request._steward_wallet,
                                               request_2)
        looper.runFor(waits.expectedPrePrepareTime(len(node_set)))
    """
    Reset delays and check, that all txns was ordered successfully
    """
    sdk_get_and_check_replies(looper, [r_with_1])
    sdk_get_and_check_replies(looper, r_unset_fees)
    sdk_get_and_check_replies(looper, r_without)
    sdk_get_and_check_replies(looper, r_set_fees)
    sdk_get_and_check_replies(looper, [r_with_2])

    committed_tokens_after = get_committed_txns_count_for_pool(
        node_set, TOKEN_LEDGER_ID)
    committed_domain_after = get_committed_txns_count_for_pool(
        node_set, DOMAIN_LEDGER_ID)
    assert committed_domain_after - committed_domain_before == \
        expected_domain_txns_count
    assert committed_tokens_after - committed_tokens_before == \
        expected_token_txns_count

    ensure_all_nodes_have_same_data(looper, nodeSetWithIntegratedTokenPlugin)
def testReplicasRejectSamePrePrepareMsg(looper, txnPoolNodeSet,
                                        sdk_pool_handle, sdk_wallet_client):
    """
    Replicas should not accept PRE-PREPARE for view "v" and prepare sequence
    number "n" if it has already accepted a request with view number "v" and
    sequence number "n"
    """
    numOfNodes = 4
    fValue = getMaxFailures(numOfNodes)
    primaryRepl = getPrimaryReplica(txnPoolNodeSet, 1)
    logger.debug("Primary Replica: {}".format(primaryRepl))
    nonPrimaryReplicas = getNonPrimaryReplicas(txnPoolNodeSet, 1)
    logger.debug("Non Primary Replicas: " + str(nonPrimaryReplicas))

    # Delay COMMITs so request is not ordered and checks can be made
    c_delay = 10
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(cDelay(delay=c_delay, instId=1))

    # First request goes through the normal pre-prepare/prepare path.
    req1 = sdk_send_random_requests(looper, sdk_pool_handle,
                                    sdk_wallet_client, 1)[0]
    request1 = sdk_json_to_request_object(req1[0])
    for npr in nonPrimaryReplicas:
        looper.run(eventually(checkPrepareReqSent, npr, request1.key,
                              primaryRepl.viewNo, retryWait=1))
    prePrepareReq = primaryRepl._ordering_service.sent_preprepares[
        primaryRepl.viewNo, primaryRepl.lastPrePrepareSeqNo]
    looper.run(eventually(checkPrePrepareReqRecvd,
                          nonPrimaryReplicas,
                          prePrepareReq,
                          retryWait=1))

    # logger.debug("Patching the primary replica's pre-prepare sending method ")
    # orig_method = primaryRepl.sendPrePrepare

    # def patched(self, ppReq):
    #     self._ordering_service.sent_preprepares[ppReq.viewNo, ppReq.ppSeqNo] = ppReq
    #     ppReq = updateNamedTuple(ppReq, **{f.PP_SEQ_NO.nm: 1})
    #     self.send(ppReq, TPCStat.PrePrepareSent)
    #
    # primaryRepl.sendPrePrepare = types.MethodType(patched, primaryRepl)

    # Make the primary malicious: its next PRE-PREPARE reuses an already
    # accepted (viewNo, ppSeqNo) pair.
    logger.debug(
        "Decrementing the primary replica's pre-prepare sequence number by "
        "one...")
    primaryRepl._ordering_service._lastPrePrepareSeqNo -= 1
    view_no = primaryRepl.viewNo
    request2 = sdk_json_to_request_object(
        sdk_send_random_requests(looper, sdk_pool_handle,
                                 sdk_wallet_client, 1)[0][0])
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet))
    looper.run(eventually(checkPrePrepareReqSent, primaryRepl, request2,
                          retryWait=1, timeout=timeout))

    # Since the node is malicious, it will not be able to process requests due
    # to conflicts in PRE-PREPARE
    primaryRepl.node.stop()
    looper.removeProdable(primaryRepl.node)

    # Re-create the conflicting PRE-PREPARE the (now stopped) primary sent,
    # so its receipt by the non-primaries can be asserted.
    reqIdr = [request2.digest]
    tm = get_utc_epoch()
    prePrepareReq = PrePrepare(
        primaryRepl.instId,
        view_no,
        primaryRepl.lastPrePrepareSeqNo,
        tm,
        reqIdr,
        init_discarded(),
        primaryRepl._ordering_service.generate_pp_digest(
            [request2.digest], view_no, tm),
        DOMAIN_LEDGER_ID,
        primaryRepl._ordering_service.get_state_root_hash(DOMAIN_LEDGER_ID),
        primaryRepl._ordering_service.get_txn_root_hash(DOMAIN_LEDGER_ID),
        0,
        True
    )

    logger.debug("""Checking whether all the non primary replicas have received
                the pre-prepare request with same sequence number""")
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet))
    looper.run(eventually(checkPrePrepareReqRecvd,
                          nonPrimaryReplicas,
                          prePrepareReq,
                          retryWait=1,
                          timeout=timeout))
    logger.debug("""Check that none of the non primary replicas didn't send
    any prepare message " in response to the pre-prepare message""")
    timeout = waits.expectedPrepareTime(len(txnPoolNodeSet))
    looper.runFor(timeout)  # expect prepare processing timeout

    # check if prepares have not been sent
    for npr in nonPrimaryReplicas:
        with pytest.raises(AssertionError):
            looper.run(eventually(checkPrepareReqSent, npr, request2.key,
                                  view_no, retryWait=1, timeout=timeout))

    # The first (legitimate) request must still be ordered once the delayed
    # COMMITs get through.
    timeout = waits.expectedTransactionExecutionTime(
        len(txnPoolNodeSet)) + c_delay
    result1 = sdk_get_replies(looper, [req1])[0][1]
    logger.debug("request {} gives result {}".format(request1, result1))
def testPrePrepareWhenPrimaryStatusIsUnknown(tdir_for_func):
    """
    A node (D) that does not yet know whether it is primary should stash
    incoming 3-phase messages and process them once the primary is known.

    NOTE(review): this test references `request` and `delayD`, but both are
    only created in commented-out code below, so as written the body raises
    NameError when run — the client setup and the PRIMARY-delay need to be
    restored (see the TODO below).
    """
    nodeNames = genNodeNames(4)
    nodeReg = genNodeReg(names=nodeNames)
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            prepareNodeSet(looper, nodeSet)
            nodeA, nodeB, nodeC, nodeD = tuple(
                addNodeBack(
                    nodeSet, looper, nodeNames[i]) for i in range(0, 4))

            # Since primary selection is round robin, A and B will be primaries

            # Nodes C and D delays self nomination so A and B can become
            # primaries
            # nodeC.delaySelfNomination(10)
            # nodeD.delaySelfNomination(10)

            # Node D delays receiving PRIMARY messages from all nodes so it
            # will not know whether it is primary or not
            # delayD = 5
            # nodeD.nodeIbStasher.delay(delayerMsgTuple(delayD, Primary))

            checkPoolReady(looper=looper, nodes=nodeSet)

            # client1, wal = setupClient(looper, nodeSet, tmpdir=tdir_for_func)
            # request = sendRandomRequest(wal, client1)

            # TODO Rethink this
            instNo = 0

            timeout = waits.expectedClientRequestPropagationTime(len(nodeSet))
            for i in range(3):
                node = nodeSet.getNode(nodeNames[i])
                # Nodes A, B and C should have received PROPAGATE request
                # from Node D
                # FIXME(review): `request` is undefined here — its creation is
                # commented out above.
                looper.run(
                    eventually(checkIfPropagateRecvdFromNode, node, nodeD,
                               request.identifier,
                               request.reqId, retryWait=1, timeout=timeout))

            def assert_msg_count(typ, count):
                # Count of 3PC messages of `typ` stashed by D's replica.
                assert len(getPendingRequestsForReplica(
                    nodeD.replicas[instNo], typ)) == count

            # Node D should have 1 pending PRE-PREPARE request
            timeout = waits.expectedPrePrepareTime(len(nodeSet))
            looper.run(eventually(assert_msg_count, PrePrepare, 1,
                                  retryWait=1, timeout=timeout))

            # Node D should have 2 pending PREPARE requests(from node B and C)
            timeout = waits.expectedPrepareTime(len(nodeSet))
            looper.run(eventually(assert_msg_count, Prepare, 2,
                                  retryWait=1, timeout=timeout))

            # Its been checked above that replica stashes 3 phase messages in
            # lack of primary, now avoid delay (fix the network)
            nodeD.nodeIbStasher.reset_delays_and_process_delayeds()

            # Node D should have no pending PRE-PREPARE, PREPARE or COMMIT
            # requests
            # FIXME(review): `delayD` is undefined (commented out above).
            # The lambda's late binding of `reqType` is harmless only because
            # looper.run blocks within each loop iteration.
            for reqType in [PrePrepare, Prepare, Commit]:
                looper.run(
                    eventually(
                        lambda: assertLength(
                            getPendingRequestsForReplica(
                                nodeD.replicas[instNo], reqType),
                            0),
                        retryWait=1,
                        timeout=delayD))  # wait little more than delay
def testReplicasRejectSamePrePrepareMsg(looper, txnPoolNodeSet,
                                        sdk_pool_handle, sdk_wallet_client):
    """
    Replicas should not accept PRE-PREPARE for view "v" and prepare sequence
    number "n" if it has already accepted a request with view number "v" and
    sequence number "n"
    """
    numOfNodes = 4
    fValue = getMaxFailures(numOfNodes)
    primaryRepl = getPrimaryReplica(txnPoolNodeSet, 1)
    logger.debug("Primary Replica: {}".format(primaryRepl))
    nonPrimaryReplicas = getNonPrimaryReplicas(txnPoolNodeSet, 1)
    logger.debug("Non Primary Replicas: " + str(nonPrimaryReplicas))

    # Delay COMMITs so request is not ordered and checks can be made
    c_delay = 10
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(cDelay(delay=c_delay, instId=1))

    # First request goes through the normal pre-prepare/prepare path.
    req1 = sdk_send_random_requests(looper, sdk_pool_handle,
                                    sdk_wallet_client, 1)[0]
    request1 = sdk_json_to_request_object(req1[0])
    for npr in nonPrimaryReplicas:
        looper.run(eventually(checkPrepareReqSent, npr, request1.key,
                              primaryRepl.viewNo, retryWait=1))
    prePrepareReq = primaryRepl.sentPrePrepares[
        primaryRepl.viewNo, primaryRepl.lastPrePrepareSeqNo]
    looper.run(eventually(checkPrePrepareReqRecvd,
                          nonPrimaryReplicas,
                          prePrepareReq,
                          retryWait=1))

    # logger.debug("Patching the primary replica's pre-prepare sending method ")
    # orig_method = primaryRepl.sendPrePrepare

    # def patched(self, ppReq):
    #     self.sentPrePrepares[ppReq.viewNo, ppReq.ppSeqNo] = ppReq
    #     ppReq = updateNamedTuple(ppReq, **{f.PP_SEQ_NO.nm: 1})
    #     self.send(ppReq, TPCStat.PrePrepareSent)
    #
    # primaryRepl.sendPrePrepare = types.MethodType(patched, primaryRepl)

    # Make the primary malicious: its next PRE-PREPARE reuses an already
    # accepted (viewNo, ppSeqNo) pair.
    logger.debug(
        "Decrementing the primary replica's pre-prepare sequence number by "
        "one...")
    primaryRepl._lastPrePrepareSeqNo -= 1
    view_no = primaryRepl.viewNo
    request2 = sdk_json_to_request_object(
        sdk_send_random_requests(looper, sdk_pool_handle,
                                 sdk_wallet_client, 1)[0][0])
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet))
    looper.run(eventually(checkPrePrepareReqSent, primaryRepl, request2,
                          retryWait=1, timeout=timeout))

    # Since the node is malicious, it will not be able to process requests due
    # to conflicts in PRE-PREPARE
    primaryRepl.node.stop()
    looper.removeProdable(primaryRepl.node)

    # Re-create the conflicting PRE-PREPARE the (now stopped) primary sent,
    # so its receipt by the non-primaries can be asserted.
    reqIdr = [request2.digest]
    prePrepareReq = PrePrepare(
        primaryRepl.instId,
        view_no,
        primaryRepl.lastPrePrepareSeqNo,
        get_utc_epoch(),
        reqIdr,
        init_discarded(),
        primaryRepl.batchDigest([request2]),
        DOMAIN_LEDGER_ID,
        primaryRepl.stateRootHash(DOMAIN_LEDGER_ID),
        primaryRepl.txnRootHash(DOMAIN_LEDGER_ID),
        0,
        True
    )

    logger.debug("""Checking whether all the non primary replicas have received
                the pre-prepare request with same sequence number""")
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet))
    looper.run(eventually(checkPrePrepareReqRecvd,
                          nonPrimaryReplicas,
                          prePrepareReq,
                          retryWait=1,
                          timeout=timeout))
    logger.debug("""Check that none of the non primary replicas didn't send
    any prepare message " in response to the pre-prepare message""")
    timeout = waits.expectedPrepareTime(len(txnPoolNodeSet))
    looper.runFor(timeout)  # expect prepare processing timeout

    # check if prepares have not been sent
    for npr in nonPrimaryReplicas:
        with pytest.raises(AssertionError):
            looper.run(eventually(checkPrepareReqSent, npr, request2.key,
                                  view_no, retryWait=1, timeout=timeout))

    # The first (legitimate) request must still be ordered once the delayed
    # COMMITs get through.
    timeout = waits.expectedTransactionExecutionTime(
        len(txnPoolNodeSet)) + c_delay
    result1 = sdk_get_replies(looper, [req1])[0][1]
    logger.debug("request {} gives result {}".format(request1, result1))
def test_revert_auth_rule_changing(looper, txnPoolNodeSet,
                                   sdk_wallet_trustee,
                                   sdk_wallet_steward,
                                   sdk_pool_handle):
    """
    An AUTH_RULE txn that never gets ordered (PREPAREs/COMMITs delayed) must
    be reverted from uncommitted config state by catch-up, so the original
    authorization rule applies again afterwards.
    """
    node_stashers = [n.nodeIbStasher for n in txnPoolNodeSet]
    wh, _ = sdk_wallet_trustee
    new_steward_did, new_steward_verkey = create_verkey_did(looper, wh)
    new_steward_did2, new_steward_verkey2 = create_verkey_did(looper, wh)
    """We try to change rule for adding new steward. For this case we """
    changed_constraint = AuthConstraint(role=STEWARD,
                                        sig_count=1)
    action = AuthActionAdd(txn_type=NYM,
                           field=ROLE,
                           value=STEWARD)
    # Block ordering of the AUTH_RULE txn: delay PREPAREs, COMMITs and the
    # corresponding message-replies, so it only reaches uncommitted state.
    with delay_rules_without_processing(
            node_stashers, pDelay(), cDelay(),
            msg_rep_delay(types_to_delay=[PREPARE, COMMIT])):
        sdk_send_and_check_auth_rule_request(looper,
                                             sdk_pool_handle,
                                             sdk_wallet_trustee,
                                             auth_action=ADD_PREFIX,
                                             auth_type=action.txn_type,
                                             field=action.field,
                                             new_value=action.value,
                                             old_value=None,
                                             constraint=changed_constraint.as_dict,
                                             no_wait=True)
        looper.runFor(waits.expectedPrePrepareTime(len(txnPoolNodeSet)))
        """
        Try to add new steward by already existed trustee.
        Validation should raise exception because we change uncommitted state
        by adding new rule, that "Only steward can add new steward"
        """
        with pytest.raises(RequestRejectedException,
                           match="Not enough STEWARD signatures"):
            sdk_add_new_nym(looper,
                            sdk_pool_handle,
                            sdk_wallet_trustee,
                            'newSteward1',
                            STEWARD_STRING,
                            dest=new_steward_did,
                            verkey=new_steward_verkey)
        looper.runFor(waits.expectedPrePrepareTime(len(txnPoolNodeSet)))
        """
        Catchup should revert config_state and discard rule changing
        """
        for n in txnPoolNodeSet:
            n.start_catchup()
        # clear all request queues to not re-send the AuthRule txns again
        for n in txnPoolNodeSet:
            n.requests.clear()
            for r in n.replicas.values():
                r._ordering_service.requestQueues[DOMAIN_LEDGER_ID].clear()
                r._ordering_service.requestQueues[CONFIG_LEDGER_ID].clear()
        looper.run(
            eventually(
                lambda nodes: assertExp(
                    all([n.mode == Mode.participating for n in nodes])),
                txnPoolNodeSet))
    # do view change to not send PrePrepares with the same ppSeqNo and viewNo
    ensure_view_change_complete(looper, txnPoolNodeSet)
    """
    Try to create new steward by steward
    We can not do this, because AUTH_RULE txn was reverted
    """
    with pytest.raises(RequestRejectedException,
                       match="Not enough TRUSTEE signatures"):
        sdk_add_new_nym(looper,
                        sdk_pool_handle,
                        sdk_wallet_steward,
                        'newSteward2',
                        STEWARD_STRING,
                        dest=new_steward_did2,
                        verkey=new_steward_verkey2)