def testOrderingWhenPrePrepareNotReceived(looper, nodeSet, up, client1, wallet1):
    """
    Send commits and prepares but delay pre-prepare such that enough prepares
    and commits are received, now the request should not be ordered until
    pre-prepare is received and ordering should just happen once,
    """
    nonPrimReps = getNonPrimaryReplicas(nodeSet, 0)
    slowRep = nonPrimReps[0]
    slowNode = slowRep.node
    # Delay PRE-PREPAREs for instance 0 reaching the slow node by 10 seconds
    slowNode.nodeIbStasher.delay(ppDelay(10, 0))
    sendRandomRequest(wallet1, client1)

    stash = []
    origMethod = slowRep.processReqDigest

    # Stash forwarded request digests instead of processing them so the
    # replica cannot order via the forwarded request path either.
    def patched(self, msg):
        stash.append(msg)

    patchedMethod = types.MethodType(patched, slowRep)
    slowRep.processReqDigest = patchedMethod

    def chk1():
        # COMMITs should pile up while the PRE-PREPARE is still delayed
        assert len(slowRep.commitsWaitingForPrepare) > 0

    looper.run(eventually(chk1, timeout=4))

    # Replay the stashed digests through the original handler
    for item in stash:
        origMethod(item)

    def chk2():
        # Waiting queue drained and the request ordered exactly once
        assert len(slowRep.commitsWaitingForPrepare) == 0
        assert slowRep.spylog.count(slowRep.doOrder.__name__) == 1

    looper.run(eventually(chk2, timeout=12))
def test_slow_node_has_warn_unordered_log_msg(looper, nodeSet, wallet1, client1, patch_monitors):
    """
    A node whose commit processing is delayed long enough accumulates
    unordered requests and must emit the 'not participating' warning, while
    the healthy nodes must not; after the warning window passes and ordering
    resumes, the warning state and the tracked request keys are cleaned up.
    """
    npr = getNonPrimaryReplicas(nodeSet, 0)[0]
    slow_node = npr.node
    monitor = nodeSet[0].monitor
    # Delay long enough to span the whole sequence of requests sent below
    delay = monitor.WARN_NOT_PARTICIPATING_MIN_DIFF_SEC * \
        monitor.WARN_NOT_PARTICIPATING_UNORDERED_NUM + 10
    delaysCommitProcessing(slow_node, delay=delay)

    assert no_any_warn(*nodeSet), \
        'all nodes do not have warnings before test'

    for i in range(monitor.WARN_NOT_PARTICIPATING_UNORDERED_NUM):
        req = sendRandomRequest(wallet1, client1)
        waitForSufficientRepliesForRequests(looper, client1, requests=[req])
        looper.runFor(monitor.WARN_NOT_PARTICIPATING_MIN_DIFF_SEC)

    others = [node for node in nodeSet if node.name != slow_node.name]
    assert no_any_warn(*others), \
        'others do not have warning after test'
    assert has_some_warn(slow_node), \
        'slow node has the warning'

    ordered_requests_keys_len_before = len(monitor.ordered_requests_keys)
    # wait at least windows time
    looper.runFor(monitor.WARN_NOT_PARTICIPATING_WINDOW_MINS * 60)
    req = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[req])
    assert no_any_warn(*others), 'others do not have warning'
    assert no_last_warn(slow_node), \
        'the last call of warn_has_lot_unordered_requests returned False ' \
        'so slow node has no the warning for now'
    # tracked keys should shrink once the window expires
    assert len(monitor.ordered_requests_keys) < ordered_requests_keys_len_before, \
        "ordered_requests_keys was cleaned up"
def testQueueingReqFromFutureView(delayedPerf, looper, nodeSet, up, client1):
    """
    Test if every node queues 3 Phase requests(PRE-PREPARE, PREPARE and COMMIT)
    that come from a view which is greater than the current view
    """
    f = getMaxFailures(nodeCount)

    # Delay processing of instance change on a node
    nodeA = nodeSet.Alpha
    nodeA.nodeIbStasher.delay(icDelay(60))

    nonPrimReps = getNonPrimaryReplicas(nodeSet, 0)
    # Delay processing of PRE-PREPARE from all non primary replicas of master
    # so master's throughput falls and view changes
    ppDelayer = ppDelay(5, 0)
    for r in nonPrimReps:
        r.node.nodeIbStasher.delay(ppDelayer)

    sendReqsToNodesAndVerifySuffReplies(looper, client1, 4,
                                        timeout=5 * nodeCount)

    # Every node except Node A should have a view change
    for node in nodeSet:
        if node.name != nodeA.name:
            looper.run(eventually(
                partial(checkViewChangeInitiatedForNode, node, 0),
                retryWait=1, timeout=20))

    # Node A's view should not have changed yet (its instance-change
    # processing is still delayed, so the eventually() must time out)
    with pytest.raises(AssertionError):
        looper.run(eventually(partial(
            checkViewChangeInitiatedForNode, nodeA, 0),
            retryWait=1, timeout=20))

    # NodeA should not have any pending 3 phase request for a later view
    for r in nodeA.replicas:  # type: TestReplica
        assert len(r.threePhaseMsgsForLaterView) == 0

    # Reset delays on incoming messages from all nodes
    for node in nodeSet:
        node.nodeIbStasher.nodelay(ppDelayer)

    # Send one more request
    sendRandomRequest(client1)

    def checkPending3PhaseReqs():
        # Get all replicas that have their primary status decided
        reps = [rep for rep in nodeA.replicas if rep.isPrimary is not None]
        # Atleast one replica should have its primary status decided
        assert len(reps) > 0
        for r in reps:  # type: TestReplica
            logging.debug("primary status for replica {} is {}".format(r, r.primaryNames))
            assert len(r.threePhaseMsgsForLaterView) > 0

    # NodeA should now have pending 3 phase request for a later view
    looper.run(eventually(checkPending3PhaseReqs, retryWait=1, timeout=30))
def testOrderingWhenPrePrepareNotReceived(looper, nodeSet, up, client1, wallet1):
    """
    Send commits and prepares but delay pre-prepare such that enough prepares
    and commits are received, now the request should not be ordered until
    pre-prepare is received and ordering should just happen once,
    """
    nonPrimReps = getNonPrimaryReplicas(nodeSet, 0)
    slowRep = nonPrimReps[0]
    slowNode = slowRep.node
    # Delay PRE-PREPAREs for instance 0 reaching the slow node by 10 seconds
    slowNode.nodeIbStasher.delay(ppDelay(10, 0))

    stash = []
    origMethod = slowRep.processReqDigest

    # Stash forwarded request digests instead of processing them so the
    # replica cannot order via the forwarded request path either.
    def patched(self, msg):
        stash.append(msg)

    patchedMethod = types.MethodType(patched, slowRep)
    slowRep.processReqDigest = patchedMethod

    def chk1():
        # COMMITs should pile up while the PRE-PREPARE is still delayed
        assert len(slowRep.commitsWaitingForPrepare) > 0

    sendRandomRequest(wallet1, client1)
    looper.run(eventually(chk1, timeout=4))

    # Replay the stashed digests through the original handler
    for item in stash:
        origMethod(item)

    def chk2():
        # Waiting queue drained and the request ordered exactly once
        assert len(slowRep.commitsWaitingForPrepare) == 0
        assert slowRep.spylog.count(slowRep.doOrder.__name__) == 1

    looper.run(eventually(chk2, timeout=12))
def testOrderingCase1(looper, nodeSet, up, client1, wallet1):
    """
    Scenario -> PRE-PREPARE not received by the replica, Request not received
    for ordering by the replica, but received enough commits to start ordering.
    It queues up the request so when a PRE-PREPARE is received or request is
    received for ordering, an order can be triggered
    https://www.pivotaltracker.com/story/show/125239401

    Reproducing by - Pick a node with no primary replica, replica ignores
    forwarded request to replica and delay reception of PRE-PREPARE sufficiently
    so that enough COMMITs reach to trigger ordering.
    """
    replica = getNonPrimaryReplicas(nodeSet, instId=0)[0]
    delaysPrePrepareProcessing(replica.node, delay=10, instId=0)

    # Make the replica drop forwarded request digests entirely
    def doNotProcessReqDigest(self, rd: ReqDigest):
        pass

    patchedMethod = types.MethodType(doNotProcessReqDigest, replica)
    replica.processReqDigest = patchedMethod

    def chk(n):
        assert replica.spylog.count(replica.doOrder.__name__) == n

    sendRandomRequest(wallet1, client1)
    # Not ordered while the PRE-PREPARE is still delayed...
    looper.run(eventually(chk, 0, retryWait=1, timeout=5))
    # ...and ordered exactly once after the delay elapses
    looper.run(eventually(chk, 1, retryWait=1, timeout=15))
def testOrderingCase1(looper, nodeSet, up, client1, wallet1):
    """
    Scenario -> PRE-PREPARE not received by the replica, Request not received
    for ordering by the replica, but received enough commits to start ordering.
    It queues up the request so when a PRE-PREPARE is received or request is
    received for ordering, an order can be triggered
    https://www.pivotaltracker.com/story/show/125239401

    Reproducing by - Pick a node with no primary replica, replica ignores
    forwarded request to replica and delay reception of PRE-PREPARE sufficiently
    so that enough COMMITs reach to trigger ordering.
    """
    delay = 10
    replica = getNonPrimaryReplicas(nodeSet, instId=0)[0]
    delaysPrePrepareProcessing(replica.node, delay=delay, instId=0)

    # Make the replica drop forwarded requests entirely.
    # NOTE(review): the helper is named doNotProcessReqDigest but is patched
    # over ``processRequest`` — confirm that is the intended hook in this
    # version of the replica API.
    def doNotProcessReqDigest(self, _):
        pass

    patchedMethod = types.MethodType(doNotProcessReqDigest, replica)
    replica.processRequest = patchedMethod

    def chk(n):
        assert replica.spylog.count(replica.doOrder.__name__) == n

    sendRandomRequest(wallet1, client1)
    # Not ordered while the PRE-PREPARE is still delayed...
    timeout = delay - 5
    looper.run(eventually(chk, 0, retryWait=1, timeout=timeout))
    # ...and ordered exactly once after the delay elapses
    timeout = delay + 5
    looper.run(eventually(chk, 1, retryWait=1, timeout=timeout))
def testReplicasRejectSamePrePrepareMsg(looper, nodeSet, client1, wallet1):
    """
    Replicas should not accept PRE-PREPARE for view "v" and prepare sequence
    number "n" if it has already accepted a request with view number "v" and
    sequence number "n"
    """
    numOfNodes = 4
    fValue = getMaxFailures(numOfNodes)
    request1 = sendRandomRequest(wallet1, client1)
    result1 = looper.run(
        eventually(checkSufficientRepliesRecvd, client1.inBox,
                   request1.reqId, fValue, retryWait=1, timeout=5))
    logger.debug("request {} gives result {}".format(request1, result1))
    primaryRepl = getPrimaryReplica(nodeSet)
    logger.debug("Primary Replica: {}".format(primaryRepl))
    logger.debug(
        "Decrementing the primary replica's pre-prepare sequence number by "
        "one...")
    # Force the primary to reuse an already-consumed sequence number
    primaryRepl.lastPrePrepareSeqNo -= 1
    request2 = sendRandomRequest(wallet1, client1)
    looper.run(
        eventually(checkPrePrepareReqSent, primaryRepl, request2,
                   retryWait=1, timeout=10))

    nonPrimaryReplicas = getNonPrimaryReplicas(nodeSet)
    logger.debug("Non Primary Replicas: " + str(nonPrimaryReplicas))
    # The duplicate PRE-PREPARE the non-primaries are expected to receive
    prePrepareReq = PrePrepare(primaryRepl.instId,
                               primaryRepl.viewNo,
                               primaryRepl.lastPrePrepareSeqNo,
                               wallet1.defaultId,
                               request2.reqId,
                               request2.digest,
                               time.time())
    logger.debug("""Checking whether all the non primary replicas have received the pre-prepare request with same sequence number""")
    looper.run(
        eventually(checkPrePrepareReqRecvd, nonPrimaryReplicas,
                   prePrepareReq, retryWait=1, timeout=10))
    logger.debug("""Check that none of the non primary replicas didn't send any prepare message " in response to the pre-prepare message""")
    # No PREPARE must follow the duplicate; the check must keep failing
    for npr in nonPrimaryReplicas:
        with pytest.raises(AssertionError):
            looper.run(
                eventually(checkPrepareReqSent, npr, wallet1.defaultId,
                           request2.reqId, retryWait=1, timeout=10))
def testReplicasRejectSamePrePrepareMsg(looper, nodeSet, client1, wallet1):
    """
    Replicas should not accept PRE-PREPARE for view "v" and prepare sequence
    number "n" if it has already accepted a request with view number "v" and
    sequence number "n"
    """
    numOfNodes = 4
    fValue = getMaxFailures(numOfNodes)
    request1 = sendRandomRequest(wallet1, client1)
    result1 = looper.run(
        eventually(checkSufficientRepliesRecvd, client1.inBox,
                   request1.reqId, fValue, retryWait=1, timeout=5))
    logger.debug("request {} gives result {}".format(request1, result1))
    primaryRepl = getPrimaryReplica(nodeSet)
    logger.debug("Primary Replica: {}".format(primaryRepl))
    logger.debug(
        "Decrementing the primary replica's pre-prepare sequence number by "
        "one...")
    # Force the primary to reuse an already-consumed sequence number
    primaryRepl.lastPrePrepareSeqNo -= 1
    request2 = sendRandomRequest(wallet1, client1)
    looper.run(eventually(checkPrePrepareReqSent, primaryRepl, request2,
                          retryWait=1, timeout=10))

    nonPrimaryReplicas = getNonPrimaryReplicas(nodeSet)
    logger.debug("Non Primary Replicas: " + str(nonPrimaryReplicas))
    # The duplicate PRE-PREPARE the non-primaries are expected to receive
    prePrepareReq = PrePrepare(
        primaryRepl.instId,
        primaryRepl.viewNo,
        primaryRepl.lastPrePrepareSeqNo,
        wallet1.defaultId,
        request2.reqId,
        request2.digest,
        time.time()
    )
    logger.debug("""Checking whether all the non primary replicas have received the pre-prepare request with same sequence number""")
    looper.run(eventually(checkPrePrepareReqRecvd, nonPrimaryReplicas,
                          prePrepareReq, retryWait=1, timeout=10))
    logger.debug("""Check that none of the non primary replicas didn't send any prepare message " in response to the pre-prepare message""")
    # No PREPARE must follow the duplicate; the check must keep failing
    for npr in nonPrimaryReplicas:
        with pytest.raises(AssertionError):
            looper.run(eventually(checkPrepareReqSent, npr, wallet1.defaultId,
                                  request2.reqId, retryWait=1, timeout=10))
def testOrderingWhenPrePrepareNotReceived(looper, nodeSet, up, client1, wallet1):
    """
    Send commits but delay pre-prepare and prepares such that enough
    commits are received, now the request should not be ordered until
    pre-prepare is received and ordering should just happen once,
    """
    delay = 10
    non_prim_reps = getNonPrimaryReplicas(nodeSet, 0)
    slow_rep = non_prim_reps[0]
    slow_node = slow_rep.node
    # Delay both PRE-PREPAREs and PREPAREs for instance 0 on the slow node
    slow_node.nodeIbStasher.delay(ppDelay(delay, 0))
    slow_node.nodeIbStasher.delay(pDelay(delay, 0))

    stash_pp = []
    stash_p = []
    orig_pp_method = slow_rep.processPrePrepare
    orig_p_method = slow_rep.processPrepare

    # Stash PRE-PREPAREs and PREPAREs (with their senders) instead of
    # processing them, so only COMMITs can reach the replica.
    def patched_pp(self, msg, sender):
        stash_pp.append((msg, sender))

    def patched_p(self, msg, sender):
        stash_p.append((msg, sender))

    slow_rep.processPrePrepare = \
        types.MethodType(patched_pp, slow_rep)
    slow_rep.processPrepare = \
        types.MethodType(patched_p, slow_rep)

    def chk1():
        # COMMITs should pile up while PRE-PREPARE/PREPAREs are withheld
        assert len(slow_rep.commitsWaitingForPrepare) > 0

    sendRandomRequest(wallet1, client1)
    timeout = waits.expectedPrePrepareTime(len(nodeSet)) + delay
    looper.run(eventually(chk1, retryWait=1, timeout=timeout))

    # Replay the stashed messages through the original handlers
    for m, s in stash_pp:
        orig_pp_method(m, s)
    for m, s in stash_p:
        orig_p_method(m, s)

    def chk2():
        # Waiting queue drained and the request ordered exactly once
        assert len(slow_rep.commitsWaitingForPrepare) == 0
        assert slow_rep.spylog.count(slow_rep.doOrder.__name__) == 1

    timeout = waits.expectedOrderingTime(len(non_prim_reps) + 1) + 2 * delay
    looper.run(eventually(chk2, retryWait=1, timeout=timeout))
def testReqExecWhenReturnedByMaster(tdir_for_func, tconf_for_func):
    """
    Only the master instance's Ordered messages lead to request execution:
    processOrdered must return a truthy result for the master instance and
    False for every backup instance.
    """
    with TestNodeSet(tconf_for_func, count=4, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            client1, wallet1 = setupNodesAndClient(looper, nodeSet,
                                                   tmpdir=tdir_for_func)
            req = sendRandomRequest(wallet1, client1)
            waitForSufficientRepliesForRequests(looper, client1,
                                                requests=[req])

            async def chk():
                for node in nodeSet:
                    entries = node.spylog.getAll(node.processOrdered.__name__)
                    for entry in entries:
                        arg = entry.params['ordered']
                        result = entry.result
                        if arg.instId == node.instances.masterId:
                            assert result
                        else:
                            # backup instances must not trigger execution
                            assert result is False

            timeout = waits.expectedOrderingTime(
                nodeSet.nodes['Alpha'].instances.count)
            looper.run(eventually(chk, timeout=timeout))
def testClientRetryRequestWhenReplyNotReceived(looper, nodeSet, client1, wallet1, tconf):
    """
    A node say Alpha sends ACK but doesn't send REPLY. The connect resends
    the request and gets REPLY
    """
    alpha = nodeSet.Alpha
    skipped = False
    origTrans = alpha.transmitToClient

    def skipReplyOnce(msg, remoteName):
        # Swallow exactly the first REPLY; transmit everything else normally
        nonlocal skipped
        if isinstance(msg, Reply) and not skipped:
            skipped = True
            return
        origTrans(msg, remoteName)

    alpha.transmitToClient = skipReplyOnce
    req = sendRandomRequest(wallet1, client1)
    # All nodes (Alpha included) still ACK the request
    coros = [partial(checkReqAck, client1, node, *req.key) for node in nodeSet]
    looper.run(eventuallyAll(*coros, retryWait=.5, totalTimeout=3))
    # Only 3 replies arrive at first (Alpha's was dropped)...
    looper.run(eventually(checkReplyCount, client1, *req.key, 3,
                          retryWait=1, timeout=3))
    # ...and the 4th arrives after the client's reply-timeout-driven resend
    looper.run(eventually(checkReplyCount, client1, *req.key, 4,
                          retryWait=1,
                          timeout=tconf.CLIENT_REPLY_TIMEOUT + 5))
def testClientNotRetryRequestWhenReqnackReceived(looper, nodeSet, client1, wallet1, tconf):
    """
    A node sends REQNACK. The client does not resend Request.
    """
    alpha = nodeSet.Alpha
    origProcReq = alpha.processRequest
    origTrans = alpha.transmitToClient

    # Alpha NACKs every request instead of processing it
    def nackReq(self, req, frm):
        self.transmitToClient(RequestNack(*req.key, reason="testing"), frm)

    # ...and transmits nothing but RequestNacks to the client
    def onlyTransNack(msg, remoteName):
        if not isinstance(msg, RequestNack):
            return
        origTrans(msg, remoteName)

    alpha.clientMsgRouter.routes[Request] = types.MethodType(nackReq, alpha)
    alpha.transmitToClient = onlyTransNack

    totalResends = client1.spylog.count(client1.resendRequests.__name__)
    req = sendRandomRequest(wallet1, client1)

    # Wait till ACK timeout; the NACK suppresses the resend
    looper.runFor(tconf.CLIENT_REQACK_TIMEOUT + 1)
    assert client1.spylog.count(client1.resendRequests.__name__) == totalResends

    # Wait till REPLY timeout; still no resend
    looper.runFor(tconf.CLIENT_REPLY_TIMEOUT - tconf.CLIENT_REQACK_TIMEOUT + 1)
    assert client1.spylog.count(client1.resendRequests.__name__) == totalResends

    # The other three nodes still reply
    looper.run(eventually(checkReplyCount, client1, *req.key, 3,
                          retryWait=1, timeout=3))
    # Undo the patches for subsequent tests
    alpha.clientMsgRouter.routes[Request] = origProcReq
    alpha.transmitToClient = origTrans
def requests(looper, wallet1, client1):
    """Fixture: issue five random requests, waiting for sufficient replies
    after each one, and return the list of sent requests."""
    issued = []
    for _ in range(5):
        current = sendRandomRequest(wallet1, client1)
        waitForSufficientRepliesForRequests(looper, client1,
                                            requests=[current])
        issued.append(current)
    return issued
def testClientNotRetryingRequestAfterMaxTriesDone(looper, nodeSet, client1, wallet1, tconf):
    """
    A client sends Request to a node but the node never responds to client.
    The client resends the request but only the number of times defined in
    its configuration and no more
    """
    alpha = nodeSet.Alpha
    origTrans = alpha.transmitToClient

    def dontTransmitReply(msg, remoteName):
        # Drop every REPLY so the client keeps retrying; everything else
        # (ACKs etc.) is transmitted normally.
        if isinstance(msg, Reply):
            return
        origTrans(msg, remoteName)

    alpha.transmitToClient = dontTransmitReply

    totalResends = client1.spylog.count(client1.resendRequests.__name__)
    req = sendRandomRequest(wallet1, client1)
    # Wait for more than REPLY timeout
    looper.runFor((tconf.CLIENT_MAX_RETRY_REPLY + 2) *
                  tconf.CLIENT_REPLY_TIMEOUT + 2)
    looper.run(
        eventually(checkReplyCount, client1, *req.key, 3, retryWait=1,
                   timeout=3))
    # The client retried exactly CLIENT_MAX_RETRY_REPLY times and then gave up
    assert client1.spylog.count(client1.resendRequests.__name__) == \
        (totalResends + tconf.CLIENT_MAX_RETRY_REPLY)
    assert req.key not in client1.expectingAcksFor
    assert req.key not in client1.expectingRepliesFor
    # Bug fix: restore the attribute that was actually patched. The original
    # assigned the saved transmit function to ``alpha.processRequest``,
    # clobbering an unrelated method and leaving ``transmitToClient``
    # patched for any later test using the same node.
    alpha.transmitToClient = origTrans
def testClientNotRetryingRequestAfterMaxTriesDone(looper, nodeSet, client1, wallet1, tconf):
    """
    A client sends Request to a node but the node never responds to client.
    The client resends the request but only the number of times defined in
    its configuration and no more
    """
    alpha = nodeSet.Alpha
    origTrans = alpha.transmitToClient

    def dontTransmitReply(msg, remoteName):
        # Drop every REPLY so the client keeps retrying; everything else
        # (ACKs etc.) is transmitted normally.
        if isinstance(msg, Reply):
            return
        origTrans(msg, remoteName)

    alpha.transmitToClient = dontTransmitReply

    totalResends = client1.spylog.count(client1.resendRequests.__name__)
    req = sendRandomRequest(wallet1, client1)
    # Wait for more than REPLY timeout
    looper.runFor((tconf.CLIENT_MAX_RETRY_REPLY+2)*tconf.CLIENT_REPLY_TIMEOUT+2)
    looper.run(eventually(checkReplyCount, client1, *req.key, 3,
                          retryWait=1, timeout=3))
    # The client retried exactly CLIENT_MAX_RETRY_REPLY times and then gave up
    assert client1.spylog.count(client1.resendRequests.__name__) == \
        (totalResends + tconf.CLIENT_MAX_RETRY_REPLY)
    assert req.key not in client1.expectingAcksFor
    assert req.key not in client1.expectingRepliesFor
    # Bug fix: restore the attribute that was actually patched. The original
    # assigned the saved transmit function to ``alpha.processRequest``,
    # clobbering an unrelated method and leaving ``transmitToClient``
    # patched for any later test using the same node.
    alpha.transmitToClient = origTrans
def testMerkleProofForNonFirstLeaf(looper, nodeSet, wallet1, client1, replied1):
    """
    The Merkle audit path in a reply must verify even when the transaction is
    not the first leaf of the ledger (replied1 guarantees an earlier txn).
    """
    req2 = sendRandomRequest(wallet1, client1)
    f = nodeSet.f
    looper.run(eventually(checkSufficientRepliesRecvd, client1.inBox,
                          req2.reqId, f, retryWait=1, timeout=15))
    replies = client1.getRepliesFromAllNodes(*req2.key).values()
    assert Client.verifyMerkleProof(*replies)
def testClientRetryRequestWhenAckNotReceived(looper, nodeSet, client1, wallet1, tconf):
    """
    The client gets disconnected from node say Alpha but does not know it.
    It sends request to all nodes including Alpha, expects ACK and REPLY from
    Alpha too, does not get it, so reconnects to Alpha and sends request
    again and gets REPLY
    """
    alpha = nodeSet.Alpha
    # Sever Alpha's client-facing connection without telling the client
    r = alpha.clientstack.getRemote(client1.stackName)
    alpha.clientstack.removeRemote(r)
    req = sendRandomRequest(wallet1, client1)

    def chkAcks():
        for node in nodeSet:
            if node != alpha:
                checkReqAck(client1, node, *req.key)
            else:
                # Alpha must NOT have acknowledged
                with pytest.raises(AssertionError):
                    checkReqAck(client1, node, *req.key)

    looper.run(eventually(chkAcks, retryWait=1, timeout=3))
    # After the ACK timeout the client reconnects, resends, and gets all 4
    looper.run(eventually(checkReplyCount, client1, *req.key, 4,
                          retryWait=1,
                          timeout=tconf.CLIENT_REQACK_TIMEOUT+10))
def testReplyWhenRepliesFromExactlyFPlusOneNodesAreSame(looper, client1):
    """
    When only :math:`f+1` replies from the nodes are matching, the client
    would accept the reply
    """
    request = sendRandomRequest(client1)
    # exactly f + 1 => (3) nodes have correct responses
    # modify some (numOfResponses of type REPLY - (f + 1)) => 4 responses to
    # have a different operations
    looper.run(
        eventually(assertLength, client1.inBox,
                   2 * nodeCount * request.reqId,
                   retryWait=.25, timeout=15))
    replies = (msg for msg, frm in client1.inBox
               if msg[OP_FIELD_NAME] == REPLY and
               msg[f.RESULT.nm][f.REQ_ID.nm] == request.reqId)

    # change two responses to something different
    for i in range(2):
        msg = next(replies)
        msg[f.RESULT.nm][TXN_ID] = str(i) + "Some random id"
    checkResponseCorrectnessFromNodes(client1.inBox, request.reqId, F)
def testReqExecWhenReturnedByMaster(tdir_for_func):
    """
    Only the master instance's Ordered messages lead to request execution:
    processOrdered must return a truthy result for the master instance and
    None for every backup instance.
    """
    with TestNodeSet(count=4, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            for n in nodeSet:
                n.startKeySharing()
            client1, wallet1 = setupNodesAndClient(looper, nodeSet,
                                                   tmpdir=tdir_for_func)
            req = sendRandomRequest(wallet1, client1)
            looper.run(
                eventually(checkSufficientRepliesRecvd, client1.inBox,
                           req.reqId, 1, retryWait=1, timeout=15))

            async def chk():
                for node in nodeSet:
                    entries = node.spylog.getAll(node.processOrdered.__name__)
                    for entry in entries:
                        arg = entry.params['ordered']
                        result = entry.result
                        if arg.instId == node.instances.masterId:
                            assert result
                        else:
                            # backup instances must not trigger execution
                            assert result is None

            looper.run(eventually(chk, timeout=3))
def testReqExecWhenReturnedByMaster(tdir_for_func):
    """
    Only the master instance's Ordered messages lead to request execution:
    processOrdered must return a truthy result for the master instance and
    None for every backup instance.
    """
    with TestNodeSet(count=4, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            for n in nodeSet:
                n.startKeySharing()
            client1, wallet1 = setupNodesAndClient(looper, nodeSet,
                                                   tmpdir=tdir_for_func)
            req = sendRandomRequest(wallet1, client1)
            looper.run(eventually(checkSufficientRepliesRecvd,
                                  client1.inBox, req.reqId, 1,
                                  retryWait=1, timeout=15))

            async def chk():
                for node in nodeSet:
                    entries = node.spylog.getAll(
                        node.processOrdered.__name__)
                    for entry in entries:
                        arg = entry.params['ordered']
                        result = entry.result
                        if arg.instId == node.instances.masterId:
                            assert result
                        else:
                            # backup instances must not trigger execution
                            assert result is None

            looper.run(eventually(chk, timeout=3))
def testReplyWhenRepliesFromExactlyFPlusOneNodesAreSame(
        looper, client1, wallet1):
    """
    When only :math:`f+1` replies from the nodes are matching, the client
    would accept the reply
    """
    request = sendRandomRequest(wallet1, client1)
    # exactly f + 1 => (3) nodes have correct responses
    # modify some (numOfResponses of type REPLY - (f + 1)) => 4 responses to
    # have a different operations
    responseTimeout = waits.expectedTransactionExecutionTime(nodeCount)
    looper.run(
        eventually(checkResponseRecvdFromNodes, client1,
                   nodeCount, request.reqId,
                   retryWait=1, timeout=responseTimeout))

    replies = (msg for msg, frm in client1.inBox
               if msg[OP_FIELD_NAME] == REPLY and
               msg[f.RESULT.nm][f.REQ_ID.nm] == request.reqId)

    # change two responses to something different
    for i in range(2):
        msg = next(replies)
        msg[f.RESULT.nm][f.SIG.nm] = str(i) + "Some random id"
    checkResponseCorrectnessFromNodes(client1.inBox, request.reqId, F)
def testClientRetryRequestWhenAckNotReceived(looper, nodeSet, client1, wallet1, tconf):
    """
    The client gets disconnected from node say Alpha but does not know it.
    It sends request to all nodes including Alpha, expects ACK and REPLY from
    Alpha too, does not get it, so reconnects to Alpha and sends request
    again and gets REPLY
    """
    alpha = nodeSet.Alpha
    # Sever Alpha's client-facing connection without telling the client
    r = alpha.clientstack.getRemote(client1.stackName)
    alpha.clientstack.removeRemote(r)
    req = sendRandomRequest(wallet1, client1)

    def chkAcks():
        for node in nodeSet:
            if node != alpha:
                checkReqAck(client1, node, *req.key)
            else:
                # Alpha must NOT have acknowledged
                with pytest.raises(AssertionError):
                    checkReqAck(client1, node, *req.key)

    looper.run(eventually(chkAcks, retryWait=1, timeout=3))
    # After the ACK timeout the client reconnects, resends, and gets all 4
    looper.run(eventually(checkReplyCount, client1, *req.key, 4,
                          retryWait=1,
                          timeout=tconf.CLIENT_REQACK_TIMEOUT+5))
def testAvgReqLatency(looper: Looper, nodeSet: TestNodeSet, wallet1, client1):
    """
    Checking if average latency is being set
    """
    for i in range(5):
        req = sendRandomRequest(wallet1, client1)
        looper.run(
            eventually(checkSufficientRepliesRecvd, client1.inBox,
                       req.reqId, 1, retryWait=1, timeout=5))

    for node in nodeSet:  # type: Node
        # Per-client latency must be tracked for master and backup instances
        mLat = node.monitor.getAvgLatencyForClient(wallet1.defaultId,
                                                   node.instances.masterId)
        bLat = node.monitor.getAvgLatencyForClient(wallet1.defaultId,
                                                   *node.instances.backupIds)
        logger.debug("Avg. master latency : {}. Avg. backup latency: {}".format(mLat, bLat))
        assert mLat > 0
        assert bLat > 0
def testReplyWhenRepliesFromExactlyFPlusOneNodesAreSame(looper, client1, wallet1):
    """
    When only :math:`f+1` replies from the nodes are matching, the client
    would accept the reply
    """
    request = sendRandomRequest(wallet1, client1)
    # exactly f + 1 => (3) nodes have correct responses
    # modify some (numOfResponses of type REPLY - (f + 1)) => 4 responses to
    # have a different operations
    looper.run(
        eventually(checkResponseRecvdFromNodes, client1,
                   2 * nodeCount, request.reqId,
                   retryWait=1, timeout=20))

    replies = (msg for msg, frm in client1.inBox
               if msg[OP_FIELD_NAME] == REPLY and
               msg[f.RESULT.nm][f.REQ_ID.nm] == request.reqId)

    # change two responses to something different
    for i in range(2):
        msg = next(replies)
        msg[f.RESULT.nm][TXN_ID] = str(i) + "Some random id"
    checkResponseCorrectnessFromNodes(client1.inBox, request.reqId, F)
def write_wrapped():
    # Issue one request and wait for replies, then give the periodic
    # info-dump a full period to run before reading the file back.
    # NOTE(review): relies on enclosing-scope names (wallet, client,
    # txnPoolNodesLooper, patched_dump_info_period, info_path).
    req = sendRandomRequest(wallet, client)
    waitForSufficientRepliesForRequests(txnPoolNodesLooper, client,
                                        requests=[req])
    txnPoolNodesLooper.runFor(patched_dump_info_period)
    return load_info(info_path)
def requests(looper, client1):
    """Fixture: issue five random requests, waiting for sufficient replies
    after each one, and return the list of sent requests."""
    issued = []
    for _ in range(5):
        current = sendRandomRequest(client1)
        looper.run(eventually(checkSufficientRepliesRecvd,
                              client1.inBox, current.reqId, 1,
                              retryWait=1, timeout=5))
        issued.append(current)
    return issued
def testClientSendingSameRequestAgainBeforeFirstIsProcessed(
        looper, nodeSet, up, wallet1, client1):
    """
    Resubmitting an in-flight request must be deduplicated by the nodes:
    the duplicate earns extra REQACKs but no extra REPLYs.
    """
    size = len(client1.inBox)
    req = sendRandomRequest(wallet1, client1)
    # submit the very same request once more before it is processed
    client1.submitReqs(req)
    waitForSufficientRepliesForRequests(looper, client1, requests=[req])
    # Only REQACK will be sent twice by the node but not REPLY
    assert len(client1.inBox) == size + 12
def test_pool_reaches_quorum_after_f_plus_2_nodes_turned_off_and_later_on(
        looper, allPluginsPath, tdir, tconf, txnPoolNodeSet,
        wallet1, client1, client1Connected):
    """
    Stop f+2 nodes one by one: the pool survives the first stop (one view
    change), loses write quorum after the second and third stops, and only
    regains consensus once enough stopped nodes are started again.
    """
    nodes = txnPoolNodeSet
    initial_view_no = nodes[0].viewNo

    # Baseline: pool orders normally
    request = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[request])

    # Stop node 0: a view change happens and the pool still orders
    stop_node(nodes[0], looper, nodes)
    waitForViewChange(looper, nodes[1:], expectedViewNo=initial_view_no + 1)
    ensureElectionsDone(looper, nodes[1:],
                        numInstances=getRequiredInstances(nodeCount))
    request = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[request])

    # Stop node 1: quorum is lost; view must not advance further
    stop_node(nodes[1], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection + 2)
    checkViewNoForNodes(nodes[2:], initial_view_no + 1)
    request = sendRandomRequest(wallet1, client1)
    verify_request_not_replied_and_not_ordered(request, looper, client1, nodes)

    # Stop node 2: still no quorum
    stop_node(nodes[2], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection + 2)
    checkViewNoForNodes(nodes[3:], initial_view_no + 1)
    request = sendRandomRequest(wallet1, client1)
    verify_request_not_replied_and_not_ordered(request, looper, client1, nodes)

    # Restart node 2: one node back is not enough for quorum yet
    nodes[2] = start_stopped_node(nodes[2], looper, tconf, tdir,
                                  allPluginsPath)
    looper.runFor(waits.expectedPoolElectionTimeout(len(nodes)))
    request = sendRandomRequest(wallet1, client1)
    verify_request_not_replied_and_not_ordered(request, looper, client1, nodes)

    # Restart node 1: quorum is restored and ordering resumes
    nodes[1] = start_stopped_node(nodes[1], looper, tconf, tdir,
                                  allPluginsPath)
    ensureElectionsDone(looper, nodes[1:],
                        numInstances=getRequiredInstances(nodeCount))
    waitForViewChange(looper, nodes[1:], expectedViewNo=initial_view_no + 1)
    request = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[request])

    # Restart node 0: the full pool participates again
    nodes[0] = start_stopped_node(nodes[0], looper, tconf, tdir,
                                  allPluginsPath)
    ensureElectionsDone(looper, nodes,
                        numInstances=getRequiredInstances(nodeCount))
    waitForViewChange(looper, nodes, expectedViewNo=initial_view_no + 1)
    request = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[request])
def test_working_has_no_warn_log_msg(looper, nodeSet, wallet1, client1,
                                     patch_monitors):
    """A normally participating pool must never raise the monitor's
    'not participating / unordered requests' warning, even after as many
    ordered requests as would trigger it on a stalled node."""
    monitor = nodeSet[0].monitor
    assert no_any_warn(*nodeSet)

    for _ in range(monitor.WARN_NOT_PARTICIPATING_UNORDERED_NUM):
        request = sendRandomRequest(wallet1, client1)
        waitForSufficientRepliesForRequests(looper, client1,
                                            requests=[request])
        looper.runFor(monitor.WARN_NOT_PARTICIPATING_MIN_DIFF_SEC)

    assert no_any_warn(*nodeSet)
def testReplyWhenRepliesFromAllNodesAreSame(looper, client1, wallet1):
    """
    When there are not faulty nodes, the client must get a reply from all
    the nodes.
    """
    request = sendRandomRequest(wallet1, client1)
    looper.run(
        eventually(checkResponseRecvdFromNodes, client1,
                   2 * nodeCount, request.reqId,
                   retryWait=1, timeout=20))
    checkResponseCorrectnessFromNodes(client1.inBox, request.reqId, F)
def testReplyWhenRepliesFromAllNodesAreSame(looper, client1):
    """
    When there are not faulty nodes, the client must get a reply from all
    the nodes.
    """
    request = sendRandomRequest(client1)
    looper.run(
        eventually(assertLength, client1.inBox,
                   2 * nodeCount * request.reqId,
                   retryWait=.25, timeout=15))
    checkResponseCorrectnessFromNodes(client1.inBox, request.reqId, F)
def testClientSendingSameRequestAgainBeforeFirstIsProcessed(looper, nodeSet,
                                                            up, wallet1,
                                                            client1):
    """
    Resubmitting an in-flight request must be deduplicated by the nodes:
    the duplicate earns extra REQACKs but no extra REPLYs.
    """
    size = len(client1.inBox)
    req = sendRandomRequest(wallet1, client1)
    # submit the very same request once more before it is processed
    client1.submitReqs(req)
    f = getMaxFailures(len(nodeSet))
    looper.run(eventually(
        checkSufficientRepliesRecvd, client1.inBox,
        req.reqId, f, retryWait=1, timeout=3 * len(nodeSet)))
    # Only REQACK will be sent twice by the node but not REPLY
    assert len(client1.inBox) == size + 12
def testMerkleProofForNonFirstLeaf(looper, nodeSet, client1: Client, replied1):
    """
    The Merkle audit path in a reply must verify even when the transaction is
    not the first leaf of the ledger (replied1 guarantees an earlier txn).
    """
    req2 = sendRandomRequest(client1)
    f = nodeSet.f
    looper.run(
        eventually(checkSufficientRepliesRecvd, client1.inBox,
                   req2.reqId, f, retryWait=1, timeout=15))
    replies = client1.getRepliesFromAllNodes(req2.reqId).values()
    assert Client.verifyMerkleProof(*replies)
def test_quorum_after_f_plus_2_nodes_including_primary_turned_off_and_later_on(
        looper, allPluginsPath, tdir, tconf, txnPoolNodeSet,
        wallet1, client1):
    """
    Stop f+2 nodes (including the primary) one by one: the pool survives the
    first stop (view change to 1), loses write quorum after the second and
    third stops, and only regains consensus once enough stopped nodes are
    started again; the view number must stay at 1 throughout.
    """
    nodes = txnPoolNodeSet
    # Baseline: pool orders normally
    request1 = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[request1])

    # Stop node 0 (primary): view change to 1, pool still orders
    stop_node(nodes[0], looper, nodes)
    waitForViewChange(looper, nodes[1:], expectedViewNo=1)
    ensureElectionsDone(looper, nodes[1:],
                        numInstances=getRequiredInstances(nodeCount))
    request2 = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[request2])

    # Stop node 1: quorum lost; view must not advance further
    stop_node(nodes[1], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[2:], expectedViewNo=1)
    request3 = sendRandomRequest(wallet1, client1)
    verify_request_not_replied_and_not_ordered(request3, looper, client1,
                                               nodes)

    # Stop node 2: still no quorum
    stop_node(nodes[2], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[3:], expectedViewNo=1)
    request4 = sendRandomRequest(wallet1, client1)
    verify_request_not_replied_and_not_ordered(request4, looper, client1,
                                               nodes)

    # Restart node 2: one node back is not enough for quorum yet
    nodes[2] = start_stopped_node(nodes[2], looper, tconf, tdir,
                                  allPluginsPath)
    looper.runFor(waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[3:], expectedViewNo=1)
    request5 = sendRandomRequest(wallet1, client1)
    verify_request_not_replied_and_not_ordered(request5, looper, client1,
                                               nodes)

    # Restart node 1: quorum restored, ordering resumes
    nodes[1] = start_stopped_node(nodes[1], looper, tconf, tdir,
                                  allPluginsPath)
    ensureElectionsDone(looper, nodes[1:],
                        numInstances=getRequiredInstances(nodeCount))
    checkViewNoForNodes(nodes[1:], expectedViewNo=1)
    request6 = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[request6])

    # Restart node 0: full pool participates again
    nodes[0] = start_stopped_node(nodes[0], looper, tconf, tdir,
                                  allPluginsPath)
    ensureElectionsDone(looper, nodes,
                        numInstances=getRequiredInstances(nodeCount))
    checkViewNoForNodes(nodes, expectedViewNo=1)
    request7 = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[request7])
def x():
    # Send a burst of requests, wait for replies, then print the stats delta.
    # NOTE(review): relies on enclosing-scope names (client, looper, nodeSet,
    # ss1, snapshotStats, statsDiff).
    requests = [sendRandomRequest(client) for _ in range(10)]
    for request in requests:
        looper.run(eventually(
            checkSufficientRepliesRecvd, client.inBox,
            request.reqId, 3,
            retryWait=1, timeout=3 * len(nodeSet)))
    ss2 = snapshotStats(*nodeSet)
    diff = statsDiff(ss2, ss1)

    pprint(ss2)
    print("----------------------------------------------")
    pprint(diff)
def requests(looper, wallet1, client1):
    """Fixture: issue five random requests, waiting for sufficient replies
    after each one, and return the list of sent requests."""
    issued = []
    for _ in range(5):
        current = sendRandomRequest(wallet1, client1)
        looper.run(
            eventually(checkSufficientRepliesRecvd, client1.inBox,
                       current.reqId, 1, retryWait=1, timeout=5))
        issued.append(current)
    return issued
def testClientConnectToRestartedNodes(looper, txnPoolNodeSet, tdirWithPoolTxns,
                                      poolTxnClientNames, poolTxnData, tconf,
                                      poolTxnNodeNames, allPluginsPath):
    """
    A pool-ledger client that worked against the original pool must be able
    to reconnect and get replies after every node is stopped and restarted.
    """
    name = poolTxnClientNames[-1]
    # NOTE(review): ``seed`` appears unused in this body — confirm whether
    # genTestClient is expected to take it in this version.
    seed = poolTxnData["seeds"][name]
    newClient, w = genTestClient(tmpdir=tdirWithPoolTxns,
                                 nodes=txnPoolNodeSet,
                                 name=name, usePoolLedger=True)
    looper.add(newClient)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newClient,
                                                  *txnPoolNodeSet)
    sendReqsToNodesAndVerifySuffReplies(looper, w, newClient, 1, 1)

    # Stop all nodes
    for node in txnPoolNodeSet:
        node.stop()
        looper.removeProdable(node)

    # looper.run(newClient.ensureDisconnectedToNodes(timeout=60))
    # Recreate and restart every node from the pool transactions
    txnPoolNodeSet = []
    for nm in poolTxnNodeNames:
        node = TestNode(nm, basedirpath=tdirWithPoolTxns,
                        config=tconf, pluginPaths=allPluginsPath)
        looper.add(node)
        txnPoolNodeSet.append(node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet, retryWait=1,
                        timeout=10)

    def chk():
        for node in txnPoolNodeSet:
            assert node.isParticipating

    looper.run(eventually(chk, retryWait=1, timeout=10))

    bootstrapClientKeys(w.defaultId, w.getVerkey(), txnPoolNodeSet)

    req = sendRandomRequest(w, newClient)
    checkSufficientRepliesForRequests(looper, newClient, [req, ],
                                      timeoutPerReq=10)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newClient,
                                                  *txnPoolNodeSet)
    sendReqsToNodesAndVerifySuffReplies(looper, w, newClient, 1, 1)
def x():
    """
    Send ten random requests, wait for sufficient replies, then print the
    node stats and their delta against the earlier snapshot ``ss1``.

    The stats dump only happens when ZStack is not in use.
    """
    requests = [sendRandomRequest(wal, client) for _ in range(10)]
    waitForSufficientRepliesForRequests(looper, client, requests=requests)
    # Fix: the snapshot and diff used to be computed unconditionally and
    # then recomputed inside this branch, making the first pair dead work.
    # Compute them once, where they are actually used.
    if not config.UseZStack:
        ss2 = snapshotStats(*nodeSet)
        diff = statsDiff(ss2, ss1)
        pprint(ss2)
        print("----------------------------------------------")
        pprint(diff)
def testThroughput(looper, nodeSet: Iterable[Node], wallet1, client1):
    """
    Verify every node's monitor reports a positive throughput for both the
    master instance and (on average) the backup instances once a handful of
    requests have been ordered.
    """
    # Order five requests so the monitors have data to compute from
    for _ in range(5):
        request = sendRandomRequest(wallet1, client1)
        waitForSufficientRepliesForRequests(looper, client1,
                                            requests=[request])

    for node in nodeSet:
        mThr, bThr = node.monitor.getThroughputs(node.instances.masterId)
        logger.debug("Master throughput: {}. Avg. backup throughput: {}".
                     format(mThr, bThr))
        assert mThr > 0
        assert bThr > 0
def testClientSendingSameRequestAgainBeforeFirstIsProcessed(
        looper, nodeSet, up, wallet1, client1):
    """
    Re-submitting a request that is already in flight must not produce a
    second set of REPLY messages; the nodes only REQACK it again.
    """
    inbox_before = len(client1.inBox)
    request = sendRandomRequest(wallet1, client1)
    # Submit the very same request a second time before it is processed
    client1.submitReqs(request)
    tolerated_faults = getMaxFailures(len(nodeSet))
    looper.run(
        eventually(checkSufficientRepliesRecvd, client1.inBox,
                   request.reqId, tolerated_faults,
                   retryWait=1, timeout=3 * len(nodeSet)))
    # Only REQACK will be sent twice by the node but not REPLY.
    # NOTE(review): 12 presumably = 4 REQACKs + 4 REPLYs for the first send
    # plus 4 REQACKs for the duplicate, assuming a 4-node pool — confirm.
    assert len(client1.inBox) == inbox_before + 12
def test_state_proof_checked_in_client_request(looper, txnPoolNodeSet,
                                               client1, wallet1):
    """
    Checks that the client can use state proofs instead of a quorum for
    replies. To implement this test, a state proof is added to replies for
    'greeting' requests in TestNode, and parsing is added to TestClient.
    """
    req = sendRandomRequest(wallet1, client1)
    timeout = waits.expectedTransactionExecutionTime(nodeCount)
    looper.run(
        eventually(check_proved_reply_received,
                   client1, req.identifier, req.reqId,
                   retryWait=1, timeout=timeout))
    checkResponseCorrectnessFromNodes(client1.inBox, req.reqId, F)
def testThroughput(looper, nodeSet: Iterable[Node], wallet1, client1):
    """
    Verify every node's monitor reports a positive throughput for both the
    master instance and (on average) the backup instances once a handful of
    requests have been ordered.
    """
    # Order five requests so the monitors have data to compute from
    for _ in range(5):
        request = sendRandomRequest(wallet1, client1)
        looper.run(
            eventually(checkSufficientRepliesRecvd, client1.inBox,
                       request.reqId, 1,
                       retryWait=1, timeout=5))

    for node in nodeSet:
        mThr, bThr = node.monitor.getThroughputs(node.instances.masterId)
        logger.debug("Master throughput: {}. Avg. backup throughput: {}".
                     format(mThr, bThr))
        assert mThr > 0
        assert bThr > 0
def testClientConnectToRestartedNodes(looper, txnPoolNodeSet, tdirWithPoolTxns,
                                      poolTxnClientNames, poolTxnData, tconf,
                                      poolTxnNodeNames, allPluginsPath):
    """
    A connected client should be able to reconnect and get requests ordered
    after the whole pool is stopped and rebuilt from its pool transactions.
    """
    name = poolTxnClientNames[-1]
    # NOTE(review): `seed` is read but never used below — confirm whether
    # genTestClient is supposed to receive it.
    seed = poolTxnData["seeds"][name]
    newClient, w = genTestClient(tmpdir=tdirWithPoolTxns, nodes=txnPoolNodeSet,
                                 name=name, usePoolLedger=True)
    looper.add(newClient)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newClient,
                                                  *txnPoolNodeSet)
    # Sanity check: requests are ordered before the restart
    sendReqsToNodesAndVerifySuffReplies(looper, w, newClient, 1, 1)
    # Bring the whole pool down
    for node in txnPoolNodeSet:
        node.stop()
        looper.removeProdable(node)
    # looper.run(newClient.ensureDisconnectedToNodes(timeout=60))
    # Recreate every node from the on-disk pool transactions
    txnPoolNodeSet = []
    for nm in poolTxnNodeNames:
        node = TestNode(nm, basedirpath=tdirWithPoolTxns,
                        config=tconf, pluginPaths=allPluginsPath)
        looper.add(node)
        txnPoolNodeSet.append(node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet,
                        retryWait=1, timeout=10)

    def chk():
        # Every restarted node must be participating before requests go out
        for node in txnPoolNodeSet:
            assert node.isParticipating

    looper.run(eventually(chk, retryWait=1, timeout=10))
    # The freshly restarted nodes need the client's verkey again
    bootstrapClientKeys(w.defaultId, w.getVerkey(), txnPoolNodeSet)
    req = sendRandomRequest(w, newClient)
    checkSufficientRepliesForRequests(looper, newClient, [req, ],
                                      timeoutPerReq=10)
    # The client reconnects cleanly and keeps working
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newClient,
                                                  *txnPoolNodeSet)
    sendReqsToNodesAndVerifySuffReplies(looper, w, newClient, 1, 1)
def testAvgReqLatency(looper: Looper, nodeSet: TestNodeSet, wallet1, client1):
    """
    Verify every node's monitor reports a positive average request latency
    for this client, on both the master and the backup instances.
    """
    # Order five requests so the monitors have latency samples
    for _ in range(5):
        request = sendRandomRequest(wallet1, client1)
        looper.run(
            eventually(checkSufficientRepliesRecvd, client1.inBox,
                       request.reqId, 1,
                       retryWait=1, timeout=5))

    for node in nodeSet:  # type: Node
        masterLatency = node.monitor.getAvgLatencyForClient(
            wallet1.defaultId, node.instances.masterId)
        backupLatency = node.monitor.getAvgLatencyForClient(
            wallet1.defaultId, *node.instances.backupIds)
        logger.debug("Avg. master latency : {}. Avg. backup latency: {}".
                     format(masterLatency, backupLatency))
        assert masterLatency > 0
        assert backupLatency > 0
def testRequestReturnToNodeWhenPrePrepareNotReceivedByOneNode(tdir_for_func):
    """
    Test no T-3.

    Even when one node's PRE-PREPARE from the primary is delayed for a long
    time, every node (including the delayed one) still returns the ordered
    request to the client.
    """
    nodeNames = genNodeNames(7)
    nodeReg = genNodeReg(names=nodeNames)
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            prepareNodeSet(looper, nodeSet)
            logging.debug("Add the seven nodes back in")
            # Every node except A delays self nomination so A can become
            # primary
            nodeA = addNodeBack(nodeSet, looper, nodeNames[0])
            for i in range(1, 7):
                node = addNodeBack(nodeSet, looper, nodeNames[i])
                node.delaySelfNomination(15)
            nodeB = nodeSet.getNode(nodeNames[1])
            # Node B delays PREPREPARE from node A (which would be the
            # primary) for a long time.
            nodeB.nodeIbStasher.delay(
                delayerMsgTuple(120, PrePrepare, nodeA.name))

            # Ensure elections are done and A did become a primary
            ensureElectionsDone(looper=looper, nodes=nodeSet,
                                retryWait=1, timeout=30)
            assert nodeA.hasPrimary

            instNo = nodeA.primaryReplicaNo
            client1 = setupClient(looper, nodeSet, tmpdir=tdir_for_func)
            req = sendRandomRequest(client1)

            # All nodes including B should return their ordered requests
            for node in nodeSet:
                looper.run(
                    eventually(checkRequestReturnedToNode, node,
                               client1.defaultIdentifier, req.reqId,
                               req.digest, instNo,
                               retryWait=1, timeout=30))

            # Node B should not have received the PRE-PREPARE request yet
            # (its stasher is still holding it back)
            replica = nodeB.replicas[instNo]  # type: Replica
            assert len(replica.prePrepares) == 0
def testReplyWhenRepliesFromExactlyFPlusOneNodesAreSame(looper, client1):
    """
    When only :math:`2f+1` replies from the nodes are matching, the client
    would accept the reply

    NOTE(review): the function name says "exactly f+1" while this docstring
    says 2f+1 — confirm which quorum this test is actually about.
    """
    request = sendRandomRequest(client1)
    # exactly f + 1 => (3) nodes have correct responses
    # modify some (numOfResponses of type REPLY - (f + 1)) => 4 responses to
    # have a different operations
    # NOTE(review): the expected inbox length scales with reqId
    # (2 * nodeCount * request.reqId) — presumably one REQACK plus one REPLY
    # per node per request, with reqId counting requests from 1; confirm.
    looper.run(
        eventually(assertLength, client1.inBox,
                   2 * nodeCount * request.reqId,
                   retryWait=.25, timeout=15))
    # Lazily iterate REPLY messages for this request only
    replies = (msg for msg, frm in client1.inBox
               if msg['op'] == REPLY and msg['reqId'] == request.reqId)

    # change two responses to something different
    for i in range(2):
        msg = next(replies)
        msg['result']['txnId'] = str(i) + "Some random id"
    # Despite the two corrupted replies, the remaining matching ones
    # must still satisfy the correctness check
    checkResponseCorrectnessFromNodes(client1.inBox, request.reqId, F)
def testPrePrepareWhenPrimaryStatusIsUnknown(tdir_for_func):
    """
    When a node (D) does not yet know whether it is a primary (its PRIMARY
    messages are delayed), incoming 3PC messages for a request pile up as
    pending PRE-PREPARE/PREPARE on its replica until its status resolves,
    after which no pending messages remain.
    """
    nodeNames = genNodeNames(4)
    nodeReg = genNodeReg(names=nodeNames)
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            prepareNodeSet(looper, nodeSet)
            nodeA, nodeB, nodeC, nodeD = tuple(
                addNodeBack(nodeSet, looper, nodeNames[i]) for i in
                range(0, 4))

            # Nodes C and D delays self nomination so A and B can become
            # primaries
            nodeC.delaySelfNomination(30)
            nodeD.delaySelfNomination(30)

            # Node D delays receiving PRIMARY messages from all nodes so it
            # will not know whether it is primary or not
            # nodeD.nodestack.delay(delayer(20, PRIMARY))
            nodeD.nodeIbStasher.delay(delayerMsgTuple(20, Primary))

            checkPoolReady(looper=looper, nodes=nodeSet)

            client1 = setupClient(looper, nodeSet, tmpdir=tdir_for_func)
            request = sendRandomRequest(client1)

            # TODO Rethink this
            instNo = 0

            for i in range(3):
                node = nodeSet.getNode(nodeNames[i])
                # Nodes A, B and C should have received PROPAGATE request
                # from Node D
                looper.run(
                    eventually(checkIfPropagateRecvdFromNode, node, nodeD,
                               request.identifier,
                               request.reqId, retryWait=1, timeout=10))

            # Node D should have 1 pending PRE-PREPARE request
            def assertOnePrePrepare():
                assert len(getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                        PrePrepare)) == 1

            looper.run(eventually(assertOnePrePrepare,
                                  retryWait=1,
                                  timeout=10))

            # Node D should have 2 pending PREPARE requests(from node B and C)
            def assertTwoPrepare():
                assert len(getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                        Prepare)) == 2

            looper.run(eventually(assertTwoPrepare, retryWait=1,
                                  timeout=10))

            # Node D should have no pending PRE-PREPARE, PREPARE or COMMIT
            # requests once its primary status is known.
            # NOTE(review): the lambda captures `reqType` late-bound; this is
            # safe only because looper.run completes within each iteration.
            for reqType in [PrePrepare, Prepare, Commit]:
                looper.run(
                    eventually(
                        lambda: assertLength(
                            getPendingRequestsForReplica(
                                nodeD.replicas[instNo], reqType),
                            0),
                        retryWait=1, timeout=20))