def testDuplicateInstanceChangeMsgsMarkNodeAsSuspicious(looper, nodeSet, up):
    maliciousNode = nodeSet.Alpha
    maliciousNode.send(InstanceChange(0))

    def chk(viewNo):
        for node in nodeSet:
            if node.name != maliciousNode.name:
                param = getAllArgs(node, Node.processInstanceChange)
                assert param[0]['instChg'].viewNo == viewNo
                assert param[0]['frm'] == maliciousNode.name

    looper.run(eventually(chk, 0, retryWait=1, timeout=20))
    maliciousNode.send(InstanceChange(0))

    def g():
        for node in nodeSet:
            if node.name != maliciousNode.name:
                params = getAllArgs(node, Node.reportSuspiciousNode)[0]
                frm, reason = params['nodeName'], params['reason']
                assert frm == maliciousNode.name
                assert isinstance(reason, SuspiciousNode)
                assert len(
                    getNodeSuspicions(
                        node, Suspicions.DUPLICATE_INST_CHNG.code)) == 12

    looper.run(eventually(g, retryWait=1, timeout=20))
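
# Every test here polls its assertions through `eventually`. A minimal sketch
# of that pattern, assuming an asyncio-style event loop; the names and
# signature are illustrative, not plenum's actual implementation.
import asyncio
import time


async def eventuallySketch(check, *args, retryWait=1, timeout=20):
    # Re-run `check` until it stops raising AssertionError or `timeout` expires
    deadline = time.monotonic() + timeout
    while True:
        try:
            result = check(*args)
            if asyncio.iscoroutine(result):
                result = await result
            return result
        except AssertionError:
            if time.monotonic() >= deadline:
                raise
            await asyncio.sleep(retryWait)
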
def ensureAgentsConnected(looper, agent1, agent2):
    e1 = agent1.endpoint
    e2 = agent2.endpoint
    looper.run(
        eventually(checkRemoteExists, e1, e2.name, CONNECTED, timeout=10))
    looper.run(
        eventually(checkRemoteExists, e2, e1.name, CONNECTED, timeout=10))
def testDiscardInstChngMsgIfMasterDoesntSeePerformanceProblem(
        nodeSet, looper, ensureView):
    """
    A node that received an INSTANCE_CHANGE message must not send an
    INSTANCE_CHANGE message if it doesn't observe too much difference in
    performance between its replicas.
    """

    curViewNo = ensureView

    # Send an instance change message to all nodes
    icMsg = InstanceChange(curViewNo)
    nodeSet.Alpha.send(icMsg)

    # ensure every node but Alpha discards the invalid instance change request
    looper.run(
        eventually(checkDiscardMsg,
                   nodeSet,
                   icMsg,
                   'did not find the master to be slow',
                   nodeSet.Alpha,
                   timeout=5))

    # Check that the view number did not change
    looper.run(eventually(checkViewNoForNodes, nodeSet, timeout=3))
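
# A hedged sketch of what a checkDiscardMsg-style helper could look like,
# assuming each test node spies on its `discard(msg, reason)` calls; the real
# helper in the test utilities may differ.
def checkDiscardMsgSketch(nodes, msg, reasonFragment, *excluded):
    for node in nodes:
        if node in excluded:
            continue
        # hypothetical spy lookup, modeled on spylog.getLastParams used below
        params = node.spylog.getLastParams(node.discard)
        assert params['msg'] == msg
        assert reasonFragment in params['reason']
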
def testAnonCreds(aliceAgent, aliceAcceptedFaber, aliceAcceptedAcme, acmeAgent, emptyLooper):
    # 1. request Claims from Faber
    faberLink = aliceAgent.wallet.getLink('Faber College')
    name, version, origin = faberLink.availableClaims[0]
    claimDefKey = ClaimDefinitionKey(name, version, origin)
    aliceAgent.sendReqClaim(faberLink, claimDefKey)

    # 2. check that claim is received from Faber
    async def chkClaims():
        claim = await aliceAgent.prover.wallet.getClaims(ID(claimDefKey))
        assert claim.primaryClaim

    emptyLooper.run(eventually(chkClaims, timeout=20))

    # 3. send claim proof to Acme
    acmeLink, acmeClaimPrfReq = aliceAgent.wallet.getMatchingLinksWithClaimReq("Job-Application", "Acme Corp")[0]
    aliceAgent.sendProof(acmeLink, acmeClaimPrfReq)

    # 4. check that claim proof is verified by Acme
    def chkProof():
        internalId = acmeAgent.getInternalIdByInvitedNonce(acmeLink.invitationNonce)
        link = acmeAgent.wallet.getLinkByInternalId(internalId)
        assert "Job-Application" in link.verifiedClaimProofs

    emptyLooper.run(eventually(chkProof, timeout=20))
def testOrderingWhenPrePrepareNotReceived(looper, nodeSet, up, client1,
                                          wallet1):
    """
    Send commits and prepares but delay the pre-prepare so that enough prepares
    and commits are received; the request should not be ordered until the
    pre-prepare is received, and ordering should happen exactly once.
    """
    nonPrimReps = getNonPrimaryReplicas(nodeSet, 0)
    slowRep = nonPrimReps[0]
    slowNode = slowRep.node
    slowNode.nodeIbStasher.delay(ppDelay(10, 0))
    sendRandomRequest(wallet1, client1)

    stash = []
    origMethod = slowRep.processReqDigest

    def patched(self, msg):
        stash.append(msg)

    patchedMethod = types.MethodType(patched, slowRep)
    slowRep.processReqDigest = patchedMethod

    def chk1():
        assert len(slowRep.commitsWaitingForPrepare) > 0

    looper.run(eventually(chk1, timeout=4))

    for item in stash:
        origMethod(item)

    def chk2():
        assert len(slowRep.commitsWaitingForPrepare) == 0
        assert slowRep.spylog.count(slowRep.doOrder.__name__) == 1

    looper.run(eventually(chk2, timeout=12))
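
# The stash-and-replay patch above relies on types.MethodType to bind a plain
# function as a method of one instance only. A self-contained illustration:
import types


class Processor:
    def process(self, item):
        print("processing", item)


proc = Processor()
stashed = []
proc.process = types.MethodType(lambda self, item: stashed.append(item), proc)
proc.process("req-1")          # stashed instead of processed
assert stashed == ["req-1"]
Processor().process("req-2")   # other instances keep the original method
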
def testStatusAfterClientAdded(cli, validNodeNames, createAllNodes):
    clientName = "Joe"
    cli.enterCmd("new client {}".format(clientName))
    cli.looper.run(
        eventually(checkClientConnected,
                   cli,
                   validNodeNames,
                   clientName,
                   retryWait=1,
                   timeout=3))
    cli.enterCmd("new key")
    cli.enterCmd("status client {}".format(clientName))
    cli.looper.run(
        eventually(checkActiveIdrPrinted, cli, retryWait=1, timeout=3))
    for name in validNodeNames:
        # Checking the output after command `status node <name>`. Testing
        # the node status here after the client is connected
        cli.enterCmd("status node {}".format(name))
        otherNodeNames = set(validNodeNames) - {name}
        node = cli.nodes[name]
        cliLogs = list(cli.printeds)
        if node.hasPrimary:
            checkPrimaryLogs(node, cliLogs)
        else:
            checkNonPrimaryLogs(node, cliLogs)
            checkForNamedTokens(cli.printedTokens[3], otherNodeNames)
        if cli.clients:
            checkForNamedTokens(cli.printedTokens[1], {clientName})
def testQueueingReqFromFutureView(delayedPerf, looper, nodeSet, up, client1):
    """
    Test if every node queues 3-phase requests (PRE-PREPARE, PREPARE and COMMIT)
    that come from a view which is greater than the current view
    """

    f = getMaxFailures(nodeCount)

    # Delay processing of instance change on a node
    nodeA = nodeSet.Alpha
    nodeA.nodeIbStasher.delay(icDelay(60))

    nonPrimReps = getNonPrimaryReplicas(nodeSet, 0)
    # Delay processing of PRE-PREPARE from all non primary replicas of master
    # so master's throughput falls and view changes
    ppDelayer = ppDelay(5, 0)
    for r in nonPrimReps:
        r.node.nodeIbStasher.delay(ppDelayer)

    sendReqsToNodesAndVerifySuffReplies(looper, client1, 4,
                                        timeout=5 * nodeCount)

    # Every node except Node A should have a view change
    for node in nodeSet:
        if node.name != nodeA.name:
            looper.run(eventually(
                partial(checkViewChangeInitiatedForNode, node, 0),
                retryWait=1,
                timeout=20))

    # Node A's view should not have changed yet
    with pytest.raises(AssertionError):
        looper.run(eventually(partial(
            checkViewChangeInitiatedForNode, nodeA, 0),
            retryWait=1,
            timeout=20))

    # NodeA should not have any pending 3 phase request for a later view
    for r in nodeA.replicas:  # type: TestReplica
        assert len(r.threePhaseMsgsForLaterView) == 0

    # Reset delays on incoming messages from all nodes
    for node in nodeSet:
        node.nodeIbStasher.nodelay(ppDelayer)

    # Send one more request
    sendRandomRequest(client1)

    def checkPending3PhaseReqs():
        # Get all replicas that have their primary status decided
        reps = [rep for rep in nodeA.replicas if rep.isPrimary is not None]
        # At least one replica should have its primary status decided
        assert len(reps) > 0
        for r in reps:  # type: TestReplica
            logging.debug("primary status for replica {} is {}"
                          .format(r, r.primaryNames))
            assert len(r.threePhaseMsgsForLaterView) > 0

    # NodeA should now have pending 3 phase request for a later view
    looper.run(eventually(checkPending3PhaseReqs, retryWait=1, timeout=30))
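
# A sketch of how a ppDelay-style rule might be shaped, assuming the stasher
# holds (seconds, predicate) pairs and delays matching incoming messages for
# that long; plenum's actual delayer helpers may differ.
def ppDelaySketch(seconds, instId=None):
    def matchesPrePrepare(msg):
        # delay only PRE-PREPARE messages, optionally for a single instance
        return type(msg).__name__ == 'PrePrepare' and \
            (instId is None or getattr(msg, 'instId', None) == instId)
    return seconds, matchesPrePrepare
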
def checkRequest(cli, operation):
    cName = "Joe"
    cli.enterCmd("new client {}".format(cName))
    # Let client connect to the nodes
    cli.looper.run(eventually(checkClientConnected, cli, list(cli.nodes.keys()), cName, retryWait=1, timeout=5))
    # Send request to all nodes

    createNewKeyring(cName, cli)

    cli.enterCmd("new key {}".format("testkey1"))
    assert "Key created in keyring {}".format(cName) in cli.lastCmdOutput

    cli.enterCmd("client {} send {}".format(cName, operation))
    client = cli.clients[cName]
    wallet = cli.wallets[cName]  # type: Wallet
    f = getMaxFailures(len(cli.nodes))
    # Ensure client gets back the replies
    lastReqId = wallet._getIdData().lastReqId
    cli.looper.run(eventually(checkSufficientRepliesRecvd, client.inBox, lastReqId, f, retryWait=2, timeout=10))

    txn, status = client.getReply(wallet.defaultId, lastReqId)

    # Ensure the cli shows appropriate output
    cli.enterCmd("client {} show {}".format(cName, lastReqId))
    printeds = cli.printeds
    printedReply = printeds[1]
    printedStatus = printeds[0]
    # txnTimePattern = "'txnTime', \d+\.*\d*"
    # txnIdPattern = "'txnId', '" + txn['txnId'] + "'"
    txnTimePattern = "'txnTime': \d+\.*\d*"
    txnIdPattern = "'txnId': '" + txn["txnId"] + "'"
    assert re.search(txnIdPattern, printedReply["msg"])
    assert re.search(txnTimePattern, printedReply["msg"])
    assert printedStatus["msg"] == "Status: {}".format(status)
    return client, wallet
def testOrderingCase1(looper, nodeSet, up, client1, wallet1):
    """
    Scenario -> PRE-PREPARE not received by the replica, Request not received
    for ordering by the replica, but received enough commits to start ordering.
    It queues up the request so that when a PRE-PREPARE is received or the
    request is received for ordering, an order can be triggered
    https://www.pivotaltracker.com/story/show/125239401

    Reproduced by picking a node with no primary replica, making the replica
    ignore the forwarded request, and delaying reception of the PRE-PREPARE
    long enough that enough COMMITs arrive to trigger ordering.
    """
    replica = getNonPrimaryReplicas(nodeSet, instId=0)[0]
    delaysPrePrepareProcessing(replica.node, delay=10, instId=0)

    def doNotProcessReqDigest(self, rd: ReqDigest):
        pass

    patchedMethod = types.MethodType(doNotProcessReqDigest, replica)
    replica.processReqDigest = patchedMethod

    def chk(n):
        assert replica.spylog.count(replica.doOrder.__name__) == n

    sendRandomRequest(wallet1, client1)
    looper.run(eventually(chk, 0, retryWait=1, timeout=5))
    looper.run(eventually(chk, 1, retryWait=1, timeout=15))
def testPrimaryElectionWithTie(electTieFixture, looper, keySharedNodes):
    """
    Primary selection (Rainy Day)
    A, B, C, D, E
    A, B, C, D startup. E is lagging.
    A sees the minimum number of nodes, and then sends Nominate(A)
    At the same exact time, B sees the minimum number of nodes, and then sends out Nominate(B)
    A sees B sending Nominate(B), but it has already nominated itself, so it does nothing
    B sees A sending Nominate(A), but it has already nominated itself, so it does nothing
    C sees A sending Nominate(A), and sends Nominate(A)
    D sees B sending Nominate(B), and sends Nominate(B)
    There's a split. C and A think A is the primary, B and D think B is the primary
    All nodes can see that there is a split. Each sends out Reelection([A,B])

    A and B both see Reelection([A,B]) from themselves as well as the other 3 (the number from others should be at least f+1),

    1. they wait a random amount of time (between 0 and 2 seconds),
    2. they each send out a Nominate(self)

    Voting is repeated until we have a good election.
    """

    # TODO optimize the sending messages in batches, for example, we don't
    #     send messages more often than 400 milliseconds. Once those 400
    #     millis have passed, we send the several queued messages in one
    #     batch.

    nodeSet = keySharedNodes
    A, B, C, D = nodeSet.nodes.values()

    checkPoolReady(looper, nodeSet.nodes.values())

    for node in nodeSet.nodes.values():
        for instId, replica in enumerate(node.elector.replicas):
            logging.debug("replica {} {} with votes {}".
                          format(replica.name, replica.instId,
                                 node.elector.nominations.get(instId, {})))

    logging.debug("Check nomination")
    # Checking whether Node A nominated itself
    looper.run(eventually(checkNomination, A, A.name, retryWait=1, timeout=10))

    # Checking whether Node B nominated itself
    looper.run(eventually(checkNomination, B, B.name, retryWait=1, timeout=10))

    # Checking whether Node C nominated Node A
    looper.run(eventually(checkNomination, C, A.name, retryWait=1, timeout=10))

    # Checking whether Node D nominated Node B
    looper.run(eventually(checkNomination, D, B.name, retryWait=1, timeout=10))

    # No node should be primary
    for node in nodeSet.nodes.values():
        assert node.hasPrimary is False

    for node in nodeSet.nodes.values():
        node.resetDelays()

    checkProtocolInstanceSetup(looper=looper, nodes=nodeSet, retryWait=1,
                               timeout=60)
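
# The election quorums in the scenario above follow the usual BFT arithmetic:
# with N nodes the pool tolerates f = (N - 1) // 3 faults (cf. getMaxFailures),
# a clear winner needs 2f + 1 NOMINATE votes, and agreeing on a re-election
# takes at least f + 1 matching REELECTION messages. A quick sanity check:
def maxFailuresSketch(nodeCount):
    return (nodeCount - 1) // 3 if nodeCount >= 4 else 0


assert maxFailuresSketch(4) == 1          # 4 nodes tolerate 1 fault
assert 2 * maxFailuresSketch(4) + 1 == 3  # NOMINATE quorum for a clear win
assert maxFailuresSketch(4) + 1 == 2      # REELECTION agreement threshold
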
def nymsAddedInQuickSuccession(nodeSet, addedSponsor, looper, sponsor,
                               sponsorWallet):
    usigner = SimpleSigner()
    nym = usigner.verkey
    idy = Identity(identifier=nym)
    sponsorWallet.addSponsoredIdentity(idy)
    # Creating a NYM request with same nym again
    req = idy.ledgerRequest()
    sponsorWallet._pending.appendleft((req, idy.identifier))
    reqs = sponsorWallet.preparePending()
    sponsor.submitReqs(*reqs)

    def check():
        assert sponsorWallet._sponsored[nym].seqNo

    looper.run(eventually(check, timeout=2))

    looper.run(
        eventually(checkNacks,
                   sponsor,
                   req.reqId,
                   "is already added",
                   retryWait=1,
                   timeout=15))
    count = 0
    for node in nodeSet:
        txns = node.domainLedger.getAllTxn()
        for seq, txn in txns.items():
            if txn[TXN_TYPE] == NYM and txn[TARGET_NYM] == usigner.identifier:
                count += 1

    assert (count == len(nodeSet))
def testPrePrepareWithHighSeqNo(looper, nodeSet, propagated1):
    def chk():
        for r in getNonPrimaryReplicas(nodeSet, instId):
            nodeSuspicions = len(getNodeSuspicions(
                    r.node, Suspicions.WRONG_PPSEQ_NO.code))
            assert nodeSuspicions == 1

    def checkPreprepare(replica, viewNo, ppSeqNo, req, numOfPrePrepares):
        assert (replica.prePrepares[viewNo, ppSeqNo]) == (req.clientId, req.reqId, req.digest)

    primary = getPrimaryReplica(nodeSet, instId)
    nonPrimaryReplicas = getNonPrimaryReplicas(nodeSet, instId)
    req = propagated1.reqDigest
    primary.doPrePrepare(req)
    for np in nonPrimaryReplicas:
        looper.run(
            eventually(checkPreprepare, np, primary.viewNo,
                       primary.prePrepareSeqNo - 1, req, 1,
                       retryWait=.5, timeout=10))

    newReqDigest = ReqDigest(req.clientId, req.reqId + 1, req.digest)
    incorrectPrePrepareReq = PrePrepare(instId,
                                        primary.viewNo,
                                        primary.prePrepareSeqNo + 2,
                                        *newReqDigest)
    primary.send(incorrectPrePrepareReq, TPCStat.PrePrepareSent)
    looper.run(eventually(chk, retryWait=1, timeout=50))
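
# The suspicion checked above arises because a replica expects pre-prepare
# sequence numbers to advance by exactly one; the test skips ahead by two.
# An illustrative version of that check, not plenum's actual method:
def isNextPrePrepareSeqNo(lastPpSeqNo, receivedPpSeqNo):
    return receivedPpSeqNo == lastPpSeqNo + 1


assert not isNextPrePrepareSeqNo(5, 7)  # ppSeqNo jumped ahead -> suspicious
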
def testAdd2NewNodes(looper, txnPoolNodeSet, tdirWithPoolTxns, tconf, steward1,
                     stewardWallet, allPluginsPath):
    """
    Add 2 new nodes to trigger replica addition and primary election
    """
    for nodeName in ("Zeta", "Eta"):
        newStewardName = "testClientSteward"+randomString(3)
        newSteward, newStewardWallet, newNode = addNewStewardAndNode(looper,
                                                   steward1,
                                                   stewardWallet,
                                                   newStewardName,
                                                   nodeName,
                                                   tdirWithPoolTxns, tconf,
                                                   allPluginsPath)
        txnPoolNodeSet.append(newNode)
        looper.run(checkNodesConnected(txnPoolNodeSet))
        logger.debug("{} connected to the pool".format(newNode))
        looper.run(eventually(checkNodeLedgersForEquality, newNode,
                              *txnPoolNodeSet[:-1], retryWait=1, timeout=7))

    f = getMaxFailures(len(txnPoolNodeSet))

    def checkFValue():
        for node in txnPoolNodeSet:
            assert node.f == f
            assert len(node.replicas) == (f + 1)

    looper.run(eventually(checkFValue, retryWait=1, timeout=5))
    checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1,
                               timeout=5)
def testOrderingCase1(looper, nodeSet, up, client1, wallet1):
    """
    Scenario -> PRE-PREPARE not received by the replica, Request not received
    for ordering by the replica, but received enough commits to start ordering.
    It queues up the request so that when a PRE-PREPARE is received or the
    request is received for ordering, an order can be triggered
    https://www.pivotaltracker.com/story/show/125239401

    Reproduced by picking a node with no primary replica, making the replica
    ignore the forwarded request, and delaying reception of the PRE-PREPARE
    long enough that enough COMMITs arrive to trigger ordering.
    """
    replica = getNonPrimaryReplicas(nodeSet, instId=0)[0]
    delaysPrePrepareProcessing(replica.node, delay=10, instId=0)

    def doNotProcessReqDigest(self, rd: ReqDigest):
        pass

    patchedMethod = types.MethodType(doNotProcessReqDigest, replica)
    replica.processReqDigest = patchedMethod

    def chk(n):
        assert replica.spylog.count(replica.doOrder.__name__) == n

    sendRandomRequest(wallet1, client1)
    looper.run(eventually(chk, 0, retryWait=1, timeout=5))
    looper.run(eventually(chk, 1, retryWait=1, timeout=15))
def testMultipleInstanceChangeMsgsMarkNodeAsSuspicious(looper, nodeSet, up):
    maliciousNode = nodeSet.Alpha
    for i in range(0, 5):
        maliciousNode.send(InstanceChange(i))

    def chk(instId):
        for node in nodeSet:
            if node.name != maliciousNode.name:
                args = getAllArgs(node, Node.processInstanceChange)
                assert len(args) == 5
                for arg in args:
                    assert arg['frm'] == maliciousNode.name

    for i in range(0, 5):
        looper.run(eventually(chk, i, retryWait=1, timeout=20))

    def g():
        for node in nodeSet:
            if node.name != maliciousNode.name:
                params = getAllArgs(node, Node.reportSuspiciousNode)[0]
                frm, reason = params['nodeName'], params['reason']
                assert frm == maliciousNode.name
                assert isinstance(reason, SuspiciousNode)
                assert len(getNodeSuspicions(node,
                                             Suspicions.FREQUENT_INST_CHNG.code)) == 13

    looper.run(eventually(g, retryWait=1, timeout=20))
def testReqExecWhenReturnedByMaster(tdir_for_func):
    with TestNodeSet(count=4, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            for n in nodeSet:
                n.startKeySharing()
            client1, wallet1 = setupNodesAndClient(looper,
                                                   nodeSet,
                                                   tmpdir=tdir_for_func)
            req = sendRandomRequest(wallet1, client1)
            looper.run(eventually(checkSufficientRepliesRecvd, client1.inBox,
                                  req.reqId, 1,
                                  retryWait=1, timeout=15))
            async def chk():
                for node in nodeSet:
                    entries = node.spylog.getAll(
                        node.processOrdered.__name__)
                    for entry in entries:
                        arg = entry.params['ordered']
                        result = entry.result
                        if arg.instId == node.instances.masterId:
                            assert result
                        else:
                            assert result is None

            looper.run(eventually(chk, timeout=3))
def testAddNewClient(txnPoolNodeSet, tdirWithPoolTxns,
                     poolTxnStewardData, txnPoolCliNodeReg):
    with Looper(debug=True) as looper:
        name, pkseed, sigseed = poolTxnStewardData
        stewardSigner = SimpleSigner(seed=sigseed)
        client = TestClient(name=name, nodeReg=txnPoolCliNodeReg, ha=genHa(),
                            signer=stewardSigner, basedirpath=tdirWithPoolTxns)
        looper.add(client)
        looper.run(client.ensureConnectedToNodes())
        sigseed = b'55555555555555555555555555555555'
        pkseed = b'66666666666666666666666666666666'
        newSigner = SimpleSigner(sigseed)
        priver = Privateer(pkseed)
        req, = client.submit({
            TXN_TYPE: NEW_CLIENT,
            ORIGIN: client.defaultIdentifier,
            TARGET_NYM: newSigner.verstr,
            DATA: {
                "pubkey": priver.pubhex.decode(),
                "alias": "Robert"
            }
        })

        looper.run(eventually(checkSufficientRepliesRecvd, client.inBox,
                              req.reqId, 1,
                              retryWait=1, timeout=5))

        def chk():
            for node in txnPoolNodeSet:
                assert newSigner.verstr in node.clientAuthNr.clients

        looper.run(eventually(chk, retryWait=1, timeout=5))
def testClientRetryRequestWhenAckNotReceived(looper, nodeSet, client1,
                                             wallet1, tconf):
    """
    The client gets disconnected from node say Alpha but does not know it.
    It sends request to all nodes including Alpha, expects ACK and REPLY from
    Alpha too, does not get it, so reconnects to Alpha and sends request again
    and gets REPLY
    """
    alpha = nodeSet.Alpha

    r = alpha.clientstack.getRemote(client1.stackName)
    alpha.clientstack.removeRemote(r)
    req = sendRandomRequest(wallet1, client1)

    def chkAcks():
        for node in nodeSet:
            if node != alpha:
                checkReqAck(client1, node, *req.key)
            else:
                with pytest.raises(AssertionError):
                    checkReqAck(client1, node, *req.key)

    looper.run(eventually(chkAcks, retryWait=1, timeout=3))

    looper.run(eventually(checkReplyCount, client1, *req.key, 4, retryWait=1,
                          timeout=tconf.CLIENT_REQACK_TIMEOUT+5))
def testReplyWhenRequestAlreadyExecuted(looper, nodeSet, client1, sent1):
    """
    When a request has already been executed the previously executed reply
    will be sent again to the client. An acknowledgement will not be sent
    for a repeated request.
    """
    # Since view no is always zero in the current setup
    looper.run(eventually(checkSufficientRepliesRecvd,
                          client1.inBox,
                          sent1.reqId,
                          2,
                          retryWait=.25,
                          timeout=5))
    originalRequestResponsesLen = nodeCount * 2
    duplicateRequestRepliesLen = nodeCount  # for a duplicate request we need
    # to send a reply only, not any ACK
    client1._enqueueIntoAllRemotes(sent1, client1.getSigner())

    def chk():
        assertLength([response for response in client1.inBox
                      if (response[0].get(f.RESULT.nm) and
                      response[0][f.RESULT.nm][f.REQ_ID.nm] == sent1.reqId) or
                      (response[0].get(OP_FIELD_NAME) == REQACK and response[0].get(f.REQ_ID.nm)
                       == sent1.reqId)],
                     originalRequestResponsesLen +
                     duplicateRequestRepliesLen)

    looper.run(eventually(
            chk,
            retryWait=.25,
            timeout=20))
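
# The expected inBox count above is simple arithmetic: the first send yields a
# REQACK plus a REPLY from every node, while the duplicate is answered with a
# REPLY only. With an illustrative 4-node pool:
poolSize = 4
firstSendMsgs = poolSize * 2  # one REQACK and one REPLY per node
duplicateMsgs = poolSize      # REPLY only, no fresh REQACK
assert firstSendMsgs + duplicateMsgs == 12
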
def testReplyWhenRequestAlreadyExecuted(looper, nodeSet, client1, sent1):
    """
    When a request has already been executed the previously executed reply
    will be sent again to the client. An acknowledgement will not be sent
    for a repeated request.
    """
    # Since view no is always zero in the current setup
    looper.run(
        eventually(checkSufficientRepliesRecvd,
                   client1.inBox,
                   sent1.reqId,
                   2,
                   retryWait=.25,
                   timeout=5))
    originalRequestResponsesLen = nodeCount * 2
    duplicateRequestRepliesLen = nodeCount  # for a duplicate request we need
    # to send a reply only, not any ACK
    client1._enqueueIntoAllRemotes(sent1, client1.getSigner())

    def chk():
        assertLength([
            response for response in client1.inBox
            if (response[0].get(f.RESULT.nm)
                and response[0][f.RESULT.nm][f.REQ_ID.nm] == sent1.reqId) or (
                    response[0].get(OP_FIELD_NAME) == REQACK
                    and response[0].get(f.REQ_ID.nm) == sent1.reqId)
        ], originalRequestResponsesLen + duplicateRequestRepliesLen)

    looper.run(eventually(chk, retryWait=.25, timeout=20))
def testClientRetryRequestWhenReplyNotReceived(looper, nodeSet, client1,
                                               wallet1, tconf):
    """
    A node, say Alpha, sends an ACK but does not send a REPLY. The client
    resends the request and gets the REPLY
    """

    alpha = nodeSet.Alpha
    skipped = False
    origTrans = alpha.transmitToClient

    def skipReplyOnce(msg, remoteName):
        nonlocal skipped
        if isinstance(msg, Reply) and not skipped:
            skipped = True
            return
        origTrans(msg, remoteName)

    alpha.transmitToClient = skipReplyOnce
    req = sendRandomRequest(wallet1, client1)
    coros = [partial(checkReqAck, client1, node, *req.key) for node in nodeSet]
    looper.run(eventuallyAll(*coros, retryWait=.5, totalTimeout=3))
    looper.run(eventually(checkReplyCount, client1, *req.key, 3, retryWait=1,
                          timeout=3))
    looper.run(eventually(checkReplyCount, client1, *req.key, 4, retryWait=1,
                          timeout=tconf.CLIENT_REPLY_TIMEOUT + 5))
def testPrePrepareWithHighSeqNo(looper, nodeSet, propagated1):
    def chk():
        for r in getNonPrimaryReplicas(nodeSet, instId):
            nodeSuspicions = len(
                getNodeSuspicions(r.node, Suspicions.WRONG_PPSEQ_NO.code))
            assert nodeSuspicions == 1

    def checkPreprepare(replica, viewNo, ppSeqNo, req, numOfPrePrepares):
        assert (replica.prePrepares[viewNo, ppSeqNo][0]) == \
               (req.identifier, req.reqId, req.digest)

    primary = getPrimaryReplica(nodeSet, instId)
    nonPrimaryReplicas = getNonPrimaryReplicas(nodeSet, instId)
    req = propagated1.reqDigest
    primary.doPrePrepare(req)
    for np in nonPrimaryReplicas:
        looper.run(
            eventually(checkPreprepare,
                       np,
                       primary.viewNo,
                       primary.prePrepareSeqNo - 1,
                       req,
                       1,
                       retryWait=.5,
                       timeout=10))

    newReqDigest = ReqDigest(req.identifier, req.reqId + 1, req.digest)
    incorrectPrePrepareReq = PrePrepare(instId, primary.viewNo,
                                        primary.prePrepareSeqNo + 2,
                                        *newReqDigest, time.time())
    primary.send(incorrectPrePrepareReq, TPCStat.PrePrepareSent)
    looper.run(eventually(chk, retryWait=1, timeout=50))
def testMultipleInstanceChangeMsgsMarkNodeAsSuspicious(looper, nodeSet, up):
    maliciousNode = nodeSet.Alpha
    for i in range(0, 5):
        maliciousNode.send(InstanceChange(i))

    def chk(instId):
        for node in nodeSet:
            if node.name != maliciousNode.name:
                args = getAllArgs(node, Node.processInstanceChange)
                assert len(args) == 5
                for arg in args:
                    assert arg['frm'] == maliciousNode.name

    for i in range(0, 5):
        looper.run(eventually(chk, i, retryWait=1, timeout=20))

    def g():
        for node in nodeSet:
            if node.name != maliciousNode.name:
                params = getAllArgs(node, Node.reportSuspiciousNode)[0]
                frm, reason = params['nodeName'], params['reason']
                assert frm == maliciousNode.name
                assert isinstance(reason, SuspiciousNode)
                assert len(
                    getNodeSuspicions(
                        node, Suspicions.FREQUENT_INST_CHNG.code)) == 13

    looper.run(eventually(g, retryWait=1, timeout=20))
def testReplyWhenRequestAlreadyExecuted(looper, nodeSet, client1, sent1):
    """
    When a request has already been executed the previously executed reply
    will be sent again to the client. An acknowledgement will not be sent
    for a repeated request.
    """
    # Since view no is always zero in the current setup
    looper.run(eventually(checkSufficientRepliesRecvd,
                          client1.inBox,
                          sent1.reqId,
                          2,
                          retryWait=.25,
                          timeout=5))
    originalRequestResponsesLen = nodeCount * 2
    duplicateRequestRepliesLen = nodeCount  # for a duplicate request we need to
    #  send a reply only, not any ACK.
    client1._enqueueIntoAllRemotes(sent1)
    # Since view no is always zero in the current setup
    looper.run(eventually(
            lambda: assertLength([response for response in client1.inBox
                                  if response[0]['reqId'] == sent1.reqId],
                                 originalRequestResponsesLen +
                                 duplicateRequestRepliesLen),
            retryWait=.25,
            timeout=20))
def testNodeRemoveUnknownRemote(allPluginsPath, tdirAndLooper):
    """
    The nodes Alpha and Beta know about each other so they should connect but
    they should remove remote for C when it tries to connect to them
    """

    tdir, looper = tdirAndLooper
    names = ["Alpha", "Beta"]
    logger.debug(names)
    nrg = {n: nodeReg[n] for n in names}
    A, B = [TestNode(name, nrg, basedirpath=tdir,
                     pluginPaths=allPluginsPath)
            for name in names]
    for node in (A, B):
        looper.add(node)
        node.startKeySharing()
    looper.run(checkNodesConnected([A, B]))

    C = TestNode("Gamma", {**nrg, **{"Gamma": nodeReg["Gamma"]}},
                 basedirpath=tdir, pluginPaths=allPluginsPath)
    looper.add(C)
    C.startKeySharing(timeout=20)

    def chk():
        assert not C.nodestack.isKeySharing

    looper.run(eventually(chk, retryWait=2, timeout=21))
    C.stop()

    def chk():
        assert C.name not in B.nodestack.nameRemotes
        assert C.name not in A.nodestack.nameRemotes

    looper.run(eventually(chk, retryWait=2, timeout=5))
def testReqExecWhenReturnedByMaster(tdir_for_func):
    with TestNodeSet(count=4, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            for n in nodeSet:
                n.startKeySharing()
            client1, wallet1 = setupNodesAndClient(looper,
                                                   nodeSet,
                                                   tmpdir=tdir_for_func)
            req = sendRandomRequest(wallet1, client1)
            looper.run(
                eventually(checkSufficientRepliesRecvd,
                           client1.inBox,
                           req.reqId,
                           1,
                           retryWait=1,
                           timeout=15))

            async def chk():
                for node in nodeSet:
                    entries = node.spylog.getAll(node.processOrdered.__name__)
                    for entry in entries:
                        arg = entry.params['ordered']
                        result = entry.result
                        if arg.instId == node.instances.masterId:
                            assert result
                        else:
                            assert result is None

            looper.run(eventually(chk, timeout=3))
def testReplicasRejectSamePrePrepareMsg(looper, nodeSet, client1, wallet1):
    """
    Replicas should not accept PRE-PREPARE for view "v" and prepare sequence
    number "n" if it has already accepted a request with view number "v" and
    sequence number "n"

    """
    numOfNodes = 4
    fValue = getMaxFailures(numOfNodes)
    request1 = sendRandomRequest(wallet1, client1)
    result1 = looper.run(
        eventually(checkSufficientRepliesRecvd,
                   client1.inBox,
                   request1.reqId,
                   fValue,
                   retryWait=1,
                   timeout=5))
    logger.debug("request {} gives result {}".format(request1, result1))
    primaryRepl = getPrimaryReplica(nodeSet)
    logger.debug("Primary Replica: {}".format(primaryRepl))
    logger.debug(
        "Decrementing the primary replica's pre-prepare sequence number by "
        "one...")
    primaryRepl.prePrepareSeqNo -= 1
    request2 = sendRandomRequest(wallet1, client1)
    looper.run(
        eventually(checkPrePrepareReqSent,
                   primaryRepl,
                   request2,
                   retryWait=1,
                   timeout=10))

    nonPrimaryReplicas = getNonPrimaryReplicas(nodeSet)
    logger.debug("Non Primary Replicas: " + str(nonPrimaryReplicas))
    prePrepareReq = PrePrepare(primaryRepl.instId, primaryRepl.viewNo,
                               primaryRepl.prePrepareSeqNo, wallet1.defaultId,
                               request2.reqId, request2.digest, time.time())

    logger.debug("""Checking whether all the non primary replicas have received
                the pre-prepare request with same sequence number""")
    looper.run(
        eventually(checkPrePrepareReqRecvd,
                   nonPrimaryReplicas,
                   prePrepareReq,
                   retryWait=1,
                   timeout=10))
    logger.debug("""Check that none of the non primary replicas didn't send
    any prepare message "
                             in response to the pre-prepare message""")
    for npr in nonPrimaryReplicas:
        with pytest.raises(AssertionError):
            looper.run(
                eventually(checkPrepareReqSent,
                           npr,
                           wallet1.defaultId,
                           request2.reqId,
                           retryWait=1,
                           timeout=10))
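
# A sketch of the duplicate (view, seqNo) guard this test exercises, assuming
# a replica keys its accepted pre-prepares by (viewNo, ppSeqNo) as the
# prePrepares mapping used elsewhere in these tests suggests:
def shouldRejectPrePrepare(prePrepares, viewNo, ppSeqNo):
    return (viewNo, ppSeqNo) in prePrepares


accepted = {(0, 1): "request digest"}
assert shouldRejectPrePrepare(accepted, 0, 1)      # same view and seqNo
assert not shouldRejectPrePrepare(accepted, 0, 2)  # fresh seqNo is fine
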
def changeNodeHa(looper, txnPoolNodeSet, tdirWithPoolTxns,
                 poolTxnData, poolTxnStewardNames, tconf, shouldBePrimary):

    # prepare new ha for node and client stack
    subjectedNode = None
    stewardName = None
    stewardsSeed = None

    for nodeIndex, n in enumerate(txnPoolNodeSet):
        if (shouldBePrimary and n.primaryReplicaNo == 0) or \
                (not shouldBePrimary and n.primaryReplicaNo != 0):
            subjectedNode = n
            stewardName = poolTxnStewardNames[nodeIndex]
            stewardsSeed = poolTxnData["seeds"][stewardName].encode()
            break

    nodeStackNewHA, clientStackNewHA = genHa(2)
    logger.debug("change HA for node: {} to {}".
                 format(subjectedNode.name, (nodeStackNewHA, clientStackNewHA)))

    nodeSeed = poolTxnData["seeds"][subjectedNode.name].encode()

    # change HA
    stewardClient, req = changeHA(looper, tconf, subjectedNode.name, nodeSeed,
                                  nodeStackNewHA, stewardName, stewardsSeed)
    f = getMaxFailures(len(stewardClient.nodeReg))
    looper.run(eventually(checkSufficientRepliesRecvd, stewardClient.inBox,
                          req.reqId, f, retryWait=1, timeout=20))

    # stop node for which HA will be changed
    subjectedNode.stop()
    looper.removeProdable(subjectedNode)

    # start node with new HA
    restartedNode = TestNode(subjectedNode.name, basedirpath=tdirWithPoolTxns,
                             config=tconf, ha=nodeStackNewHA,
                             cliha=clientStackNewHA)
    looper.add(restartedNode)

    txnPoolNodeSet[nodeIndex] = restartedNode
    looper.run(checkNodesConnected(txnPoolNodeSet, overrideTimeout=70))
    ensureElectionsDone(looper, txnPoolNodeSet, retryWait=1, timeout=10)

    # start client and check the node HA
    anotherClient, _ = genTestClient(tmpdir=tdirWithPoolTxns,
                                     usePoolLedger=True)
    looper.add(anotherClient)
    looper.run(eventually(anotherClient.ensureConnectedToNodes))
    stewardWallet = Wallet(stewardName)
    stewardWallet.addIdentifier(signer=SimpleSigner(seed=stewardsSeed))
    sendReqsToNodesAndVerifySuffReplies(looper, stewardWallet, stewardClient, 8)
    looper.run(eventually(checkIfGenesisPoolTxnFileUpdated, *txnPoolNodeSet,
                          stewardClient, anotherClient, retryWait=1,
                          timeout=10))
    looper.removeProdable(stewardClient)
def testReplicasRejectSamePrePrepareMsg(looper, nodeSet, client1, wallet1):
    """
    Replicas should not accept PRE-PREPARE for view "v" and prepare sequence
    number "n" if it has already accepted a request with view number "v" and
    sequence number "n"

    """
    numOfNodes = 4
    fValue = getMaxFailures(numOfNodes)
    request1 = sendRandomRequest(wallet1, client1)
    result1 = looper.run(
        eventually(checkSufficientRepliesRecvd, client1.inBox,
                   request1.reqId, fValue,
                   retryWait=1, timeout=5))
    logger.debug("request {} gives result {}".format(request1, result1))
    primaryRepl = getPrimaryReplica(nodeSet)
    logger.debug("Primary Replica: {}".format(primaryRepl))
    logger.debug(
        "Decrementing the primary replica's pre-prepare sequence number by "
        "one...")
    primaryRepl.lastPrePrepareSeqNo -= 1
    request2 = sendRandomRequest(wallet1, client1)
    looper.run(eventually(checkPrePrepareReqSent, primaryRepl, request2,
                          retryWait=1, timeout=10))

    nonPrimaryReplicas = getNonPrimaryReplicas(nodeSet)
    logger.debug("Non Primary Replicas: " + str(nonPrimaryReplicas))
    prePrepareReq = PrePrepare(
        primaryRepl.instId,
        primaryRepl.viewNo,
        primaryRepl.lastPrePrepareSeqNo,
        wallet1.defaultId,
        request2.reqId,
        request2.digest,
        time.time()
    )

    logger.debug("""Checking whether all the non primary replicas have received
                the pre-prepare request with same sequence number""")
    looper.run(eventually(checkPrePrepareReqRecvd,
                          nonPrimaryReplicas,
                          prePrepareReq,
                          retryWait=1,
                          timeout=10))
    logger.debug("""Check that none of the non primary replicas didn't send
    any prepare message "
                             in response to the pre-prepare message""")
    for npr in nonPrimaryReplicas:
        with pytest.raises(AssertionError):
            looper.run(eventually(checkPrepareReqSent,
                                  npr,
                                  wallet1.defaultId,
                                  request2.reqId,
                                  retryWait=1,
                                  timeout=10))
def testNodeDiscardMessageFromUnknownView(txnPoolNodeSet,
                                          nodeSetWithNodeAddedAfterSomeTxns,
                                          newNodeCaughtUp, tdirWithPoolTxns,
                                          tconf, allPluginsPath):
    """
    Node discards 3-phase and election messages from view nos that it does not
    know of (view nos before it joined the pool)
    :return:
    """
    looper, nodeX, client, wallet, _, _ = nodeSetWithNodeAddedAfterSomeTxns
    viewNo = nodeX.viewNo

    # Delay processing of PRE-PREPARE from all non primary replicas of master
    # so master's performance falls and view changes
    delayNonPrimaries(txnPoolNodeSet, 0, 10)
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 4)
    looper.run(eventually(partial(checkViewNoForNodes, txnPoolNodeSet,
                                  viewNo + 1), retryWait=1, timeout=20))

    newStewardName = "testClientSteward" + randomString(3)
    nodeName = "Theta"
    _, _, nodeTheta = addNewStewardAndNode(looper, client,
                                           wallet,
                                           newStewardName,
                                           nodeName,
                                           tdirWithPoolTxns, tconf,
                                           allPluginsPath)
    txnPoolNodeSet.append(nodeTheta)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    looper.run(client.ensureConnectedToNodes())
    looper.run(eventually(checkNodeLedgersForEquality, nodeTheta,
                          *txnPoolNodeSet[:-1], retryWait=1, timeout=5))
    checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1,
                               timeout=10)
    electMsg = Nomination(nodeX.name, 0, viewNo)
    threePMsg = PrePrepare(0,
                           viewNo,
                           10,
                           wallet.defaultId,
                           wallet._getIdData().lastReqId + 1,
                           "random digest",
                           time.time())
    ridTheta = nodeX.nodestack.getRemote(nodeTheta.name).uid
    nodeX.send(electMsg, ridTheta)
    nodeX.send(threePMsg, ridTheta)
    nodeX.send(electMsg, ridTheta)
    looper.run(eventually(checkDiscardMsg, [nodeTheta, ], electMsg,
                          'un-acceptable viewNo', retryWait=1, timeout=5))
    nodeX.send(threePMsg, ridTheta)
    looper.run(eventually(checkDiscardMsg, [nodeTheta, ], threePMsg,
                          'un-acceptable viewNo', retryWait=1, timeout=5))
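
# The discards above hinge on a simple acceptance rule: a node only processes
# election and 3-phase messages for view numbers it knows about, so anything
# from a view before it joined is dropped. An illustrative version of the rule:
def isAcceptableViewNo(msgViewNo, currentViewNo):
    return msgViewNo >= currentViewNo


assert not isAcceptableViewNo(0, 1)  # message from before the node joined
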
def testPrimaryElectionWithAClearWinner(electContFixture, looper,
                                        keySharedNodes):
    """
    Primary selection (Sunny Day)
    A, B, C, D, E
    A, B, C, D startup. E is lagging.
    A sees the minimum number of nodes first, and then sends out a NOMINATE(A) message
    B, C, D all see the NOMINATE(A) message from A, and respond with NOMINATE(A) message to all other nodes

    A sees three other NOMINATE(A) votes (from B, C, D)
    A sees that A is the clear winner (2f+1 total), and sends PRIMARY(A) to all nodes

    B sees two more NOMINATE(A) votes (from C and D)
    B sees that A is the clear winner (2f+1 total), and sends PRIMARY(A) to all nodes

    C sees two more NOMINATE(A) votes (from B and D)
    C sees that A is the clear winner (2f+1 total), and sends PRIMARY(A) to all nodes

    D sees two more NOMINATE(A) votes (from B and C)
    D sees that A is the clear winner (2f+1 total), and sends PRIMARY(A) to all nodes

    A sees at least two other PRIMARY(A) votes (3 including its own)
    selects A as primary

    B sees at least two other PRIMARY(A) votes (3 including its own)
    selects A as primary

    C sees at least two other PRIMARY(A) votes (3 including its own)
    selects A as primary

    D sees at least two other PRIMARY(A) votes (3 including its own)
    selects A as primary
    """

    nodeSet = keySharedNodes
    A, B, C, D = nodeSet.nodes.values()
    nodesBCD = [B, C, D]

    checkPoolReady(looper, nodeSet)

    # Checking whether one of the replicas of Node A nominated itself
    looper.run(eventually(checkNomination, A, A.name, retryWait=1, timeout=10))

    for n in nodesBCD:
        # Checking whether Node B, C and D nominated Node A
        looper.run(
            eventually(checkNomination, n, A.name, retryWait=1, timeout=10))

    checkProtocolInstanceSetup(looper=looper,
                               nodes=nodeSet,
                               retryWait=1,
                               timeout=10)
    assert A.hasPrimary
def testRequestOlderThanStableCheckpointRemoved(
    chkFreqPatched, looper, txnPoolNodeSet, client1, wallet1, client1Connected
):
    reqs = sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, CHK_FREQ - 1, 1)
    looper.run(eventually(chkChkpoints, txnPoolNodeSet, 1, retryWait=1))
    checkRequestCounts(txnPoolNodeSet, len(reqs))
    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, 1, 1)
    looper.run(eventually(chkChkpoints, txnPoolNodeSet, 1, 0, retryWait=1))
    checkRequestCounts(txnPoolNodeSet, 0)

    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, 3 * CHK_FREQ + 1, 1)
    looper.run(eventually(chkChkpoints, txnPoolNodeSet, 2, 0, retryWait=1))
    checkRequestCounts(txnPoolNodeSet, 1)
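
# A sketch of the garbage collection the test checks: once a checkpoint
# becomes stable, requests at or below its sequence number can be dropped.
# The period here is illustrative, not the patched CHK_FREQ value.
CHK_FREQ_SKETCH = 5


def requestsRemaining(totalReqs, stableCheckpointSeqNo):
    return max(0, totalReqs - stableCheckpointSeqNo)


assert requestsRemaining(CHK_FREQ_SKETCH, CHK_FREQ_SKETCH) == 0
assert requestsRemaining(3 * CHK_FREQ_SKETCH + 1, 3 * CHK_FREQ_SKETCH) == 1
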
def testPrimaryElectionWithAClearWinner(electContFixture, looper, keySharedNodes):
    """
    Primary selection (Sunny Day)
    A, B, C, D, E
    A, B, C, D startup. E is lagging.
    A sees the minimum number of nodes first, and then sends out a NOMINATE(A) message
    B, C, D all see the NOMINATE(A) message from A, and respond with NOMINATE(A) message to all other nodes

    A sees three other NOMINATE(A) votes (from B, C, D)
    A sees that A is the clear winner (2f+1 total), and sends PRIMARY(A) to all nodes

    B sees two more NOMINATE(A) votes (from C and D)
    B sees that A is the clear winner (2f+1 total), and sends PRIMARY(A) to all nodes

    C sees two more NOMINATE(A) votes (from B and D)
    C sees that A is the clear winner (2f+1 total), and sends PRIMARY(A) to all nodes

    D sees two more NOMINATE(A) votes (from B and C)
    D sees that A is the clear winner (2f+1 total), and sends PRIMARY(A) to all nodes

    A sees at least two other PRIMARY(A) votes (3 including its own)
    selects A as primary

    B sees at least two other PRIMARY(A) votes (3 including its own)
    selects A as primary

    C sees at least two other PRIMARY(A) votes (3 including its own)
    selects A as primary

    D sees at least two other PRIMARY(A) votes (3 including its own)
    selects A as primary
    """

    nodeSet = keySharedNodes
    A, B, C, D = nodeSet.nodes.values()
    nodesBCD = [B, C, D]

    checkPoolReady(looper, nodeSet)

    # Checking whether one of the replicas of Node A nominated itself
    looper.run(eventually(checkNomination, A, A.name, retryWait=1, timeout=10))

    for n in nodesBCD:
        # Checking whether Node B, C and D nominated Node A
        looper.run(eventually(checkNomination, n, A.name, retryWait=1,
                              timeout=10))

    checkProtocolInstanceSetup(looper=looper, nodes=nodeSet, retryWait=1,
                               timeout=10)
    assert A.hasPrimary
def testElectionsAfterViewChange(delayedPerf, looper: Looper,
                                 nodeSet: TestNodeSet, up, client1):
    """
    Test that a primary election does happen after a view change
    """

    # Delay processing of PRE-PREPARE from all non primary replicas of master
    # so master's throughput falls
    # and view changes
    nonPrimReps = getNonPrimaryReplicas(nodeSet, 0)
    for r in nonPrimReps:
        r.node.nodeIbStasher.delay(ppDelay(10, 0))

    sendReqsToNodesAndVerifySuffReplies(looper, client1, 4)

    # Ensure view change happened for both node and its primary elector
    for node in nodeSet:
        looper.run(
            eventually(partial(checkViewChangeInitiatedForNode, node, 0),
                       retryWait=1,
                       timeout=20))

    # Ensure elections are done again and pool is setup again with appropriate
    # protocol instances and each protocol instance is setup properly too
    checkProtocolInstanceSetup(looper, nodeSet, retryWait=1, timeout=30)
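
# The view change this test waits for comes from throughput monitoring: when
# the master replica is noticeably slower than its backups, nodes send
# INSTANCE_CHANGE for viewNo + 1. A sketch of that trigger with an
# illustrative threshold; plenum tunes this via its monitor parameters.
def masterIsSlowSketch(masterThroughput, avgBackupThroughput, ratio=0.5):
    return masterThroughput < ratio * avgBackupThroughput


assert masterIsSlowSketch(10.0, 40.0)      # master far behind: view change
assert not masterIsSlowSketch(35.0, 40.0)  # comparable throughput: no change
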
def checkRequest(cli, looper, operation):
    cName = "Joe"
    cli.enterCmd("new client {}".format(cName))
    # Let client connect to the nodes
    looper.runFor(3)
    # Send request to all nodes
    cli.enterCmd('client {} send {}'.format(cName, operation))
    client = cli.clients[cName]
    f = getMaxFailures(len(cli.nodes))
    # Ensure client gets back the replies
    looper.run(eventually(
            checkSufficientRepliesRecvd,
            client.inBox,
            client.lastReqId,
            f,
            retryWait=2,
            timeout=30))

    txn, status = client.getReply(client.lastReqId)

    # Ensure the cli shows appropriate output
    cli.enterCmd('client {} show {}'.format(cName, client.lastReqId))
    printeds = cli.printeds
    printedReply = printeds[1]
    printedStatus = printeds[0]
    txnTimePattern = "\'txnTime\': \d+\.*\d*"
    txnIdPattern = "\'txnId\': '" + txn['txnId'] + "'"
    # txnPattern1 = "Reply for the request: \{" + timePattern + ", " + txnIdPattern + "\}"
    # txnPattern2 = "Reply for the request: \{" + txnIdPattern + ", " + timePattern + "\}"
    # assert re.match(txnPattern1, printedReply['msg']) or \
    #        re.match(txnPattern2, printedReply['msg'])
    assert re.search(txnIdPattern, printedReply['msg'])
    assert re.search(txnTimePattern, printedReply['msg'])
    assert printedStatus['msg'] == "Status: {}".format(status)
def testClientNotRetryRequestWhenReqnackReceived(looper, nodeSet, client1,
                                                 wallet1, tconf):
    """
    A node sends REQNACK. The client does not resend Request.
    """

    alpha = nodeSet.Alpha
    origProcReq = alpha.processRequest
    origTrans = alpha.transmitToClient

    def nackReq(self, req, frm):
        self.transmitToClient(RequestNack(*req.key, reason="testing"), frm)

    def onlyTransNack(msg, remoteName):
        if not isinstance(msg, RequestNack):
            return
        origTrans(msg, remoteName)

    alpha.clientMsgRouter.routes[Request] = types.MethodType(nackReq, alpha)
    alpha.transmitToClient = onlyTransNack

    totalResends = client1.spylog.count(client1.resendRequests.__name__)
    req = sendRandomRequest(wallet1, client1)
    # Wait till ACK timeout
    looper.runFor(tconf.CLIENT_REQACK_TIMEOUT+1)
    assert client1.spylog.count(client1.resendRequests.__name__) == totalResends
    # Wait till REPLY timeout
    looper.runFor(tconf.CLIENT_REPLY_TIMEOUT - tconf.CLIENT_REQACK_TIMEOUT + 1)
    assert client1.spylog.count(client1.resendRequests.__name__) == totalResends
    looper.run(eventually(checkReplyCount, client1, *req.key, 3, retryWait=1,
                          timeout=3))
    alpha.clientMsgRouter.routes[Request] = origProcReq
    alpha.transmitToClient = origTrans
def checkAcceptInvitation(emptyLooper, nonce, inviteeAgent: WalletedAgent,
                          inviterAgentAndWallet, linkName):
    """
    Assumes link identified by linkName is already created
    """
    assert nonce
    inviterAgent, inviterWallet = inviterAgentAndWallet  # type: WalletedAgent, Wallet

    inviteeWallet = inviteeAgent.wallet
    inviteeAgent.connectTo(linkName)
    ensureAgentsConnected(emptyLooper, inviteeAgent, inviterAgent)

    inviteeAgent.acceptInvitation(linkName)
    inviteeAcceptanceId = inviteeWallet.getLink(linkName,
                                                required=True).localIdentifier
    internalId = inviterAgent.getInternalIdByInvitedNonce(nonce)

    def chk():
        link = inviterWallet.getLinkByInternalId(internalId)
        assert link
        # if not link:
        #     raise RuntimeError("Link not found for internal ID {}".
        #                        format(internalId))
        # TODO: Get link from invitee wallet to check.
        assert link.remoteIdentifier == inviteeAcceptanceId
        assert link.remoteEndPoint[1] == inviteeAgent.endpoint.ha[1]

    emptyLooper.run(eventually(chk))
def testDoNotSendInstChngMsgIfMasterDoesntSeePerformanceProblem(
        nodeSet, looper, ensureView):
    """
    A node that received an INSTANCE_CHANGE message must not send an
    INSTANCE_CHANGE message if it doesn't observe too much difference in
    performance between its replicas.
    """

    curViewNo = ensureView

    # Count sent instance changes of all nodes
    sentInstChanges = {}
    instChngMethodName = Node.sendInstanceChange.__name__
    for n in nodeSet:
        sentInstChanges[n.name] = n.spylog.count(instChngMethodName)

    # Send an instance change message to all nodes
    icMsg = InstanceChange(curViewNo)
    nodeSet.Alpha.send(icMsg)

    # Check that the message is discarded: the view number must not change
    looper.run(eventually(checkViewNoForNodes, nodeSet, timeout=3))
    # No node should have sent a view change and thus must not have called
    # `sendInstanceChange`
    for n in nodeSet:
        assert n.spylog.count(instChngMethodName) == \
                   sentInstChanges.get(n.name, 0)
def testNonPrimarySendsAPrePrepare(looper, nodeSet, setup, propagated1):
    primaryReplica = getPrimaryReplica(nodeSet, instId)
    nonPrimaryReplicas = getNonPrimaryReplicas(nodeSet, instId)
    firstNpr = nonPrimaryReplicas[0]
    remainingNpr = nonPrimaryReplicas[1:]

    def sendPrePrepareFromNonPrimary(replica):
        firstNpr.doPrePrepare(propagated1.reqDigest)

        return PrePrepare(
                replica.instId,
                firstNpr.viewNo,
                firstNpr.prePrepareSeqNo,
                propagated1.identifier,
                propagated1.reqId,
                propagated1.digest,
                time.time())

    ppr = sendPrePrepareFromNonPrimary(firstNpr)

    def chk():
        for r in (primaryReplica, *remainingNpr):
            recvdPps = recvdPrePrepare(r)
            assert len(recvdPps) == 1
            assert recvdPps[0]['pp'][:-1] == ppr[:-1]
            nodeSuspicions = len(getNodeSuspicions(
                r.node, Suspicions.PPR_FRM_NON_PRIMARY.code))
            assert nodeSuspicions == 1

    looper.run(eventually(chk,
                          retryWait=.5, timeout=5))
def testNodeCatchupAfterRestart(newNodeCaughtUp, txnPoolNodeSet,
                                nodeSetWithNodeAddedAfterSomeTxns):
    """
    A node that restarts after some transactions should eventually get the
    transactions which happened while it was down
    :return:
    """

    looper, newNode, client, wallet, _, _ = nodeSetWithNodeAddedAfterSomeTxns
    logger.debug("Stopping node {} with pool ledger size {}".
                 format(newNode, newNode.poolManager.txnSeqNo))
    ensureNodeDisconnectedFromPool(looper, txnPoolNodeSet, newNode)
    # for n in txnPoolNodeSet[:4]:
    #     for r in n.nodestack.remotes.values():
    #         if r.name == newNode.name:
    #             r.removeStaleCorrespondents()
    # looper.run(eventually(checkNodeDisconnectedFrom, newNode.name,
    #                       txnPoolNodeSet[:4], retryWait=1, timeout=5))
    # TODO: Check if the node has really stopped processing requests?
    logger.debug("Sending requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
    logger.debug("Starting the stopped node, {}".format(newNode))
    newNode.start(looper.loop)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    looper.run(eventually(checkNodeLedgersForEquality, newNode,
                          *txnPoolNodeSet[:4], retryWait=1, timeout=15))
def testPrimaryElectionCase2(case2Setup, looper, keySharedNodes):
    """
    Case 2 - A node making nominations for multiple other nodes. Consider 4
    nodes A, B, C, and D. Let's say node B is malicious and nominates node C
    to all nodes. Again, node B nominates node D to all nodes.
    """
    nodeSet = keySharedNodes
    A, B, C, D = nodeSet.nodes.values()

    looper.run(checkNodesConnected(nodeSet))

    # Node B sends multiple NOMINATE msgs but only after A has nominated itself
    looper.run(eventually(checkNomination, A, A.name, retryWait=.25,
                          timeout=1))

    instId = getSelfNominationByNode(A)

    BRep = Replica.generateName(B.name, instId)
    CRep = Replica.generateName(C.name, instId)
    DRep = Replica.generateName(D.name, instId)

    # Node B first sends NOMINATE msgs for Node C to all nodes
    B.send(Nomination(CRep, instId, B.viewNo))
    # Node B sends NOMINATE msgs for Node D to all nodes
    B.send(Nomination(DRep, instId, B.viewNo))

    # Ensure elections are done
    ensureElectionsDone(looper=looper, nodes=nodeSet, retryWait=1, timeout=45)

    # Nodes A, C and D (node B is malicious anyway, so it is not considered)
    # should all have the nomination for node C from node B, since node B
    # nominated node C first
    for node in [A, C, D]:
        assert node.elector.nominations[instId][BRep] == CRep
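
The final assertion holds only if the elector keeps the first NOMINATE it sees from each sender and ignores later ones. A minimal sketch of that rule (a hypothetical helper, not PrimaryElector's actual method):

def recordNomination(nominations: dict, frm: str, candidate: str) -> str:
    # The first nomination from `frm` wins; a real elector would also
    # raise a suspicion when a duplicate arrives.
    nominations.setdefault(frm, candidate)
    return nominations[frm]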
Esempio n. 42
def testOneNodeAltersAClientRequest(looper,
                                    nodeSet,
                                    setup,
                                    evilAlpha,
                                    sent1):
    checkPropagated(looper, nodeSet, sent1, faultyNodes)

    goodNodes = setup.goodNodes

    def check():
        for node in goodNodes:

            # ensure the nodes are suspicious of Alpha
            params = node.spylog.getLastParams(TestNode.reportSuspiciousNode)
            frm = params["nodeName"]
            reason = params["reason"]
            assert frm == 'Alpha'
            assert reason == InvalidSignature.reason

            # ensure Alpha's propagates were ignored by the other nodes
            key = sent1.clientId, sent1.reqId
            props = node.requests[key].propagates
            assert 'Alpha' not in props
            for good in goodNodes:
                assert good.name in props

    looper.run(eventually(check, retryWait=1, timeout=10))
Esempio n. 44
def addNewNode(looper, stewardClient, stewardWallet, newNodeName, tdir, tconf,
               allPluginsPath=None, autoStart=True):
    sigseed = randomString(32).encode()
    nodeSigner = SimpleSigner(seed=sigseed)

    (nodeIp, nodePort), (clientIp, clientPort) = genHa(2)

    op = {
        TXN_TYPE: NEW_NODE,
        TARGET_NYM: nodeSigner.identifier,
        DATA: {
            NODE_IP: nodeIp,
            NODE_PORT: nodePort,
            CLIENT_IP: clientIp,
            CLIENT_PORT: clientPort,
            ALIAS: newNodeName
        }
    }

    req = stewardWallet.signOp(op)
    stewardClient.submitReqs(req)

    nodeCount = len(stewardClient.nodeReg)
    looper.run(eventually(checkSufficientRepliesRecvd, stewardClient.inBox,
                          req.reqId, 1,
                          retryWait=1, timeout=3 * nodeCount))
    initLocalKeep(newNodeName, tdir, sigseed, override=True)
    node = TestNode(newNodeName, basedirpath=tdir, config=tconf,
                    ha=(nodeIp, nodePort), cliha=(clientIp, clientPort),
                    pluginPaths=allPluginsPath)
    if autoStart:
        looper.add(node)
    return node
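
A typical call, assuming steward fixtures and a temp dir from the surrounding test setup (the names below are illustrative):

# Hypothetical usage; looper, steward, stewardWallet, tdir, tconf and
# txnPoolNodeSet would come from the surrounding fixtures.
newNode = addNewNode(looper, steward, stewardWallet, "Epsilon", tdir, tconf)
looper.run(checkNodesConnected(txnPoolNodeSet + [newNode]))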
Esempio n. 45
def testNodeRequestingTxns(txnPoolNodeSet, nodeCreatedAfterSomeTxns):
    """
    A newly joined node catches up by sending catchup requests to the other
    nodes, but one of them does not reply, so the new node cannot complete
    the process until the timeout expires, after which it requests the
    missing transactions.
    """
    looper, newNode, client, wallet, _, _ = nodeCreatedAfterSomeTxns
    # Stop nodes from telling the clients about the newly joined node, so
    # that the clients don't send any requests to it
    for node in txnPoolNodeSet:
        node.sendPoolInfoToClients = types.MethodType(lambda x, y: None, node)

    txnPoolNodeSet.append(newNode)

    def ignoreCatchupReq(self, req, frm):
        logger.info("{} being malicious and ignoring catchup request {} "
                    "from {}".format(self, req, frm))

    # One of the nodes does not process catchup requests.
    txnPoolNodeSet[0].nodeMsgRouter.routes[CatchupReq] = types.MethodType(
        ignoreCatchupReq, txnPoolNodeSet[0].ledgerManager)
    sendRandomRequests(wallet, client, 10)
    looper.run(checkNodesConnected(txnPoolNodeSet, overrideTimeout=60))
    looper.run(eventually(checkNodeLedgersForEquality, newNode,
                          *txnPoolNodeSet[:-1], retryWait=1, timeout=90))
Esempio n. 46
def testReplyWhenRepliesFromExactlyFPlusOneNodesAreSame(looper, client1):
    """
    When only :math:`f+1` replies from the nodes are matching, the client
    would accept the reply
    """
    request = sendRandomRequest(client1)
    # leave exactly f + 1 replies correct and modify the rest so that
    # they carry different operations
    looper.run(
        eventually(assertLength,
                   client1.inBox,
                   2 * nodeCount * request.reqId,
                   retryWait=.25,
                   timeout=15))

    replies = (msg for msg, frm in client1.inBox if msg[OP_FIELD_NAME] == REPLY
               and msg[f.RESULT.nm][f.REQ_ID.nm] == request.reqId)

    # change two responses to something different
    for i in range(2):
        msg = next(replies)
        msg[f.RESULT.nm][TXN_ID] = str(i) + "Some random id"

    checkResponseCorrectnessFromNodes(client1.inBox, request.reqId, F)
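
The f + 1 threshold follows from the BFT bound n >= 3f + 1, which is presumably what getMaxFailures computes. A quick worked check:

def maxFailures(nodeCount: int) -> int:
    # Largest f satisfying nodeCount >= 3f + 1.
    return (nodeCount - 1) // 3

# In a 4-node pool f = 1, so f + 1 = 2 matching replies are enough for
# the client, leaving two replies that the test can safely corrupt.
assert maxFailures(4) == 1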
Esempio n. 47
def testClientNotRetryingRequestAfterMaxTriesDone(looper, nodeSet, client1,
                                                 wallet1, tconf):
    """
    A client sends a request to a node but the node never responds.
    The client resends the request, but only as many times as its
    configuration allows, and no more
    """
    alpha = nodeSet.Alpha
    origTrans = alpha.transmitToClient

    def dontTransmitReply(msg, remoteName):
        if isinstance(msg, Reply):
            return
        origTrans(msg, remoteName)

    alpha.transmitToClient = dontTransmitReply

    totalResends = client1.spylog.count(client1.resendRequests.__name__)
    req = sendRandomRequest(wallet1, client1)
    # Wait for more than REPLY timeout
    looper.runFor((tconf.CLIENT_MAX_RETRY_REPLY+2)*tconf.CLIENT_REPLY_TIMEOUT+2)
    looper.run(eventually(checkReplyCount, client1, *req.key, 3, retryWait=1,
                          timeout=3))
    assert client1.spylog.count(client1.resendRequests.__name__) == \
        (totalResends + tconf.CLIENT_MAX_RETRY_REPLY)
    assert req.key not in client1.expectingAcksFor
    assert req.key not in client1.expectingRepliesFor
    alpha.transmitToClient = origTrans
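
The runFor duration above is sized from the retry budget: roughly CLIENT_MAX_RETRY_REPLY (plus some slack) windows of CLIENT_REPLY_TIMEOUT each. A minimal sketch of such a resend loop being exhausted (illustrative, not the client's actual code):

import time

def resendUntilBudgetSpent(send, gotReply, maxRetries: int,
                           replyTimeout: float) -> bool:
    # Hypothetical: one resend per timeout window, at most `maxRetries`
    # times; after that the request is no longer expected to be answered.
    for _ in range(maxRetries):
        if gotReply():
            return True
        time.sleep(replyTimeout)
        send()
    return gotReply()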
Esempio n. 49
def testPrimaryElectionCase4(case4Setup, looper):
    """
    Case 4 - A node making multiple primary declarations for a particular node.
    Consider 4 nodes A, B, C and D. Let's say node B is malicious and is
    repeatedly declaring node D as primary
    """
    allNodes = case4Setup
    A, B, C, D = allNodes

    looper.run(checkNodesConnected(allNodes))

    # Node B sends multiple declarations of node D's 0th protocol instance as
    # primary to all nodes
    for i in range(5):
        B.send(Primary(D.name, 0, B.viewNo))

    # No node among A, C and D (node B is malicious anyway, so it is not
    # considered) should have more than one primary declaration for node D,
    # since node D is slow; the one declaration for node D that nodes A, C
    # and D might hold would have come from node B
    def x(node):
        primDecs = list(node.elector.primaryDeclarations[0].values())
        assert primDecs.count(D.name) <= 1

    for node in (A, C, D):
        looper.run(eventually(x, node, retryWait=.5, timeout=2))

    ensureElectionsDone(looper=looper, nodes=allNodes,
                        retryWait=1, timeout=45)

    # Node D should not have any primary replica
    assert not D.hasPrimary
Esempio n. 50
def checkRequest(cli, looper, operation):
    cName = "Joe"
    cli.enterCmd("new client {}".format(cName))
    # Let client connect to the nodes
    looper.runFor(3)
    # Send request to all nodes
    cli.enterCmd('client {} send {}'.format(cName, operation))
    client = cli.clients[cName]
    f = getMaxFailures(len(cli.nodes))
    # Ensure client gets back the replies
    looper.run(eventually(
            checkSufficientRepliesRecvd,
            client.inBox,
            client.lastReqId,
            f,
            retryWait=2,
            timeout=30))

    txn, status = client.getReply(client.lastReqId)

    # Ensure the cli shows appropriate output
    cli.enterCmd('client {} show {}'.format(cName, client.lastReqId))
    printeds = cli.printeds
    printedReply = printeds[1]
    printedStatus = printeds[0]
    assert printedReply['msg'] == "Reply for the request: {{'txnId': '{}" \
                                  "'}}".format(txn['txnId'])
    assert printedStatus['msg'] == "Status: {}".format(status)
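
Esempio n. 51 below is a later variant of the same helper: the exact-string assertion above breaks as soon as the reply also carries a txnTime or the dict keys change order, so the variant searches for the txnId and txnTime fields with regular expressions instead.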
Esempio n. 51
def checkRequest(cli, looper, operation):
    cName = "Joe"
    cli.enterCmd("new client {}".format(cName))
    # Let client connect to the nodes
    looper.runFor(3)
    # Send request to all nodes
    cli.enterCmd('client {} send {}'.format(cName, operation))
    client = cli.clients[cName]
    f = getMaxFailures(len(cli.nodes))
    # Ensure client gets back the replies
    looper.run(
        eventually(checkSufficientRepliesRecvd,
                   client.inBox,
                   client.lastReqId,
                   f,
                   retryWait=2,
                   timeout=30))

    txn, status = client.getReply(client.lastReqId)

    # Ensure the cli shows appropriate output
    cli.enterCmd('client {} show {}'.format(cName, client.lastReqId))
    printeds = cli.printeds
    printedReply = printeds[1]
    printedStatus = printeds[0]
    # Use raw strings for the regexes; the reply dict may order its keys
    # either way, so search for each field independently.
    txnTimePattern = r"'txnTime': \d+\.*\d*"
    txnIdPattern = r"'txnId': '" + txn['txnId'] + "'"
    assert re.search(txnIdPattern, printedReply['msg'])
    assert re.search(txnTimePattern, printedReply['msg'])
    assert printedStatus['msg'] == "Status: {}".format(status)
Esempio n. 52
def changeNodeKeys(looper, stewardClient, stewardWallet, node, verkey):
    nodeNym = hexToFriendly(node.nodestack.local.signer.verhex)

    op = {
        TXN_TYPE: CHANGE_KEYS,
        TARGET_NYM: nodeNym,
        DATA: {
            VERKEY: verkey,
            ALIAS: node.name
        }
    }
    req = stewardWallet.signOp(op)
    stewardClient.submitReqs(req)

    looper.run(
        eventually(checkSufficientRepliesRecvd,
                   stewardClient.inBox,
                   req.reqId,
                   1,
                   retryWait=1,
                   timeout=5))
    node.nodestack.clearLocalRoleKeep()
    node.nodestack.clearRemoteRoleKeeps()
    node.nodestack.clearAllDir()
    node.clientstack.clearLocalRoleKeep()
    node.clientstack.clearRemoteRoleKeeps()
    node.clientstack.clearAllDir()
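
A hypothetical rotation flow: derive a fresh verkey from a new seed, submit the key-change txn, then restart the node with the new seed (the signer attribute below is an assumption):

# Hypothetical usage; randomString and SimpleSigner as used in the
# addNewNode example above.
sigseed = randomString(32).encode()
newVerkey = SimpleSigner(seed=sigseed).verkey  # assumed attribute
changeNodeKeys(looper, stewardClient, stewardWallet, node, newVerkey)
# Clearing the keeps means the node must be restarted with the new seed.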
Esempio n. 54
def addNewClient(role, looper, creatorClient: Client, creatorWallet: Wallet,
                 name: str):
    wallet = Wallet(name)
    wallet.addIdentifier()
    idr = wallet.defaultId

    op = {
        TXN_TYPE: NYM,
        ROLE: role,
        TARGET_NYM: idr,
        ALIAS: name,
        VERKEY: wallet.getVerkey(idr)
    }

    req = creatorWallet.signOp(op)
    creatorClient.submitReqs(req)

    nodeCount = len(creatorClient.nodeReg)
    looper.run(
        eventually(checkSufficientRepliesRecvd,
                   creatorClient.inBox,
                   req.reqId,
                   1,
                   retryWait=1,
                   timeout=3 * nodeCount))
    return wallet
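
A typical call, assuming a role constant from the txn module (the alias is illustrative):

# Hypothetical usage; STEWARD is assumed to be a role constant.
newStewardWallet = addNewClient(STEWARD, looper, stewardClient,
                                stewardWallet, "NewSteward1")
assert newStewardWallet.defaultId is not None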
Esempio n. 55
def changeNodeHa(looper, stewardClient, stewardWallet, node, nodeHa, clientHa):
    nodeNym = hexToFriendly(node.nodestack.local.signer.verhex)
    (nodeIp, nodePort), (clientIp, clientPort) = nodeHa, clientHa
    op = {
        TXN_TYPE: CHANGE_HA,
        TARGET_NYM: nodeNym,
        DATA: {
            NODE_IP: nodeIp,
            NODE_PORT: nodePort,
            CLIENT_IP: clientIp,
            CLIENT_PORT: clientPort,
            ALIAS: node.name
        }
    }

    req = stewardWallet.signOp(op)
    stewardClient.submitReqs(req)
    looper.run(
        eventually(checkSufficientRepliesRecvd,
                   stewardClient.inBox,
                   req.reqId,
                   1,
                   retryWait=1,
                   timeout=5))
    node.nodestack.clearLocalKeep()
    node.nodestack.clearRemoteKeeps()
    node.clientstack.clearLocalKeep()
    node.clientstack.clearRemoteKeeps()
Esempio n. 56
def testProverGetsCredDef(credentialDefinitionAdded, userWalletA, tdir,
                          nodeSet, looper, sponsorWallet, credDef,
                          curiousClient):
    """
    A credential definition is received
    """

    # Don't move the below import outside of this method, else the client
    # class doesn't get reloaded, and hence doesn't get updated with the
    # correct plugin class/methods, which causes an error (a bug is filed
    # for a permanent solution: #130181205).
    # from sovrin.test.helper import genTestClient

    definition = credDef.get(serFmt=SerFmt.base58)
    credDefKey = (definition[NAME], definition[VERSION],
                  sponsorWallet.defaultId)
    req = userWalletA.requestCredDef(credDefKey, userWalletA.defaultId)
    curiousClient.submitReqs(req)

    looper.run(
        eventually(checkSufficientRepliesRecvd,
                   curiousClient.inBox,
                   req.reqId,
                   nodeSet.f,
                   retryWait=1,
                   timeout=5))
    reply, status = curiousClient.getReply(req.reqId)
    assert status == "CONFIRMED"
    recvdCredDef = json.loads(reply[DATA])
    assert recvdCredDef[NAME] == definition[NAME]
    assert recvdCredDef[VERSION] == definition[VERSION]
    assert recvdCredDef[ATTR_NAMES].split(",") == definition[ATTR_NAMES]
Esempio n. 57
def addNodeBackAndCheck(nodeIdx: int, expectedStatus: Status):
    logging.info("Add back the {} node and see status of {}".
                 format(ordinal(nodeIdx + 1), expectedStatus))
    addNodeBack(nodeSet, looper, nodeNames[nodeIdx])
    looper.run(eventually(checkNodeStatusRemotesAndF, expectedStatus,
                          nodeIdx, retryWait=1, timeout=30))
Esempio n. 58
def testInstChangeWithMoreReqLat(looper, setup):
    nodes = setup.nodes
    for node in nodes:
        node.checkPerformance()
        assert any(getAllReturnVals(node.monitor,
                                    node.monitor.isMasterReqLatencyTooHigh))
    looper.run(eventually(partial(checkViewNoForNodes, nodes, 1),
                          retryWait=1, timeout=20))
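
isMasterReqLatencyTooHigh presumably flags the master replica when its average request latency exceeds the backups' by a configured threshold. A minimal sketch under that assumption (names and the default threshold are illustrative):

def isMasterReqLatencyTooHighSketch(masterAvgLat: float,
                                    backupAvgLats: list,
                                    threshold: float = 3.0) -> bool:
    # Hypothetical: the master is "too slow" when it lags the mean
    # backup latency by more than `threshold` seconds.
    if not backupAvgLats:
        return False
    avgBackup = sum(backupAvgLats) / len(backupAvgLats)
    return masterAvgLat - avgBackup > threshold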