Example #1
def setupNodesAndClient(looper: Looper, nodes: Sequence[TestNode], nodeReg=None,
                        tmpdir=None):
    looper.run(checkNodesConnected(nodes))
    timeout = 15 + 2 * (len(nodes))
    ensureElectionsDone(looper=looper, nodes=nodes, retryWait=1,
                        timeout=timeout)
    return setupClient(looper, nodes, nodeReg=nodeReg, tmpdir=tmpdir)
Example #2
def setupNodesAndClient(looper: Looper, nodes: Sequence[TestNode], nodeReg=None,
                        tmpdir=None):
    looper.run(checkNodesConnected(nodes))
    timeout = 15 + 2 * (len(nodes))
    ensureElectionsDone(looper=looper, nodes=nodes, retryWait=1,
                        timeout=timeout)
    return setupClient(looper, nodes, nodeReg=nodeReg, tmpdir=tmpdir)
Example #3
def checkIfSameReplicaIPrimary(looper: Looper,
                               replicas: Sequence[TestReplica] = None,
                               retryWait: float = 1,
                               timeout: float = 20):
    # One and only one primary should be found, and every replica should
    # agree on the same primary

    def checkElectionDone():
        unknowns = sum(1 for r in replicas if r.isPrimary is None)
        assert unknowns == 0, "election should be complete, but {} out of {} " \
                              "don't know who the primary is for protocol no {}" \
            .format(unknowns, len(replicas), replicas[0].instId)

    def checkPrisAreOne():  # number of expected primaries
        pris = sum(1 for r in replicas if r.isPrimary)
        assert pris == 1, "Primary count should be 1, but was {} for protocol no {}" \
            .format(pris, replicas[0].instId)

    def checkPrisAreSame():
        pris = {r.primaryName for r in replicas}
        assert len(pris) == 1, "Primary should be same for all, but were {} " \
                               "for protocol no {}" \
            .format(pris, replicas[0].instId)
    looper.run(
        eventuallyAll(checkElectionDone, checkPrisAreOne, checkPrisAreSame,
                      retryWait=retryWait, totalTimeout=timeout))
Example #4
def testElectionsAfterViewChange(delayedPerf, looper: Looper,
                                 nodeSet: TestNodeSet, up, client1):
    """
    Test that a primary election does happen after a view change
    """

    # Delay processing of PRE-PREPARE at all non-primary replicas of the
    # master so the master's throughput falls and a view change occurs
    nonPrimReps = getNonPrimaryReplicas(nodeSet, 0)
    for r in nonPrimReps:
        r.node.nodeIbStasher.delay(ppDelay(10, 0))

    sendReqsToNodesAndVerifySuffReplies(looper, client1, 4)

    # Ensure view change happened for both the node and its primary elector
    for node in nodeSet:
        looper.run(
            eventually(partial(checkViewChangeInitiatedForNode, node, 0),
                       retryWait=1,
                       timeout=20))

    # Ensure elections are done again and the pool is set up again with the
    # appropriate protocol instances, each of which is set up properly too
    checkProtocolInstanceSetup(looper, nodeSet, retryWait=1, timeout=30)
Example #5
def checkIfSameReplicaIPrimary(looper: Looper,
                               replicas: Sequence[TestReplica] = None,
                               retryWait: float = 1,
                               timeout: float = 20):
    # One and only one primary should be found, and every replica should
    # agree on the same primary

    def checkElectionDone():
        unknowns = sum(1 for r in replicas if r.isPrimary is None)
        assert unknowns == 0, "election should be complete, but {} out of {} " \
                              "don't know who the primary is for protocol no {}" \
            .format(unknowns, len(replicas), replicas[0].instId)

    def checkPrisAreOne():  # number of expected primaries
        pris = sum(1 for r in replicas if r.isPrimary)
        assert pris == 1, "Primary count should be 1, but was {} for protocol no {}" \
            .format(pris, replicas[0].instId)

    def checkPrisAreSame():
        pris = {r.primaryName for r in replicas}
        assert len(pris) == 1, "Primary should be same for all, but were {} " \
                               "for protocol no {}" \
            .format(pris, replicas[0].instId)

    looper.run(
        eventuallyAll(checkElectionDone,
                      checkPrisAreOne,
                      checkPrisAreSame,
                      retryWait=retryWait,
                      totalTimeout=timeout))
Example #6
def testAvgReqLatency(looper: Looper, nodeSet: TestNodeSet, wallet1, client1):
    """
    Checking if average latency is being set
    """

    for i in range(5):
        req = sendRandomRequest(wallet1, client1)
        looper.run(
            eventually(checkSufficientRepliesRecvd,
                       client1.inBox,
                       req.reqId,
                       1,
                       retryWait=1,
                       timeout=5))

    for node in nodeSet:  # type: Node
        mLat = node.monitor.getAvgLatencyForClient(wallet1.defaultId,
                                                   node.instances.masterId)
        bLat = node.monitor.getAvgLatencyForClient(wallet1.defaultId,
                                                   *node.instances.backupIds)
        logger.debug(
            "Avg. master latency : {}. Avg. backup latency: {}".format(
                mLat, bLat))
        assert mLat > 0
        assert bLat > 0
Example #7
def setupClient(looper: Looper,
                nodes: Sequence[TestNode] = None,
                nodeReg=None,
                tmpdir=None):
    client1 = genTestClient(nodes=nodes, nodeReg=nodeReg, tmpdir=tmpdir)
    looper.add(client1)
    looper.run(client1.ensureConnectedToNodes())
    return client1
Example #8
def checkPoolReady(looper: Looper,
                   nodes: Sequence[TestNode],
                   timeout: int = 20):
    looper.run(
        eventually(checkNodesAreReady,
                   nodes,
                   retryWait=.25,
                   timeout=timeout,
                   ratchetSteps=10))
Example #9
def setupClient(looper: Looper,
                nodes: Sequence[TestNode] = None,
                nodeReg=None,
                tmpdir=None):
    client1 = genTestClient(nodes=nodes,
                            nodeReg=nodeReg,
                            tmpdir=tmpdir)
    looper.add(client1)
    looper.run(client1.ensureConnectedToNodes())
    return client1
Example #10
def setupNodesAndClientAndSendRandomReq(looper: Looper,
                                        nodes: Sequence[TestNode], nodeReg=None,
                                        tmpdir=None):
    _client = setupNodesAndClient(looper, nodes, nodeReg, tmpdir)
    request = sendRandomRequest(_client)
    timeout = 3 * len(nodes)
    looper.run(eventually(checkSufficientRepliesRecvd,
                          _client.inBox,
                          request.reqId, 1,
                          retryWait=1, timeout=timeout))
    return _client, request
Example #11
def checkEveryNodeHasAtMostOnePrimary(looper: Looper,
                                      nodes: Sequence[TestNode],
                                      retryWait: float = None,
                                      timeout: float = None):
    def checkAtMostOnePrim(node):
        prims = [r for r in node.replicas if r.isPrimary]
        assert len(prims) <= 1

    for node in nodes:
        looper.run(eventually(checkAtMostOnePrim,
                              node,
                              retryWait=retryWait,
                              timeout=timeout))
Example #12
def prepareNodeSet(looper: Looper, nodeSet: TestNodeSet):
    # TODO: Come up with a more specific name for this

    for n in nodeSet:
        n.startKeySharing()

    # Key sharing party
    looper.run(checkNodesConnected(nodeSet))

    # Remove all the nodes
    for n in list(nodeSet.nodes.keys()):
        looper.removeProdable(nodeSet.nodes[n])
        nodeSet.removeNode(n, shouldClean=False)
Example #13
def sendReqsToNodesAndVerifySuffReplies(looper: Looper, client: TestClient,
                                        numReqs: int, fVal: int=None,
                                        timeout: float=None):
    nodeCount = len(client.nodeReg)
    fVal = fVal or getMaxFailures(nodeCount)
    timeout = timeout or 3 * nodeCount

    requests = sendRandomRequests(client, numReqs)
    for request in requests:
        looper.run(eventually(checkSufficientRepliesRecvd, client.inBox,
                              request.reqId, fVal,
                              retryWait=1, timeout=timeout))
    return requests
Example #14
def prepareNodeSet(looper: Looper, nodeSet: TestNodeSet):
    # TODO: Come up with a more specific name for this

    for n in nodeSet:
        n.startKeySharing()

    # Key sharing party
    looper.run(checkNodesConnected(nodeSet))

    # Remove all the nodes
    for n in list(nodeSet.nodes.keys()):
        looper.removeProdable(nodeSet.nodes[n])
        nodeSet.removeNode(n, shouldClean=False)
Example #15
def testPostingThroughput(postingStatsEnabled, looper: Looper,
                          nodeSet: TestNodeSet, wallet1, client1):
    """
    The throughput after `DashboardUpdateFreq` seconds and before sending any
    requests should be zero.
    Send `n` requests in less than `ThroughputWindowSize` seconds; the
    throughput measured within `ThroughputWindowSize` should reflect those
    `n` requests. After `ThroughputWindowSize` seconds the throughput should
    be zero. Test `totalRequests` too.
    """

    # Sleep for the window size so that values recorded earlier in the tests
    # fall outside the window and are cleared
    looper.runFor(config.ThroughputWindowSize)

    reqCount = 10
    for node in nodeSet:
        assert node.monitor.highResThroughput == 0
        assert node.monitor.totalRequests == 0

    sendReqsToNodesAndVerifySuffReplies(looper,
                                        wallet1,
                                        client1,
                                        reqCount,
                                        nodeSet.f,
                                        timeoutPerReq=20)

    for node in nodeSet:
        assert len(node.monitor.orderedRequestsInLast) == reqCount
        assert node.monitor.highResThroughput > 0
        assert node.monitor.totalRequests == reqCount
        # TODO: Add implementation to actually call firebase plugin
        # and test if firebase plugin is sending total request count
        # if node is primary

    looper.runFor(config.DashboardUpdateFreq)

    for node in nodeSet:
        assert node.monitor.spylog.count(Monitor.sendThroughput.__name__) > 0

    # Run for the throughput window duration so that `orderedRequestsInLast`
    # becomes empty
    looper.runFor(config.ThroughputWindowSize)

    def chk():
        for node in nodeSet:
            assert len(node.monitor.orderedRequestsInLast) == 0
            assert node.monitor.highResThroughput == 0
            assert node.monitor.totalRequests == reqCount

    looper.run(eventually(chk, retryWait=1, timeout=10))
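The windowed behaviour this test asserts can be pictured with a small, self-contained sketch. The snippet below is only an illustration of a sliding-window throughput counter: `SlidingWindowThroughput`, `mark_ordered`, and `window` are invented names for this sketch, with `window` standing in for `ThroughputWindowSize` and `total_requests` for `totalRequests`; it is not the plenum `Monitor` implementation.

# Illustration only: a minimal sliding-window throughput counter mirroring the
# behaviour asserted above. It is not the plenum Monitor; `window` plays the
# role of ThroughputWindowSize and `total_requests` the role of totalRequests.
import time
from collections import deque


class SlidingWindowThroughput:
    def __init__(self, window: float):
        self.window = window      # seconds, analogous to ThroughputWindowSize
        self.ordered = deque()    # timestamps of recently ordered requests
        self.total_requests = 0   # grows forever, like monitor.totalRequests

    def mark_ordered(self, now: float = None):
        now = time.perf_counter() if now is None else now
        self.ordered.append(now)
        self.total_requests += 1

    def throughput(self, now: float = None) -> float:
        now = time.perf_counter() if now is None else now
        # Requests older than the window no longer count towards throughput
        while self.ordered and now - self.ordered[0] > self.window:
            self.ordered.popleft()
        return len(self.ordered) / self.window


# Before any requests the throughput is zero; after ordering some it is
# positive; once the window elapses it drops back to zero while the running
# total keeps its value.
tp = SlidingWindowThroughput(window=5.0)
assert tp.throughput(now=0.0) == 0
for t in range(10):
    tp.mark_ordered(now=t * 0.1)
assert tp.throughput(now=1.0) > 0 and tp.total_requests == 10
assert tp.throughput(now=100.0) == 0 and tp.total_requests == 10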
Example #16
def setupClient(looper: Looper,
                nodes: Sequence[TestNode] = None,
                nodeReg=None,
                tmpdir=None,
                identifier=None,
                verkey=None):
    client1, wallet = genTestClient(nodes=nodes,
                                    nodeReg=nodeReg,
                                    tmpdir=tmpdir,
                                    identifier=identifier,
                                    verkey=verkey)
    looper.add(client1)
    looper.run(client1.ensureConnectedToNodes())
    return client1, wallet
Example #17
def checkEveryNodeHasAtMostOnePrimary(looper: Looper,
                                      nodes: Sequence[TestNode],
                                      retryWait: float = None,
                                      timeout: float = None):
    def checkAtMostOnePrim(node):
        prims = [r for r in node.replicas if r.isPrimary]
        assert len(prims) <= 1

    for node in nodes:
        looper.run(
            eventually(checkAtMostOnePrim,
                       node,
                       retryWait=retryWait,
                       timeout=timeout))
Example #18
def setupNodesAndClientAndSendRandomReq(looper: Looper,
                                        nodes: Sequence[TestNode],
                                        nodeReg=None,
                                        tmpdir=None):
    _client = setupNodesAndClient(looper, nodes, nodeReg, tmpdir)
    request = sendRandomRequest(_client)
    timeout = 3 * len(nodes)
    looper.run(
        eventually(checkSufficientRepliesRecvd,
                   _client.inBox,
                   request.reqId,
                   1,
                   retryWait=1,
                   timeout=timeout))
    return _client, request
Example #19
def testPostingThroughput(postingStatsEnabled, looper: Looper,
                          nodeSet: TestNodeSet,
                          wallet1, client1):
    """
    The throughput after `DashboardUpdateFreq` seconds and before sending any
    requests should be zero.
    Send `n` requests in less than `ThroughputWindowSize` seconds; the
    throughput measured within `ThroughputWindowSize` should reflect those
    `n` requests. After `ThroughputWindowSize` seconds the throughput should
    be zero. Test `totalRequests` too.
    """

    # Sleep for the window size so that values recorded earlier in the tests
    # fall outside the window and are cleared
    looper.runFor(config.ThroughputWindowSize)

    reqCount = 10
    for node in nodeSet:
        assert node.monitor.highResThroughput == 0
        assert node.monitor.totalRequests == 0

    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, reqCount, nodeSet.f,
                                        timeoutPerReq=20)

    for node in nodeSet:
        assert len(node.monitor.orderedRequestsInLast) == reqCount
        assert node.monitor.highResThroughput > 0
        assert node.monitor.totalRequests == reqCount
        # TODO: Add implementation to actually call firebase plugin
        # and test if firebase plugin is sending total request count
        # if node is primary

    looper.runFor(config.DashboardUpdateFreq)

    for node in nodeSet:
        assert node.monitor.spylog.count(Monitor.sendThroughput.__name__) > 0

    # Run for the throughput window duration so that `orderedRequestsInLast`
    # becomes empty
    looper.runFor(config.ThroughputWindowSize)

    def chk():
        for node in nodeSet:
            assert len(node.monitor.orderedRequestsInLast) == 0
            assert node.monitor.highResThroughput == 0
            assert node.monitor.totalRequests == reqCount

    looper.run(eventually(chk, retryWait=1, timeout=10))
Example #20
def sendReqsToNodesAndVerifySuffReplies(looper: Looper,
                                        client: TestClient,
                                        numReqs: int,
                                        fVal: int = None,
                                        timeout: float = None):
    nodeCount = len(client.nodeReg)
    fVal = fVal or getMaxFailures(nodeCount)
    timeout = timeout or 3 * nodeCount

    requests = sendRandomRequests(client, numReqs)
    for request in requests:
        looper.run(
            eventually(checkSufficientRepliesRecvd,
                       client.inBox,
                       request.reqId,
                       fVal,
                       retryWait=1,
                       timeout=timeout))
    return requests
Example #21
def testPostingLatency(postingStatsEnabled, looper: Looper,
                       nodeSet: TestNodeSet, wallet1, client1):
    """
    The latencies (master as well as average of backups) after
    `DashboardUpdateFreq` seconds and before sending any requests should be zero.
    Send `n` requests in less than `LatencyWindowSize` seconds; the latency
    measured within `LatencyWindowSize` should reflect those `n` requests.
    After `LatencyWindowSize` seconds the latencies should be zero.
    """
    # Run for latency window duration so that `latenciesByMasterInLast` and
    # `latenciesByBackupsInLast` become empty
    looper.runFor(config.LatencyWindowSize)
    reqCount = 10
    for node in nodeSet:
        assert node.monitor.masterLatency == 0
        assert node.monitor.avgBackupLatency == 0

    sendReqsToNodesAndVerifySuffReplies(looper,
                                        wallet1,
                                        client1,
                                        reqCount,
                                        nodeSet.f,
                                        timeoutPerReq=20)

    for node in nodeSet:
        assert node.monitor.masterLatency > 0
        assert node.monitor.avgBackupLatency > 0

    looper.runFor(config.DashboardUpdateFreq)

    for node in nodeSet:
        assert node.monitor.spylog.count(Monitor.sendLatencies.__name__) > 0

    # Run for latency window duration so that `latenciesByMasterInLast` and
    # `latenciesByBackupsInLast` become empty
    looper.runFor(config.LatencyWindowSize)

    def chk():
        for node in nodeSet:
            assert node.monitor.masterLatency == 0
            assert node.monitor.avgBackupLatency == 0

    looper.run(eventually(chk, retryWait=1, timeout=10))
Example #22
def testAvgReqLatency(looper: Looper, nodeSet: TestNodeSet, wallet1, client1):
    """
    Checking if average latency is being set
    """

    for i in range(5):
        req = sendRandomRequest(wallet1, client1)
        looper.run(eventually(checkSufficientRepliesRecvd,
                              client1.inBox, req.reqId, 1,
                              retryWait=1, timeout=5))

    for node in nodeSet:  # type: Node
        mLat = node.monitor.getAvgLatencyForClient(wallet1.defaultId,
                                                   node.instances.masterId)
        bLat = node.monitor.getAvgLatencyForClient(wallet1.defaultId,
                                                   *node.instances.backupIds)
        logger.debug("Avg. master latency : {}. Avg. backup latency: {}".
                      format(mLat, bLat))
        assert mLat > 0
        assert bLat > 0
Example #23
def testPostingLatency(postingStatsEnabled, looper: Looper,
                       nodeSet: TestNodeSet,
                       wallet1, client1):
    """
    The latencies (master as well as average of backups) after
    `DashboardUpdateFreq` seconds and before sending any requests should be zero.
    Send `n` requests in less than `LatencyWindowSize` seconds; the latency
    measured within `LatencyWindowSize` should reflect those `n` requests.
    After `LatencyWindowSize` seconds the latencies should be zero.
    """
    # Run for latency window duration so that `latenciesByMasterInLast` and
    # `latenciesByBackupsInLast` become empty
    looper.runFor(config.LatencyWindowSize)
    reqCount = 10
    for node in nodeSet:
        assert node.monitor.masterLatency == 0
        assert node.monitor.avgBackupLatency == 0

    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, reqCount,
                                        nodeSet.f,
                                        timeoutPerReq=20)

    for node in nodeSet:
        assert node.monitor.masterLatency > 0
        assert node.monitor.avgBackupLatency > 0

    looper.runFor(config.DashboardUpdateFreq)

    for node in nodeSet:
        assert node.monitor.spylog.count(Monitor.sendLatencies.__name__) > 0

    # Run for latency window duration so that `latenciesByMasterInLast` and
    # `latenciesByBackupsInLast` become empty
    looper.runFor(config.LatencyWindowSize)

    def chk():
        for node in nodeSet:
            assert node.monitor.masterLatency == 0
            assert node.monitor.avgBackupLatency == 0

    looper.run(eventually(chk, retryWait=1, timeout=10))
Example #24
def testElectionsAfterViewChange(delayedPerf, looper: Looper,
                                 nodeSet: TestNodeSet, up, client1):
    """
    Test that a primary election does happen after a view change
    """

    # Delay processing of PRE-PREPARE at all non-primary replicas of the
    # master so the master's throughput falls and a view change occurs
    nonPrimReps = getNonPrimaryReplicas(nodeSet, 0)
    for r in nonPrimReps:
        r.node.nodeIbStasher.delay(ppDelay(10, 0))

    sendReqsToNodesAndVerifySuffReplies(looper, client1, 4)

    # Ensure view change happened for both the node and its primary elector
    for node in nodeSet:
        looper.run(eventually(partial(checkViewChangeInitiatedForNode, node, 0),
                              retryWait=1, timeout=20))

    # Ensure elections are done again and the pool is set up again with the
    # appropriate protocol instances, each of which is set up properly too
    checkProtocolInstanceSetup(looper, nodeSet, retryWait=1, timeout=30)
Example #25
                    primaryDecider=None)


def whitelistClient(nodes, *clientNames):
    for node in nodes:
        for nm in clientNames:
            node.whitelistClient(nm)


looper = Looper(nodes, autoStart=True)
for node in nodes:
    node.startKeySharing()
    node.start(looper)
    # node.addGenesisTxns(genesisTxns(stewardSigner))

looper.run(checkNodesConnected(nodes))
ensureElectionsDone(looper=looper, nodes=nodes, retryWait=1, timeout=30)

steward, _ = genTestClient(nodes, tmpdir=tdir)
# whitelistClient(nodes, steward.name)
steward.registerObserver(stewardWallet.handleIncomingReply)
looper.add(steward)
looper.run(steward.ensureConnectedToNodes())
makePendingTxnsRequest(steward, stewardWallet)

createNym(looper, sponsorWallet.defaultId, steward, stewardWallet, SPONSOR)

sponsor, _ = genTestClient(nodes, tmpdir=tdir)
sponsor.registerObserver(sponsorWallet.handleIncomingReply)
# whitelistClient(nodes, sponsor.name)
looper.add(sponsor)
Example #26
def checkPoolReady(looper: Looper, nodes: Sequence[TestNode],
                   timeout: int = 20):
    looper.run(
            eventually(checkNodesAreReady, nodes, retryWait=.25,
                       timeout=timeout,
                       ratchetSteps=10))