def testAvgReqLatency(looper: Looper, nodeSet: TestNodeSet, wallet1, client1):
    """
    Checking if average latency is being set
    """
    for i in range(5):
        req = sendRandomRequest(wallet1, client1)
        looper.run(eventually(checkSufficientRepliesRecvd,
                              client1.inBox, req.reqId, 1,
                              retryWait=1, timeout=5))

    for node in nodeSet:  # type: Node
        mLat = node.monitor.getAvgLatencyForClient(wallet1.defaultId,
                                                   node.instances.masterId)
        bLat = node.monitor.getAvgLatencyForClient(wallet1.defaultId,
                                                   *node.instances.backupIds)
        logger.debug("Avg. master latency: {}. Avg. backup latency: {}".
                     format(mLat, bLat))
        assert mLat > 0
        assert bLat > 0
def setupNodesAndClient(looper: Looper, nodes: Sequence[TestNode],
                        nodeReg=None, tmpdir=None):
    looper.run(checkNodesConnected(nodes))
    timeout = 15 + 2 * len(nodes)
    ensureElectionsDone(looper=looper, nodes=nodes, retryWait=1,
                        timeout=timeout)
    return setupClient(looper, nodes, nodeReg=nodeReg, tmpdir=tmpdir)
def checkIfSameReplicaIPrimary(looper: Looper,
                               replicas: Sequence[TestReplica] = None,
                               retryWait: float = 1,
                               timeout: float = 20):
    # One and only one primary should be found, and every replica should
    # agree on the same primary

    def checkElectionDone():
        unknowns = sum(1 for r in replicas if r.isPrimary is None)
        assert unknowns == 0, \
            "election should be complete, but {} out of {} don't know " \
            "who the primary is for protocol instance {}" \
            .format(unknowns, len(replicas), replicas[0].instId)

    def checkPrisAreOne():  # number of expected primaries
        pris = sum(1 for r in replicas if r.isPrimary)
        assert pris == 1, \
            "Primary count should be 1, but was {} for protocol " \
            "instance {}".format(pris, replicas[0].instId)

    def checkPrisAreSame():
        pris = {r.primaryName for r in replicas}
        assert len(pris) == 1, \
            "Primary should be the same for all, but was {} for " \
            "protocol instance {}".format(pris, replicas[0].instId)

    looper.run(
        eventuallyAll(checkElectionDone, checkPrisAreOne, checkPrisAreSame,
                      retryWait=retryWait, totalTimeout=timeout))
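# The helpers above all lean on `eventually`, which keeps re-running an
# assertion until it passes or a timeout expires. A minimal sketch of that
# polling contract written against plain asyncio; `eventually_sketch` is
# illustrative, not plenum's actual implementation:

import asyncio
import time


async def eventually_sketch(check, *args, retryWait: float = 1.0,
                            timeout: float = 20.0):
    # Re-invoke `check` until it stops raising AssertionError or the
    # deadline passes, sleeping `retryWait` seconds between attempts.
    deadline = time.monotonic() + timeout
    while True:
        try:
            result = check(*args)
            if asyncio.iscoroutine(result):
                result = await result
            return result
        except AssertionError:
            if time.monotonic() >= deadline:
                raise
            await asyncio.sleep(retryWait)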
def testElectionsAfterViewChange(delayedPerf, looper: Looper,
                                 nodeSet: TestNodeSet, up, client1):
    """
    Test that a primary election happens after a view change
    """
    # Delay processing of PRE-PREPARE from all non-primary replicas of the
    # master so the master's throughput falls and a view change occurs
    nonPrimReps = getNonPrimaryReplicas(nodeSet, 0)
    for r in nonPrimReps:
        r.node.nodeIbStasher.delay(ppDelay(10, 0))
    sendReqsToNodesAndVerifySuffReplies(looper, client1, 4)

    # Ensure the view change happened for each node and its primary elector
    for node in nodeSet:
        looper.run(eventually(partial(checkViewChangeInitiatedForNode,
                                      node, 0),
                              retryWait=1, timeout=20))

    # Ensure elections are done again and the pool is set up again with the
    # appropriate protocol instances, each of them set up properly too
    checkProtocolInstanceSetup(looper, nodeSet, retryWait=1, timeout=30)
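# The view change above is triggered by the monitor noticing that the
# master instance's throughput has fallen behind the backups. A sketch of
# that degradation check, assuming a simplified monitor; the threshold
# name `Delta` mirrors plenum's config parameter, but the function itself
# is illustrative:


def is_master_degraded_sketch(master_throughput: float,
                              avg_backup_throughput: float,
                              Delta: float = 0.8) -> bool:
    # The master is considered degraded, and a view change warranted, once
    # its throughput drops below Delta times the average backup throughput.
    if avg_backup_throughput <= 0:
        return False
    return master_throughput / avg_backup_throughput < Delta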
def setupClient(looper: Looper, nodes: Sequence[TestNode] = None,
                nodeReg=None, tmpdir=None):
    client1 = genTestClient(nodes=nodes, nodeReg=nodeReg, tmpdir=tmpdir)
    looper.add(client1)
    looper.run(client1.ensureConnectedToNodes())
    return client1
def checkPoolReady(looper: Looper, nodes: Sequence[TestNode],
                   timeout: int = 20):
    looper.run(eventually(checkNodesAreReady, nodes,
                          retryWait=.25, timeout=timeout, ratchetSteps=10))
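# `ratchetSteps` above makes the wait between retries grow instead of
# staying fixed at `retryWait`. A sketch of one way to generate such
# ratcheting waits; this generator is illustrative, not plenum's actual
# Ratchet class:


def ratcheted_waits_sketch(start: float, steps: int, total: float):
    # Yield geometrically growing waits, clamped so they never exceed the
    # remaining time budget.
    wait, emitted = start, 0.0
    for _ in range(steps):
        wait = min(wait, total - emitted)
        if wait <= 0:
            return
        yield wait
        emitted += wait
        wait *= 2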
def setupNodesAndClientAndSendRandomReq(looper: Looper,
                                        nodes: Sequence[TestNode],
                                        nodeReg=None, tmpdir=None):
    _client = setupNodesAndClient(looper, nodes, nodeReg, tmpdir)
    request = sendRandomRequest(_client)
    timeout = 3 * len(nodes)
    looper.run(eventually(checkSufficientRepliesRecvd,
                          _client.inBox, request.reqId, 1,
                          retryWait=1, timeout=timeout))
    return _client, request
async def aSetupClient(looper: Looper, nodes: Sequence[TestNode] = None,
                       nodeReg=None, tmpdir=None):
    """
    Async version of setupClient above
    """
    client1 = genTestClient(nodes=nodes, nodeReg=nodeReg, tmpdir=tmpdir)
    looper.add(client1)
    await client1.ensureConnectedToNodes()
    return client1
def prepareNodeSet(looper: Looper, nodeSet: TestNodeSet):
    # TODO: Come up with a more specific name for this

    # Key sharing party
    for n in nodeSet:
        n.startKeySharing()
    looper.run(checkNodesConnected(nodeSet))

    # Remove all the nodes
    for n in list(nodeSet.nodes.keys()):
        looper.removeProdable(nodeSet.nodes[n])
        nodeSet.removeNode(n, shouldClean=False)
def checkEveryNodeHasAtMostOnePrimary(looper: Looper,
                                      nodes: Sequence[TestNode],
                                      retryWait: float = None,
                                      timeout: float = None):
    def checkAtMostOnePrim(node):
        prims = [r for r in node.replicas if r.isPrimary]
        assert len(prims) <= 1

    for node in nodes:
        looper.run(eventually(checkAtMostOnePrim, node,
                              retryWait=retryWait, timeout=timeout))
def sendReqsToNodesAndVerifySuffReplies(looper: Looper, client: TestClient,
                                        numReqs: int, fVal: int = None,
                                        timeout: float = None):
    nodeCount = len(client.nodeReg)
    fVal = fVal or getMaxFailures(nodeCount)
    timeout = timeout or 3 * nodeCount
    requests = sendRandomRequests(client, numReqs)
    for request in requests:
        looper.run(eventually(checkSufficientRepliesRecvd,
                              client.inBox, request.reqId, fVal,
                              retryWait=1, timeout=timeout))
    return requests
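# The fVal computed above is the maximum number of faulty nodes the pool
# can tolerate: under the standard BFT bound n >= 3f + 1, waiting for
# f + 1 matching replies guarantees at least one comes from an honest
# node. A sketch of the computation (illustrative; the real helper is
# plenum's getMaxFailures):


def getMaxFailures_sketch(nodeCount: int) -> int:
    # 4 nodes tolerate 1 fault, 7 tolerate 2, 10 tolerate 3, ...
    return (nodeCount - 1) // 3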
def setupClient(looper: Looper, nodes: Sequence[TestNode] = None,
                nodeReg=None, tmpdir=None, identifier=None, verkey=None):
    client1, wallet = genTestClient(nodes=nodes, nodeReg=nodeReg,
                                    tmpdir=tmpdir, identifier=identifier,
                                    verkey=verkey)
    looper.add(client1)
    looper.run(client1.ensureConnectedToNodes())
    return client1, wallet
def nodeCreatedAfterSomeTxns(txnPoolNodeSet, tdirWithPoolTxns,
                             poolTxnStewardData, tconf, allPluginsPath,
                             request):
    with Looper(debug=True) as looper:
        client, wallet = buildPoolClientAndWallet(poolTxnStewardData,
                                                  tdirWithPoolTxns,
                                                  clientClass=TestClient)
        looper.add(client)
        looper.run(client.ensureConnectedToNodes())
        txnCount = getValueFromModule(request, "txnCount", 5)
        sendReqsToNodesAndVerifySuffReplies(looper, wallet, client,
                                            txnCount, timeoutPerReq=25)
        newStewardName = randomString()
        newNodeName = "Epsilon"
        newStewardClient, newStewardWallet, newNode = addNewStewardAndNode(
            looper, client, wallet, newStewardName, newNodeName,
            tdirWithPoolTxns, tconf, allPluginsPath=allPluginsPath,
            autoStart=True)
        yield looper, newNode, client, wallet, newStewardClient, \
            newStewardWallet
def runAgent(agentClass, name, wallet=None, basedirpath=None, port=None,
             startRunning=True, bootstrap=False):
    config = getConfig()
    if not wallet:
        wallet = Wallet(name)
    if not basedirpath:
        basedirpath = config.baseDir
    if not port:
        _, port = genHa()
    _, clientPort = genHa()
    client = Client(randomString(6), ha=("0.0.0.0", clientPort),
                    basedirpath=basedirpath)
    agent = agentClass(basedirpath=basedirpath, client=client,
                       wallet=wallet, port=port)
    if startRunning:
        if bootstrap:
            agent.bootstrap()
        with Looper(debug=True) as looper:
            looper.add(agent)
            logger.debug("Running {} now (port: {})".format(name, port))
            looper.run()
    else:
        return agent
def testMultipleRequests(tdir_for_func):
    """
    Send multiple requests to the client
    """
    with TestNodeSet(count=7, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            for n in nodeSet:
                n.startKeySharing()

            ss0 = snapshotStats(*nodeSet)
            client, wal = setupNodesAndClient(looper, nodeSet,
                                              tmpdir=tdir_for_func)
            ss1 = snapshotStats(*nodeSet)

            def x():
                requests = [sendRandomRequest(wal, client)
                            for _ in range(10)]
                for request in requests:
                    looper.run(eventually(
                        checkSufficientRepliesRecvd,
                        client.inBox, request.reqId, 3,
                        retryWait=1, timeout=3 * len(nodeSet)))
                ss2 = snapshotStats(*nodeSet)
                diff = statsDiff(ss2, ss1)
                pprint(ss2)
                print("----------------------------------------------")
                pprint(diff)

            profile_this(x)
def run_node():
    nodeReg = OrderedDict([
        ('Alpha', ('127.0.0.1', 9701)),
        ('Beta', ('127.0.0.1', 9703)),
        ('Gamma', ('127.0.0.1', 9705)),
        ('Delta', ('127.0.0.1', 9707))])

    # the first argument should be the node name
    try:
        nodeName = sys.argv[1]
    except IndexError:
        names = list(nodeReg.keys())
        print("Please supply a node name (one of {}) as the first argument.".
              format(", ".join(names)))
        print("For example:")
        print("    {} {}".format(sys.argv[0], names[0]))
        return

    with Looper(debug=False) as looper:
        # Nodes persist keys when bootstrapping to other nodes and
        # reconnecting, so when proving a concept an ephemeral temporary
        # directory is a nice way to keep things tidy.
        with TemporaryDirectory() as tmpdir:
            node = Node(nodeName, nodeReg, basedirpath=tmpdir)

            # see simple_client.py
            joe_verkey = b'cffbb88a142be2f62d1b408818e21a2f' \
                         b'887c4442ae035a260d4cc2ec28ae24d6'
            node.clientAuthNr.addClient("Joe", joe_verkey)

            looper.add(node)
            node.startKeySharing()
            looper.run()
def testNodesComingUpAtDifferentTimes():
    console = getConsole()
    console.reinit(flushy=True, verbosity=console.Wordage.verbose)
    with TemporaryDirectory() as td:
        print("temporary directory: {}".format(td))
        with Looper() as looper:
            nodes = []
            names = list(nodeReg.keys())
            shuffle(names)
            waits = [randint(1, 10) for _ in names]
            rwaits = [randint(1, 10) for _ in names]

            for i, name in enumerate(names):
                node = TestNode(name, nodeReg, basedirpath=td)
                looper.add(node)
                node.startKeySharing()
                nodes.append(node)
                looper.runFor(waits[i])
            looper.run(checkNodesConnected(nodes, overrideTimeout=10))
            print("connects")
            print("node order: {}".format(names))
            print("waits: {}".format(waits))

            for n in nodes:
                n.stop()
            for i, n in enumerate(nodes):
                n.start(looper.loop)
                looper.runFor(rwaits[i])
            looper.runFor(3)
            looper.run(checkNodesConnected(nodes, overrideTimeout=10))
            print("reconnects")
            print("node order: {}".format(names))
            print("rwaits: {}".format(rwaits))
def testTestNodeDelay(tdir_for_func):
    nodeNames = {"testA", "testB"}
    with TestNodeSet(names=nodeNames, tmpdir=tdir_for_func) as nodes:
        nodeA = nodes.getNode("testA")
        nodeB = nodes.getNode("testB")
        with Looper(nodes) as looper:
            for n in nodes:
                n.startKeySharing()

            logging.debug("connect")
            looper.run(checkNodesConnected(nodes))

            logging.debug("send one message, without delay")
            msg = randomMsg()
            looper.run(sendMsgAndCheck(nodes, nodeA, nodeB, msg, 1))

            logging.debug("set delay, then send another message and find "
                          "that it doesn't arrive")
            msg = randomMsg()
            nodeB.nodeIbStasher.delay(
                delayerMsgTuple(6, type(msg), nodeA.name))
            with pytest.raises(AssertionError):
                looper.run(sendMsgAndCheck(nodes, nodeA, nodeB, msg, 3))

            logging.debug("but then find that it arrives after the delay "
                          "duration has passed")
            looper.run(sendMsgAndCheck(nodes, nodeA, nodeB, msg, 4))

            logging.debug("reset the delay, and find another message "
                          "comes quickly")
            nodeB.nodeIbStasher.resetDelays()
            msg = randomMsg()
            looper.run(sendMsgAndCheck(nodes, nodeA, nodeB, msg, 1))
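# The `nodeIbStasher` used above sits in front of a node's inbound queue
# and holds back messages matching a rule until a delay elapses. A
# self-contained sketch of that idea; `StasherSketch` and its rule format
# are illustrative, not plenum's actual Stasher:

import time
from collections import deque


class StasherSketch:
    def __init__(self):
        self._rules = []          # (predicate, delay_seconds)
        self._stashed = deque()   # (release_at, msg)

    def delay(self, predicate, seconds: float):
        self._rules.append((predicate, seconds))

    def resetDelays(self):
        self._rules.clear()

    def process(self, msg, deliver):
        # Stash the message if any rule matches; otherwise deliver it now.
        for predicate, seconds in self._rules:
            if predicate(msg):
                self._stashed.append((time.monotonic() + seconds, msg))
                return
        deliver(msg)

    def flushDue(self, deliver):
        # Called periodically; releases messages whose delay has passed.
        now = time.monotonic()
        waiting = deque()
        while self._stashed:
            release_at, msg = self._stashed.popleft()
            if release_at <= now:
                deliver(msg)
            else:
                waiting.append((release_at, msg))
        self._stashed = waiting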
def testReqExecWhenReturnedByMaster(tdir_for_func):
    with TestNodeSet(count=4, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            for n in nodeSet:
                n.startKeySharing()
            client1, wallet1 = setupNodesAndClient(looper, nodeSet,
                                                   tmpdir=tdir_for_func)
            req = sendRandomRequest(wallet1, client1)
            looper.run(eventually(checkSufficientRepliesRecvd,
                                  client1.inBox, req.reqId, 1,
                                  retryWait=1, timeout=15))

            async def chk():
                for node in nodeSet:
                    entries = node.spylog.getAll(
                        node.processOrdered.__name__)
                    for entry in entries:
                        arg = entry.params['ordered']
                        result = entry.result
                        if arg.instId == node.instances.masterId:
                            assert result
                        else:
                            assert result is None

            looper.run(eventually(chk, timeout=3))
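# `node.spylog` works because test nodes wrap selected methods with a spy
# that records each call's parameters and result. A minimal sketch of the
# pattern; the decorator and log format here are illustrative, not
# plenum's actual spyable machinery:

import functools


def spied(log: list):
    # Wrap a function so that every call appends a record to `log`.
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            result = fn(*args, **kwargs)
            log.append({'name': fn.__name__,
                        'params': {'args': args, 'kwargs': kwargs},
                        'result': result})
            return result
        return wrapper
    return decorator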
def testNodesConnectsWhenOneNodeIsLate():
    with TemporaryDirectory() as td:
        with Looper() as looper:
            nodes = []
            names = list(nodeReg.keys())
            logger.debug("Node names: {}".format(names))

            def create(name):
                node = TestNode(name, nodeReg, basedirpath=td)
                looper.add(node)
                node.startKeySharing()
                nodes.append(node)

            for name in names[:3]:
                create(name)

            looper.run(checkNodesConnected(nodes))

            # wait for the election to complete with the first three nodes
            looper.runFor(10)

            # create the fourth node and see that it learns who the
            # primaries are from the other nodes
            create(names[3])
            checkProtocolInstanceSetup(looper, nodes, timeout=10)
def __init__(self, nodeCount=None, nodeRegistry=None, nodeSet=None,
             looper=None, tmpdir=None):
    super().__init__()
    self.actor = None  # type: Organization
    if nodeSet is None:
        self.nodes = self.enter_context(
            TestNodeSet(count=nodeCount, nodeReg=nodeRegistry,
                        tmpdir=tmpdir))
    else:
        self.nodes = nodeSet
    self.nodeReg = self.nodes.nodeReg
    if looper is None:
        self.looper = self.enter_context(Looper(self.nodes))
    else:
        self.looper = looper
    self.tmpdir = tmpdir
    self.ran = []  # history of what has been run
    self.userId = None
    self.userNym = None
    self.sponsor = None
    self.sponsorNym = None
    self.agent = None
    self.agentNym = None
def testKeyShareParty(tdir_for_func):
    """
    Connections to all nodes should be successfully established when
    key sharing is enabled.
    """
    nodeReg = genNodeReg(5)

    logging.debug("-----sharing keys-----")
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            for n in nodeSet:
                n.startKeySharing()
            looper.run(checkNodesConnected(nodeSet))

    logging.debug("-----key sharing done, connect after key sharing-----")
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as loop:
            loop.run(checkNodesConnected(nodeSet),
                     msgAll(nodeSet))
def startAgent(name, seed, loop=None):
    agentWallet = Wallet(name)
    agentWallet.addIdentifier(
        signer=SimpleSigner(seed=bytes(seed, 'utf-8')))
    agent = createAgent(WalletedAgent, name, wallet=agentWallet, loop=loop)
    agentPort = agent.endpoint.stackParams['ha'].port
    with Looper(debug=True) as looper:
        looper.add(agent)
        log.debug("Running {} now (port: {})".format(name, agentPort))
        return agent
def testNodesConnectWhenTheyAllStartAtOnce():
    with TemporaryDirectory() as td:
        with Looper() as looper:
            nodes = []
            for name in nodeReg:
                node = TestNode(name, nodeReg, basedirpath=td)
                looper.add(node)
                node.startKeySharing()
                nodes.append(node)
            looper.run(checkNodesConnected(nodes))
def testPostingThroughput(postingStatsEnabled, looper: Looper,
                          nodeSet: TestNodeSet, wallet1, client1):
    """
    The throughput after `DashboardUpdateFreq` seconds and before sending
    any requests should be zero. Send `n` requests in less than
    `ThroughputWindowSize` seconds; the throughput until
    `ThroughputWindowSize` elapses should count those `n` requests. After
    `ThroughputWindowSize` seconds the throughput should be zero again.
    Test `totalRequests` too.
    """
    # Sleep for the window size to clear any values that were stored for
    # that duration during earlier tests
    looper.runFor(config.ThroughputWindowSize)
    reqCount = 10
    for node in nodeSet:
        assert node.monitor.highResThroughput == 0
        assert node.monitor.totalRequests == 0

    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1,
                                        reqCount, nodeSet.f,
                                        timeoutPerReq=20)

    for node in nodeSet:
        assert len(node.monitor.orderedRequestsInLast) == reqCount
        assert node.monitor.highResThroughput > 0
        assert node.monitor.totalRequests == reqCount

    # TODO: Add implementation to actually call the firebase plugin and
    # test whether it sends the total request count when the node is primary

    looper.runFor(config.DashboardUpdateFreq)

    for node in nodeSet:
        assert node.monitor.spylog.count(
            Monitor.sendThroughput.__name__) > 0

    # Run for the throughput window duration so that
    # `orderedRequestsInLast` becomes empty
    looper.runFor(config.ThroughputWindowSize)

    def chk():
        for node in nodeSet:
            assert len(node.monitor.orderedRequestsInLast) == 0
            assert node.monitor.highResThroughput == 0
            assert node.monitor.totalRequests == reqCount

    looper.run(eventually(chk, retryWait=1, timeout=10))
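# The throughput figures asserted above come from a sliding window: only
# requests ordered within the last `ThroughputWindowSize` seconds count
# toward `highResThroughput`, while `totalRequests` is never pruned. A
# sketch of that bookkeeping under those assumptions; the class and its
# names are illustrative, not the real Monitor:

import time
from collections import deque


class ThroughputWindowSketch:
    def __init__(self, window_size: float):
        self.window_size = window_size
        self.ordered = deque()   # timestamps of ordered requests
        self.total_requests = 0  # cumulative; survives window pruning

    def request_ordered(self):
        self.ordered.append(time.monotonic())
        self.total_requests += 1

    def throughput(self) -> float:
        # Drop timestamps older than the window, then compute requests
        # per second; zero once the window drains.
        cutoff = time.monotonic() - self.window_size
        while self.ordered and self.ordered[0] < cutoff:
            self.ordered.popleft()
        return len(self.ordered) / self.window_size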
def txnPoolNodeSet(tdirWithPoolTxns, tconf, poolTxnNodeNames,
                   tdirWithNodeKeepInited):
    with Looper(debug=True) as looper:
        nodes = []
        for nm in poolTxnNodeNames:
            node = TestNode(nm, basedirpath=tdirWithPoolTxns, config=tconf)
            looper.add(node)
            nodes.append(node)
        looper.run(eventually(checkNodesConnected, nodes,
                              retryWait=1, timeout=5))
        yield nodes
def run_node():
    ip = '52.37.111.254'
    cliNodeReg = OrderedDict([
        # ('AlphaC', ('127.0.0.1', 8002)),
        ('AlphaC', (ip, 4002)),
        ('BetaC', (ip, 4004)),
        ('GammaC', (ip, 4006)),
        ('DeltaC', (ip, 4008))])

    with Looper(debug=False) as looper:
        # Nodes persist keys when bootstrapping to other nodes and
        # reconnecting, so when proving a concept an ephemeral temporary
        # directory is a nice way to keep things clean.
        clientName = 'Joem'

        # this seed is used by the signer to deterministically generate
        # a signature verification key that is shared out of band with
        # the consensus pool
        seed = b'g034OTmx7qBRtywvCbKhjfALHnsdcJpl'
        assert len(seed) == 32
        signer = SimpleSigner(seed=seed)
        assert signer.verstr == 'o7z4QmFkNB+mVkFI2BwX0H' \
                                'dm1BGhnz8psWnKYIXWTaQ='

        client_address = ('0.0.0.0', 8000)

        tmpdir = os.path.join(tempfile.gettempdir(), "sovrin_clients",
                              clientName)
        client = Client(clientName, cliNodeReg, ha=client_address,
                        signer=signer, basedirpath=tmpdir)
        looper.add(client)

        # give the client time to connect
        looper.runFor(3)

        # a simple message
        msg = {TXN_TYPE: NYM}

        # submit the request to the pool
        request, = client.submit(msg)

        # allow time for the request to be executed
        looper.runFor(3)

        reply, status = client.getReply(request.reqId)
        print('')
        print('Reply: {}\n'.format(reply))
        print('Status: {}\n'.format(status))
def _(subdir, looper=None):
    def new():
        return newCLI(looper, tdir, subDirectory=subdir, conf=tconf,
                      poolDir=tdirWithPoolTxns,
                      domainDir=tdirWithDomainTxns)

    if looper:
        yield new()
    else:
        with Looper(debug=False) as looper:
            yield new()
def run(self, coro, nodecount=4):
    tmpdir = self.fresh_tdir()
    with self.testNodeSetClass(count=nodecount, tmpdir=tmpdir) as nodeset:
        with Looper(nodeset) as looper:
            for n in nodeset:
                n.startKeySharing()
            ctx = adict(looper=looper, nodeset=nodeset, tmpdir=tmpdir)
            looper.run(checkNodesConnected(nodeset))
            ensureElectionsDone(looper=looper, nodes=nodeset,
                                retryWait=1, timeout=30)
            looper.run(coro(ctx))
def runAgent(agent, looper=None, bootstrap=True):
    def doRun(looper):
        looper.add(agent)
        logger.debug("Running {} now (port: {})".format(agent.name,
                                                        agent.port))
        if bootstrap:
            looper.run(agent.bootstrap())

    if looper:
        doRun(looper)
    else:
        with Looper(debug=True, loop=agent.loop) as looper:
            doRun(looper)
            looper.run()
def testPostingLatency(postingStatsEnabled, looper: Looper,
                       nodeSet: TestNodeSet, wallet1, client1):
    """
    The latencies (master as well as the average of the backups) after
    `DashboardUpdateFreq` seconds and before sending any requests should
    be zero. Send `n` requests in less than `LatencyWindowSize` seconds;
    the latency until `LatencyWindowSize` elapses should count those `n`
    requests. After `LatencyWindowSize` seconds the latencies should be
    zero again.
    """
    # Run for the latency window duration so that `latenciesByMasterInLast`
    # and `latenciesByBackupsInLast` become empty
    looper.runFor(config.LatencyWindowSize)
    reqCount = 10
    for node in nodeSet:
        assert node.monitor.masterLatency == 0
        assert node.monitor.avgBackupLatency == 0

    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1,
                                        reqCount, nodeSet.f,
                                        timeoutPerReq=20)

    for node in nodeSet:
        assert node.monitor.masterLatency > 0
        assert node.monitor.avgBackupLatency > 0

    looper.runFor(config.DashboardUpdateFreq)

    for node in nodeSet:
        assert node.monitor.spylog.count(
            Monitor.sendLatencies.__name__) > 0

    # Run for the latency window duration again so that
    # `latenciesByMasterInLast` and `latenciesByBackupsInLast` drain
    looper.runFor(config.LatencyWindowSize)

    def chk():
        for node in nodeSet:
            assert node.monitor.masterLatency == 0
            assert node.monitor.avgBackupLatency == 0

    looper.run(eventually(chk, retryWait=1, timeout=10))
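# The latency assertions follow the same windowing pattern as the
# throughput test: only samples observed within the last
# `LatencyWindowSize` seconds feed the averages, so `masterLatency` and
# `avgBackupLatency` fall back to zero once the window drains. A sketch
# under the same simplifying assumptions, with illustrative names:

import time
from collections import deque


class LatencyWindowSketch:
    def __init__(self, window_size: float):
        self.window_size = window_size
        self.samples = deque()  # (observed_at, latency_seconds)

    def add(self, latency: float):
        self.samples.append((time.monotonic(), latency))

    def average(self) -> float:
        # Prune stale samples, then average what remains (zero if empty).
        cutoff = time.monotonic() - self.window_size
        while self.samples and self.samples[0][0] < cutoff:
            self.samples.popleft()
        if not self.samples:
            return 0.0
        return sum(lat for _, lat in self.samples) / len(self.samples)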
def addNodeBack(nodeSet: TestNodeSet, looper: Looper,
                nodeName: str) -> TestNode:
    node = nodeSet.addNode(nodeName)
    looper.add(node)
    return node