def ensureUpgradeSent(looper, trustee, trusteeWallet, upgradeData):
    upgrade, req = sendUpgrade(trustee, trusteeWallet, upgradeData)
    checkSufficientRepliesForRequests(looper, trustee, [req, ],
                                      timeoutPerReq=10)

    def check():
        assert trusteeWallet.getPoolUpgrade(upgrade.key).seqNo

    looper.run(eventually(check, retryWait=1, timeout=5))
    return upgrade

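# Several tests below consume a `validUpgradeSent` fixture. A minimal sketch
# of how that fixture could be wired on top of ensureUpgradeSent -- the
# wiring and scope are assumptions; the real definition lives in the suite's
# conftest:
@pytest.fixture(scope="module")
def validUpgradeSent(looper, nodeSet, trustee, trusteeWallet, validUpgrade):
    ensureUpgradeSent(looper, trustee, trusteeWallet, validUpgrade)
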
# Consumed as a pytest fixture by testStewardSuspendsNode below
@pytest.fixture(scope="module")
def nodeThetaAdded(looper, nodeSet, tdirWithPoolTxns, tconf, steward,
                   stewardWallet, allPluginsPath, testNodeClass,
                   testClientClass, tdir):
    newStewardName = "testClientSteward" + randomString(3)
    newNodeName = "Theta"
    newSteward, newStewardWallet = getClientAddedWithRole(
        nodeSet, tdir, looper, steward, stewardWallet, newStewardName,
        STEWARD)
    sigseed = randomString(32).encode()
    nodeSigner = SimpleSigner(seed=sigseed)
    (nodeIp, nodePort), (clientIp, clientPort) = genHa(2)
    data = {
        NODE_IP: nodeIp,
        NODE_PORT: nodePort,
        CLIENT_IP: clientIp,
        CLIENT_PORT: clientPort,
        ALIAS: newNodeName,
        SERVICES: [VALIDATOR, ]
    }
    node = Node(nodeSigner.identifier, data, newStewardWallet.defaultId)
    newStewardWallet.addNode(node)
    reqs = newStewardWallet.preparePending()
    req, = newSteward.submitReqs(*reqs)
    checkSufficientRepliesForRequests(looper, newSteward, [req, ])

    def chk():
        assert newStewardWallet.getNode(node.id).seqNo is not None

    looper.run(eventually(chk, retryWait=1, timeout=10))
    initLocalKeep(newNodeName, tdirWithPoolTxns, sigseed, override=True)
    newNode = testNodeClass(newNodeName, basedirpath=tdir, config=tconf,
                            ha=(nodeIp, nodePort),
                            cliha=(clientIp, clientPort),
                            pluginPaths=allPluginsPath)
    nodeSet.append(newNode)
    looper.add(newNode)
    looper.run(checkNodesConnected(nodeSet))
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward, *nodeSet)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward,
                                                  *nodeSet)
    return newSteward, newStewardWallet, newNode

def testNodeRejectsPoolUpgrade(looper, nodeSet, tdir, trustee, trusteeWallet,
                               invalidUpgrade):
    _, req = sendUpgrade(trustee, trusteeWallet, invalidUpgrade)
    with pytest.raises(AssertionError):
        checkSufficientRepliesForRequests(looper, trustee, [req, ],
                                          timeoutPerReq=10)
    looper.run(eventually(checkNacks, trustee, req.reqId,
                          'since time span between upgrades'))

def testNonTrustyCannotCancelUpgrade(validUpgradeSent, looper, nodeSet,
                                     steward, validUpgrade):
    stClient, stWallet = steward
    validUpgrade = deepcopy(validUpgrade)
    validUpgrade[ACTION] = CANCEL
    _, req = sendUpgrade(stClient, stWallet, validUpgrade)
    with pytest.raises(AssertionError):
        checkSufficientRepliesForRequests(looper, stClient, [req, ],
                                          timeoutPerReq=10)
    looper.run(eventually(checkNacks, stClient, req.reqId, 'cannot do'))

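# `checkNacks` (used with `eventually` above and in the other upgrade tests)
# is assumed to assert that the client received REQNACKs for the given
# request id with a reason containing the given substring. A minimal sketch
# under that assumption -- the message shape and `client.inBox` holding
# (msg, sender) pairs are assumptions, and the real helper lives in the test
# utilities:
def checkNacksSketch(client, reqId, contains=''):
    nacks = [msg for msg, _ in client.inBox
             if msg.get(OP_FIELD_NAME) == REQNACK and
             msg.get(f.REQ_ID.nm) == reqId]
    assert nacks, 'no REQNACK received for request {}'.format(reqId)
    for nack in nacks:
        assert contains in nack.get(f.REASON.nm, '')
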
def testClientConnectToRestartedNodes(looper, txnPoolNodeSet,
                                      tdirWithPoolTxns, poolTxnClientNames,
                                      poolTxnData, tconf, poolTxnNodeNames,
                                      allPluginsPath):
    name = poolTxnClientNames[-1]
    seed = poolTxnData["seeds"][name]
    newClient, w = genTestClient(tmpdir=tdirWithPoolTxns,
                                 nodes=txnPoolNodeSet, name=name,
                                 usePoolLedger=True)
    looper.add(newClient)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newClient,
                                                  *txnPoolNodeSet)
    sendReqsToNodesAndVerifySuffReplies(looper, w, newClient, 1, 1)
    for node in txnPoolNodeSet:
        node.stop()
        looper.removeProdable(node)

    # looper.run(newClient.ensureDisconnectedToNodes(timeout=60))
    txnPoolNodeSet = []
    for nm in poolTxnNodeNames:
        node = TestNode(nm, basedirpath=tdirWithPoolTxns, config=tconf,
                        pluginPaths=allPluginsPath)
        looper.add(node)
        txnPoolNodeSet.append(node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet, retryWait=1,
                        timeout=10)

    def chk():
        for node in txnPoolNodeSet:
            assert node.isParticipating

    looper.run(eventually(chk, retryWait=1, timeout=10))
    bootstrapClientKeys(w.defaultId, w.getVerkey(), txnPoolNodeSet)
    req = sendRandomRequest(w, newClient)
    checkSufficientRepliesForRequests(looper, newClient, [req, ],
                                      timeoutPerReq=10)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newClient,
                                                  *txnPoolNodeSet)
    sendReqsToNodesAndVerifySuffReplies(looper, w, newClient, 1, 1)

def testOnlyTrusteeCanSendPoolUpgrade(validUpgradeSent, looper, steward,
                                      validUpgrade):
    # A steward sends a POOL_UPGRADE, but the txn fails
    stClient, stWallet = steward
    validUpgrade = deepcopy(validUpgrade)
    validUpgrade[NAME] = 'upgrade-20'
    validUpgrade[VERSION] = bumpedVersion()
    _, req = sendUpgrade(stClient, stWallet, validUpgrade)
    with pytest.raises(AssertionError):
        checkSufficientRepliesForRequests(looper, stClient, [req, ],
                                          timeoutPerReq=10)
    looper.run(eventually(checkNacks, stClient, req.reqId, 'cannot do'))

def testTrustyCancelsUpgrade(validUpgradeSent, looper, nodeSet, trustee,
                             trusteeWallet, validUpgrade):
    validUpgrade = deepcopy(validUpgrade)
    validUpgrade[ACTION] = CANCEL
    validUpgrade[JUSTIFICATION] = '"not gonna give you one"'
    validUpgrade.pop(SCHEDULE, None)
    upgrade, req = sendUpgrade(trustee, trusteeWallet, validUpgrade)
    checkSufficientRepliesForRequests(looper, trustee, [req, ],
                                      timeoutPerReq=10)

    def check():
        assert trusteeWallet.getPoolUpgrade(upgrade.key).seqNo

    looper.run(eventually(check, retryWait=1, timeout=5))
    looper.run(eventually(checkNoUpgradeScheduled, nodeSet, retryWait=1,
                          timeout=10))

def testOrderingCase1(looper, nodeSet, up, client1, wallet1):
    """
    Scenario -> A client sends requests, and some nodes delay COMMITs to a
    few specific nodes so that those nodes reach commit quorum for the
    affected requests later than the rest of the pool. All nodes must still
    `ORDER` the requests in the same order of ppSeqNos.
    https://www.pivotaltracker.com/n/projects/1889887/stories/133655009
    """
    pr, replicas = getPrimaryReplica(nodeSet, instId=0), \
        getNonPrimaryReplicas(nodeSet, instId=0)
    assert len(replicas) == 6
    rep0 = pr
    rep1, rep2, rep3, rep4, rep5, rep6 = replicas
    node0, node1, node2, node3, node4, node5, node6 = [
        r.node for r in (rep0, rep1, rep2, rep3, rep4, rep5, rep6)]
    requests = sendRandomRequests(wallet1, client1, 15)
    ppSeqsToDelay = 5
    delayedPpSeqNos = set()

    def specificCommits(wrappedMsg):
        nonlocal node3, node4, node5
        msg, sender = wrappedMsg
        if isinstance(msg, PrePrepare):
            if len(delayedPpSeqNos) < ppSeqsToDelay:
                delayedPpSeqNos.add(msg.ppSeqNo)
                logger.debug('ppSeqNo {} corresponding to request id {} would '
                             'be delayed'.format(msg.ppSeqNo, msg.reqId))
        if isinstance(msg, Commit) and msg.instId == 0 and \
                sender in (n.name for n in (node3, node4, node5)) and \
                msg.ppSeqNo in delayedPpSeqNos:
            return 3

    for node in (node1, node2):
        logger.debug('{} would be delaying commits'.format(node))
        node.nodeIbStasher.delay(specificCommits)

    checkSufficientRepliesForRequests(looper, client1, requests)

    def ensureSlowNodesHaveAllTxns():
        nonlocal node1, node2
        for node in node1, node2:
            assert len(node.domainLedger) == 15

    looper.run(eventually(ensureSlowNodesHaveAllTxns, retryWait=1,
                          timeout=15))

    checkAllLedgersEqual((n.domainLedger for n in
                          (node0, node3, node4, node5, node6)))
    for node in (node1, node2):
        for n in nodeSet:
            if n != node:
                checkLedgerEquality(node.domainLedger, n.domainLedger)
    checkAllLedgersEqual((n.domainLedger for n in nodeSet))

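# A note on the stashing used above: a delayer registered via
# `nodeIbStasher.delay` is a callable receiving a (message, sender) pair and
# returning how many seconds to hold the message back; returning None lets
# the message through immediately. A minimal sketch of a delayer that holds
# back every COMMIT on the master instance, assuming the same semantics as
# specificCommits above (delayAllCommitsOnMaster is illustrative, not a
# helper from the suite):
def delayAllCommitsOnMaster(wrappedMsg):
    msg, sender = wrappedMsg
    if isinstance(msg, Commit) and msg.instId == 0:
        return 3  # seconds to keep the COMMIT stashed
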
def testStewardSuspendsNode(looper, txnPoolNodeSet, tdirWithPoolTxns, tconf,
                            steward1, stewardWallet, nodeThetaAdded,
                            poolTxnStewardData, allPluginsPath):
    newSteward, newStewardWallet, newNode = nodeThetaAdded
    newNodeNym = hexToFriendly(newNode.nodestack.local.signer.verhex)
    suspendNode(looper, newSteward, newStewardWallet, newNodeNym,
                newNode.name)
    # Check that the suspended node no longer appears in the nodeReg or
    # remotes of any node or client
    txnPoolNodeSet = txnPoolNodeSet[:-1]
    for node in txnPoolNodeSet:
        looper.run(eventually(checkNodeNotInNodeReg, node, newNode.name))
    for client in (steward1, newSteward):
        looper.run(eventually(checkNodeNotInNodeReg, client, newNode.name))

    # Check that a client can still send requests and receive replies
    req = sendRandomRequest(newStewardWallet, newSteward)
    checkSufficientRepliesForRequests(looper, newSteward, [req, ],
                                      timeoutPerReq=10)

    # Check that a restarted client or node does not connect to the
    # suspended node
    steward1.stop()
    looper.removeProdable(steward1)
    steward1, stewardWallet = buildPoolClientAndWallet(poolTxnStewardData,
                                                       tdirWithPoolTxns)
    looper.add(steward1)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward1,
                                                  *txnPoolNodeSet)
    looper.run(eventually(checkNodeNotInNodeReg, steward1, newNode.name))

    newNode.stop()
    looper.removeProdable(newNode)

    # TODO: There is a bug that if a primary node is turned off, it sends
    # duplicate Pre-Prepare and gets blacklisted. Here is the gist
    # https://gist.github.com/lovesh/c16989616ebb6856f9fa2905c14dc4b7
    oldNodeIdx, oldNode = [(i, n) for i, n in enumerate(txnPoolNodeSet)
                           if not n.hasPrimary][0]
    oldNode.stop()
    looper.removeProdable(oldNode)
    oldNode = TestNode(oldNode.name, basedirpath=tdirWithPoolTxns,
                       config=tconf, pluginPaths=allPluginsPath)
    looper.add(oldNode)
    txnPoolNodeSet[oldNodeIdx] = oldNode
    looper.run(checkNodesConnected(txnPoolNodeSet))
    looper.run(eventually(checkNodeNotInNodeReg, oldNode, newNode.name))

    # Check that a node whose suspension is revoked can reconnect to the
    # other nodes and that clients can connect to it again
    cancelNodeSuspension(looper, newSteward, newStewardWallet, newNodeNym,
                         newNode.name)
    nodeTheta = TestNode(newNode.name, basedirpath=tdirWithPoolTxns,
                         config=tconf, pluginPaths=allPluginsPath,
                         ha=newNode.nodestack.ha,
                         cliha=newNode.clientstack.ha)
    looper.add(nodeTheta)
    txnPoolNodeSet.append(nodeTheta)
    looper.run(checkNodesConnected(txnPoolNodeSet, overrideTimeout=30))
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward1,
                                                  *txnPoolNodeSet)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward,
                                                  *txnPoolNodeSet)

def testOrderingCase2(looper, nodeSet, up, client1, wallet1):
    """
    Scenario -> A client sends requests, and some nodes delay COMMITs to a
    few specific nodes so that those nodes reach commit quorum for the
    affected requests later than the rest of the pool. All nodes must still
    `ORDER` the requests in the same order of ppSeqNos.
    https://www.pivotaltracker.com/n/projects/1889887/stories/133655009
    """
    pr, replicas = getPrimaryReplica(nodeSet, instId=0), \
        getNonPrimaryReplicas(nodeSet, instId=0)
    assert len(replicas) == 6
    rep0 = pr
    rep1, rep2, rep3, rep4, rep5, rep6 = replicas
    node0, node1, node2, node3, node4, node5, node6 = [
        r.node for r in (rep0, rep1, rep2, rep3, rep4, rep5, rep6)]
    ppSeqsToDelay = 5
    commitDelay = 3  # delay each COMMIT by this number of seconds
    delayedPpSeqNos = set()
    requestCount = 15
    requests = sendRandomRequests(wallet1, client1, requestCount)

    def specificCommits(wrappedMsg):
        nonlocal node3, node4, node5
        msg, sender = wrappedMsg
        if isinstance(msg, PrePrepare):
            if len(delayedPpSeqNos) < ppSeqsToDelay:
                delayedPpSeqNos.add(msg.ppSeqNo)
                logger.debug('ppSeqNo {} corresponding to request id {} would '
                             'be delayed'.format(msg.ppSeqNo, msg.reqId))
        if isinstance(msg, Commit) and msg.instId == 0 and \
                sender in (n.name for n in (node3, node4, node5)) and \
                msg.ppSeqNo in delayedPpSeqNos:
            return commitDelay

    for node in (node1, node2):
        logger.debug('{} would be delaying commits'.format(node))
        node.nodeIbStasher.delay(specificCommits)

    checkSufficientRepliesForRequests(looper, client1, requests)

    def ensureSlowNodesHaveAllTxns():
        nonlocal node1, node2
        for node in node1, node2:
            assert len(node.domainLedger) == requestCount

    looper.run(eventually(ensureSlowNodesHaveAllTxns, retryWait=1,
                          timeout=15))

    checkAllLedgersEqual((n.domainLedger for n in
                          (node0, node3, node4, node5, node6)))
    for node in (node1, node2):
        for n in nodeSet:
            if n != node:
                checkLedgerEquality(node.domainLedger, n.domainLedger)
    checkAllLedgersEqual((n.domainLedger for n in nodeSet))

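# The ledger checks above are assumed to compare both ledger size and merkle
# root. A minimal sketch under that assumption -- `size` and `root_hash` are
# assumed attribute names, and the real helpers live in the test utilities:
def checkLedgerEqualitySketch(ledger1, ledger2):
    assert ledger1.size == ledger2.size
    assert ledger1.root_hash == ledger2.root_hash


def checkAllLedgersEqualSketch(ledgers):
    # Equality is transitive, so comparing adjacent pairs covers the set
    ledgers = list(ledgers)
    for l1, l2 in zip(ledgers, ledgers[1:]):
        checkLedgerEqualitySketch(l1, l2)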