def testNonPrimarySendsAPrePrepare(looper, txnPoolNodeSet, setup, propagated1):
    """
    Force a non-primary replica to send a PRE-PREPARE and verify the other
    non-primary replicas each record exactly one suspicion
    (PPR_FRM_NON_PRIMARY) against its node while still receiving the message.
    """
    # NOTE(review): `instId` presumably is a module-level constant of this
    # test module — confirm, it is not visible in this chunk.
    nonPrimaryReplicas = getNonPrimaryReplicas(txnPoolNodeSet, instId)
    firstNpr = nonPrimaryReplicas[0]
    remainingNpr = nonPrimaryReplicas[1:]

    def sendPrePrepareFromNonPrimary():
        # Queue the propagated request, build a 3PC batch and illegally send
        # a PRE-PREPARE from a replica that is not the primary.
        firstNpr._ordering_service.requestQueues[DOMAIN_LEDGER_ID].add(
            propagated1.key)
        ppReq = firstNpr._ordering_service.create_3pc_batch(DOMAIN_LEDGER_ID)
        firstNpr._ordering_service.send_pre_prepare(ppReq)
        return ppReq

    ppr = sendPrePrepareFromNonPrimary()

    def chk():
        # Every other non-primary replica must have received exactly the
        # one PRE-PREPARE (matching digest/state root/txn root) and its node
        # must have raised exactly one suspicion about the sender.
        for r in remainingNpr:
            recvdPps = recvd_pre_prepares(r)
            assert len(recvdPps) == 1
            assert compareNamedTuple(recvdPps[0], ppr, f.DIGEST.nm,
                                     f.STATE_ROOT.nm, f.TXN_ROOT.nm)
            nodeSuspicions = len(
                getNodeSuspicions(r.node, Suspicions.PPR_FRM_NON_PRIMARY.code))
            assert nodeSuspicions == 1

    timeout = waits.expectedClientRequestPropagationTime(len(txnPoolNodeSet))
    looper.run(eventually(chk, retryWait=.5, timeout=timeout))
def testPropagateRecvdBeforeRequest(setup, looper, nodeSet, up, sent1):
    """
    Check a node (A) whose client messages are delayed: it first sees a
    PROPAGATE from a peer, later the client's own request, and forwards the
    request exactly once.
    """
    # NOTE(review): `delaySec` presumably is the module-level delay applied
    # to A's incoming client messages by `setup` — confirm in the module.
    A, B, C, D = nodeSet.nodes.values()

    def x():
        # Before the delayed client message arrives:
        # A should not have received a request from the client
        assert len(recvdRequest(A)) == 0
        # A should have received only one PROPAGATE
        assert len(recvdPropagate(A)) == 1
        # A should have sent only one PROPAGATE
        assert len(sentPropagate(A)) == 1

    # Check within the delay window (hence "- 2").
    timeout = waits.expectedNodeToNodeMessageDeliveryTime() + delaySec - 2
    looper.run(eventually(x, retryWait=.5, timeout=timeout))

    def y():
        # After the delay expires:
        # A should have received a request from the client
        assert len(recvdRequest(A)) == 1
        # A should still have sent only one PROPAGATE
        assert len(sentPropagate(A)) == 1

    timeout = waits.expectedNodeToNodeMessageDeliveryTime() + delaySec + 2
    looper.run(eventually(y, retryWait=.5, timeout=timeout))

    def chk():
        # A should have forwarded the request
        assertLength(forwardedRequest(A), 1)

    timeout = waits.expectedClientRequestPropagationTime(len(nodeSet)) + delaySec
    looper.run(eventually(chk, retryWait=1, timeout=timeout))
async def go(ctx): client1, wallet = genTestClient(ctx.nodeset, tmpdir=ctx.tmpdir) # remove the client's ability to sign assert wallet.defaultId ctx.looper.add(client1) await client1.ensureConnectedToNodes() request = wallet.signOp(op=randomOperation()) request.signature = None request = client1.submitReqs(request)[0][0] timeout = waits.expectedClientRequestPropagationTime(nodeCount) with pytest.raises(AssertionError): for node in ctx.nodeset: await eventually(checkLastClientReqForNode, node, request, retryWait=1, timeout=timeout) for n in ctx.nodeset: params = n.spylog.getLastParams(Node.handleInvalidClientMsg) ex = params['ex'] msg, _ = params['wrappedMsg'] assert isinstance(ex, MissingSignature) assert msg.get(f.IDENTIFIER.nm) == request.identifier params = n.spylog.getLastParams(Node.discard) reason = params["reason"] (msg, frm) = params["msg"] assert msg == request.as_dict assert msg.get(f.IDENTIFIER.nm) == request.identifier assert "MissingSignature" in reason
def testPropagateRecvdBeforeRequest(setup, looper, txnPoolNodeSet, sent1):
    """
    Check a node (A) whose client messages are delayed: it first sees a
    PROPAGATE from a peer, later the client's request, forwards the request
    exactly once, and authenticates the expected number of messages.
    """
    # NOTE(review): `delaySec` and `reqCount` presumably are module-level
    # values set up by this test module — confirm, not visible in this chunk.
    A, B, C, D = txnPoolNodeSet

    def x():
        # Before the delayed client message arrives:
        # A should not have received a request from the client
        assert len(recvdRequest(A)) == 0

        # A should have received only one PROPAGATE
        assert len(recvdPropagate(A)) == 1

        # A should have sent only one PROPAGATE
        assert len(sentPropagate(A)) == 1

    # Check within the delay window (hence "- 2").
    timeout = waits.expectedNodeToNodeMessageDeliveryTime() + delaySec - 2
    looper.run(eventually(x, retryWait=.5, timeout=timeout))

    def y():
        # After the delay expires:
        # A should have received a request from the client
        assert len(recvdRequest(A)) == 1

        # A should still have sent only one PROPAGATE
        assert len(sentPropagate(A)) == 1

    timeout = waits.expectedNodeToNodeMessageDeliveryTime() + delaySec + 2
    looper.run(eventually(y, retryWait=.5, timeout=timeout))

    def chk():
        # A should have forwarded the request
        assertLength(forwardedRequest(A), 1)

    timeout = waits.expectedClientRequestPropagationTime(
        len(txnPoolNodeSet)) + delaySec
    looper.run(eventually(chk, retryWait=1, timeout=timeout))

    # The core authenticator must have been invoked exactly `reqCount` times.
    auth_obj = A.authNr(0).core_authenticator
    auth_calling_count = get_count(auth_obj, auth_obj.authenticate)
    assert auth_calling_count == reqCount
def testOneNodeAltersAClientRequest(looper, txnPoolNodeSet, setup, evilAlpha,
                                    sent1):
    """
    Malicious Alpha alters a client request; the good nodes must report a
    suspicion against Alpha (InsufficientCorrectSignatures) and ignore its
    PROPAGATEs while accepting each other's.
    """
    # NOTE(review): `faultyNodes` presumably is a module-level constant —
    # confirm, it is not visible in this chunk.
    sent1 = sdk_json_to_request_object(sent1[0][0])
    checkPropagated(looper, txnPoolNodeSet, sent1, faultyNodes)
    goodNodes = setup.goodNodes

    def check():
        for node in goodNodes:
            # ensure the nodes are suspicious of Alpha
            params = node.spylog.getLastParams(TestNode.reportSuspiciousNode)
            frm = params["nodeName"]
            reason = params["reason"]
            assert frm == 'Alpha'
            assert reason == InsufficientCorrectSignatures.reason.format(0, 1)

            # ensure Alpha's propagates were ignored by the other nodes
            key = sent1.identifier, sent1.reqId
            props = node.requests[key].propagates
            assert 'Alpha' not in props
            for good in goodNodes:
                assert good.name in props

    timeout = waits.expectedClientRequestPropagationTime(len(txnPoolNodeSet))
    looper.run(eventually(check, retryWait=1, timeout=timeout))
def testNonPrimarySendsAPrePrepare(looper, txnPoolNodeSet, setup, propagated1):
    """
    Force a non-primary replica to send a PRE-PREPARE and verify the other
    non-primary replicas each record exactly one suspicion
    (PPR_FRM_NON_PRIMARY) against its node while still receiving the message.
    """
    # NOTE(review): `instId` presumably is a module-level constant of this
    # test module — confirm, it is not visible in this chunk.
    nonPrimaryReplicas = getNonPrimaryReplicas(txnPoolNodeSet, instId)
    firstNpr = nonPrimaryReplicas[0]
    remainingNpr = nonPrimaryReplicas[1:]

    def sendPrePrepareFromNonPrimary():
        # Queue the propagated request, build a 3PC batch and illegally send
        # a PRE-PREPARE from a replica that is not the primary.
        firstNpr.requestQueues[DOMAIN_LEDGER_ID].add(propagated1.key)
        ppReq = firstNpr.create3PCBatch(DOMAIN_LEDGER_ID)
        firstNpr.sendPrePrepare(ppReq)
        return ppReq

    ppr = sendPrePrepareFromNonPrimary()

    def chk():
        # Every other non-primary replica must have received exactly the
        # one PRE-PREPARE (matching digest/state root/txn root) and its node
        # must have raised exactly one suspicion about the sender.
        for r in remainingNpr:
            recvdPps = recvd_pre_prepares(r)
            assert len(recvdPps) == 1
            assert compareNamedTuple(recvdPps[0], ppr, f.DIGEST.nm,
                                     f.STATE_ROOT.nm, f.TXN_ROOT.nm)
            nodeSuspicions = len(getNodeSuspicions(
                r.node, Suspicions.PPR_FRM_NON_PRIMARY.code))
            assert nodeSuspicions == 1

    timeout = waits.expectedClientRequestPropagationTime(len(txnPoolNodeSet))
    looper.run(eventually(chk, retryWait=.5, timeout=timeout))
def testOneNodeAltersAClientRequest(looper, txnPoolNodeSet, evilAlpha, sdk_pool_handle, sdk_wallet_client): """Malicious Alpha node sends incorrect propagate. This test check that nodes raise InsufficientCorrectSignatures in validate this propagate""" # TODO: This test is throwing a `indy.error.PoolLedgerTerminated` exception # This is probably happening because a request is sent and the pool is terminated before the reply is processed alpha = txnPoolNodeSet[0] goodNodes = list(txnPoolNodeSet) goodNodes.remove(alpha) # delay incoming client messages for good nodes by 250 milliseconds # this gives Alpha a chance to send a propagate message for n in goodNodes: # type: TestNode n.nodeIbStasher.delay(ppDelay(sys.maxsize)) n.nodeIbStasher.delay(req_delay(sys.maxsize)) pastNodes = [] request_couple_json = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1) sent1 = sdk_json_to_request_object(request_couple_json[0][0]) checkPropagated(looper, txnPoolNodeSet, sent1, faultyNodes) def check(): for node in goodNodes: if node not in pastNodes: # ensure the nodes are suspicious of Alpha params = node.spylog.getLastParams(TestNode.reportSuspiciousNode) frm = params["nodeName"] reason = params["reason"] assert frm == 'Alpha' invalid_signatures = 'did={}, signature={}'.format(sent1.identifier, sent1.signature) assert reason == InsufficientCorrectSignatures.reason.format(1, 0, 1, invalid_signatures) # ensure Alpha's propagates were ignored by the other nodes key = sent1.digest props = node.requests[key].propagates assert 'Alpha' not in props for good in goodNodes: assert good.name in props pastNodes.append(node) for node in goodNodes: node.nodeIbStasher.resetDelays() timeout = waits.expectedClientRequestPropagationTime(len(txnPoolNodeSet)) looper.run(eventually(check, retryWait=1, timeout=timeout))
def testOneNodeAltersAClientRequest(looper,
                                    txnPoolNodeSet,
                                    evilAlpha,
                                    sdk_pool_handle,
                                    sdk_wallet_client):
    """
    Malicious Alpha sends an incorrect PROPAGATE; the good nodes must raise
    InsufficientCorrectSignatures while validating it and ignore Alpha's
    PROPAGATEs for the request.
    """
    alpha = txnPoolNodeSet[0]
    goodNodes = list(txnPoolNodeSet)
    goodNodes.remove(alpha)
    # delay incoming client messages for good nodes
    # this gives Alpha a chance to send a propagate message
    for n in goodNodes:  # type: TestNode
        n.nodeIbStasher.delay(ppDelay(sys.maxsize))
        n.nodeIbStasher.delay(req_delay(sys.maxsize))
    # Nodes already verified, so `check` (retried by `eventually`) does not
    # re-check them after a partial pass.
    pastNodes = []
    request_couple_json = sdk_send_random_requests(looper, sdk_pool_handle,
                                                   sdk_wallet_client, 1)
    sent1 = sdk_json_to_request_object(request_couple_json[0][0])
    # NOTE(review): `faultyNodes` presumably is a module-level constant —
    # confirm, it is not visible in this chunk.
    checkPropagated(looper, txnPoolNodeSet, sent1, faultyNodes)

    def check():
        for node in goodNodes:
            if node not in pastNodes:
                # ensure the nodes are suspicious of Alpha
                params = node.spylog.getLastParams(
                    TestNode.reportSuspiciousNode)
                frm = params["nodeName"]
                reason = params["reason"]
                assert frm == 'Alpha'
                assert reason == InsufficientCorrectSignatures.reason.format(
                    0, 1)

                # ensure Alpha's propagates were ignored by the other nodes
                key = sent1.digest
                props = node.requests[key].propagates
                assert 'Alpha' not in props
                for good in goodNodes:
                    assert good.name in props
                pastNodes.append(node)

        # Let the stashed client messages through once all nodes passed.
        for node in goodNodes:
            node.nodeIbStasher.resetDelays()

    timeout = waits.expectedClientRequestPropagationTime(len(txnPoolNodeSet))
    looper.run(eventually(check, retryWait=1, timeout=timeout))
def testMsgFromInstanceDelay(configNodeSet, looper, prepared1):
    """
    With node A's messages delayed on the master instance only, nodes C and D
    must eventually hold COMMITs that lack A's master replica (instance 0)
    but include A's backup replica (instance 1).
    """
    nodeA, _, nodeC, nodeD = configNodeSet

    def commitsOf(node: TestNode, instId: int):
        # Snapshot of the COMMITs collected by this node's replica `instId`.
        return list(node.replicas[instId].commits.values())

    def checkPresence():
        for observer in (nodeC, nodeD):
            # Instance 0: A's COMMIT must be absent.
            masterCommits = commitsOf(observer, 0)
            assert len(masterCommits) > 0
            assert Replica.generateName(nodeA.name, 0) not in masterCommits[0][0]
            # Instance 1: A's COMMIT must be present.
            backupCommits = commitsOf(observer, 1)
            assert len(backupCommits) > 0
            assert Replica.generateName(nodeA.name, 1) in backupCommits[0][0]

    timeout = waits.expectedClientRequestPropagationTime(len(configNodeSet))
    looper.run(eventually(checkPresence, retryWait=.5, timeout=timeout))
def testSendRequestWithoutSignatureFails(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): """ A client request sent without a signature fails with an EmptySignature exception """ # remove the client's ability to sign requests = sdk_signed_random_requests(looper, sdk_wallet_client, 1) json_req = json.loads(requests[0]) json_req['signature'] = None request = json.dumps(json_req) res = sdk_send_signed_requests(sdk_pool_handle, [request]) obj_req = sdk_json_to_request_object(res[0][0]) timeout = waits.expectedClientRequestPropagationTime(nodeCount) with pytest.raises(AssertionError): for node in txnPoolNodeSet: looper.loop.run_until_complete( eventually(checkLastClientReqForNode, node, obj_req, retryWait=1, timeout=timeout)) for n in txnPoolNodeSet: params = n.spylog.getLastParams(Node.handleInvalidClientMsg) ex = params['ex'] msg, _ = params['wrappedMsg'] assert isinstance(ex, MissingSignature) assert msg.get(f.IDENTIFIER.nm) == obj_req.identifier params = n.spylog.getLastParams(Node.discard) reason = params["reason"] (msg, frm) = params["msg"] assert msg == json_req assert msg.get(f.IDENTIFIER.nm) == obj_req.identifier assert "MissingSignature" in reason
def testMsgFromInstanceDelay(configNodeSet, looper, sdk_pool_handle,
                             sdk_wallet_client):
    """
    With node A's messages delayed on the master instance only, nodes C and D
    must eventually hold COMMITs that lack A's master replica (instance 0)
    but include A's backup replica (instance 1).
    """
    A, B, C, D = configNodeSet
    sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)

    def getCommits(node: TestNode, instId: int):
        # Snapshot of the COMMITs collected by this node's replica `instId`.
        replica = node.replicas[instId]  # type: Replica
        return list(replica.commits.values())

    def checkPresence():
        for node in [C, D]:
            # Instance 0 (master): A's COMMIT must be absent (delayed).
            commReqs = getCommits(node, 0)
            assert len(commReqs) > 0
            assert Replica.generateName(A.name, 0) not in commReqs[0][0]
            # Instance 1 (backup): A's COMMIT must be present.
            commReqs = getCommits(node, 1)
            assert len(commReqs) > 0
            assert Replica.generateName(A.name, 1) in commReqs[0][0]

    numOfNodes = len(configNodeSet)
    timeout = waits.expectedClientRequestPropagationTime(numOfNodes)
    looper.run(eventually(checkPresence, retryWait=.5, timeout=timeout))
def testSendRequestWithoutSignatureFails(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client): """ A client request sent without a signature fails with an EmptySignature exception """ # remove the client's ability to sign requests = sdk_signed_random_requests(looper, sdk_wallet_client, 1) json_req = json.loads(requests[0]) json_req['signature'] = None request = json.dumps(json_req) res = sdk_send_signed_requests(sdk_pool_handle, [request]) obj_req = sdk_json_to_request_object(res[0][0]) timeout = waits.expectedClientRequestPropagationTime(nodeCount) with pytest.raises(AssertionError): for node in txnPoolNodeSet: looper.loop.run_until_complete(eventually( checkLastClientReqForNode, node, obj_req, retryWait=1, timeout=timeout)) for n in txnPoolNodeSet: params = n.spylog.getLastParams(Node.handleInvalidClientMsg) ex = params['ex'] msg, _ = params['wrappedMsg'] assert isinstance(ex, MissingSignature) assert msg.get(f.IDENTIFIER.nm) == obj_req.identifier params = n.spylog.getLastParams(Node.discard) reason = params["reason"] (msg, frm) = params["msg"] assert msg == json_req assert msg.get(f.IDENTIFIER.nm) == obj_req.identifier assert "MissingSignature" in reason
def test_view_change_gc_in_between_3pc_all_nodes_delays(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    Test that garbage collector compares the whole 3PC key (viewNo, ppSeqNo)
    and does not remove messages from node's queues that have higher viewNo
    than last ordered one even if their ppSeqNo are less or equal
    """
    numNodes = len(txnPoolNodeSet)
    viewNo = checkViewNoForNodes(txnPoolNodeSet)

    # 1 send two messages one by one separately to make
    # node pool working with two batches
    #    -> last_ordered_3pc = (+0, 2) [+0 means from the initial state]
    #       (last_ordered_3pc here and further is tracked
    #       for master instances only because non-master ones have
    #       specific logic of its management which we don't care about in
    #       the test, see Replica::_setup_for_non_master)
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)
    last_ordered_3pc = (viewNo, 2)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 2)

    # 2 do view change
    #    -> GC should remove it from nodes' queues
    #    -> viewNo = +1
    ensure_view_change_complete(looper, txnPoolNodeSet)
    viewNo = checkViewNoForNodes(txnPoolNodeSet, viewNo + 1)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 0)

    # 3 slow processing 3PC messages for all nodes (all replica instances)
    # randomly and send one more message
    #    -> not ordered (last_ordered_3pc still equal (+0, 2)) but primaries
    #       should at least send PRE-PREPAREs
    # TODO could it be not enough for waiting that at least primary
    # has sent PRE-PREPARE
    propagationTimeout = waits.expectedClientRequestPropagationTime(numNodes)
    delay_3pc_messages(txnPoolNodeSet, 0, delay=propagationTimeout * 2)
    delay_3pc_messages(txnPoolNodeSet, 1, delay=propagationTimeout * 2)
    requests = sdk_send_random_request(looper, sdk_pool_handle,
                                       sdk_wallet_client)

    def checkPrePrepareSentAtLeastByPrimary():
        # At least every primary replica must have sent a PRE-PREPARE.
        for node in txnPoolNodeSet:
            for replica in node.replicas.values():
                if replica.isPrimary:
                    assert len(replica.sentPrePrepares)

    looper.run(eventually(checkPrePrepareSentAtLeastByPrimary,
                          retryWait=0.1, timeout=propagationTimeout))

    # 4 do view change
    #    -> GC shouldn't remove anything because
    #       last_ordered_3pc (+0, 1) < last message's 3pc key (+1, 1)
    #    -> viewNo = 2
    ensure_view_change_complete(looper, txnPoolNodeSet)
    viewNoNew = checkViewNoForNodes(txnPoolNodeSet)
    # another view change could happen because of slow nodes
    assert viewNoNew - viewNo in (1, 2)
    viewNo = viewNoNew
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 1)

    # 5 reset delays and wait for replies
    #    -> new primaries should send new 3pc for last message
    #       with 3pc key (+2, 1)
    #    -> they should be ordered
    #    -> last_ordered_3pc = (+2, 1)
    reset_delays_and_process_delayeds(txnPoolNodeSet)
    sdk_get_replies(looper, [requests])
    checkViewNoForNodes(txnPoolNodeSet, viewNo)
    last_ordered_3pc = (viewNo, 1)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 1)

    # 6 do view change
    #    -> GC should remove them
    ensure_view_change_complete(looper, txnPoolNodeSet)
    viewNo = checkViewNoForNodes(txnPoolNodeSet, viewNo + 1)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 0)
def test_view_change_gc_in_between_3pc_all_nodes_delays(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    Test that garbage collector compares the whole 3PC key (viewNo, ppSeqNo)
    and does not remove messages from node's queues that have higher viewNo
    than last ordered one even if their ppSeqNo are less or equal
    """
    numNodes = len(txnPoolNodeSet)
    viewNo = checkViewNoForNodes(txnPoolNodeSet)

    # 1 send two messages one by one separately to make
    # node pool working with two batches
    #    -> last_ordered_3pc = (+0, 2) [+0 means from the initial state]
    #       (last_ordered_3pc here and further is tracked
    #       for master instances only because non-master ones have
    #       specific logic of its management which we don't care about in
    #       the test, see Replica::_setup_for_non_master)
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)
    batches_count = get_pp_seq_no(txnPoolNodeSet)
    last_ordered_3pc = (viewNo, batches_count)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 2)

    # 2 do view change
    #    -> GC should remove it from nodes' queues
    #    -> viewNo = +1
    ensure_view_change_complete(looper, txnPoolNodeSet)
    # NOTE(review): batches_count is incremented after each view change —
    # presumably because the view change itself orders a batch; confirm.
    batches_count += 1
    viewNo = checkViewNoForNodes(txnPoolNodeSet, viewNo + 1)
    looper.run(
        eventually(check_nodes_last_ordered_3pc, txnPoolNodeSet,
                   (viewNo, batches_count)))
    check_nodes_requests_size(txnPoolNodeSet, 0)

    # 3 slow processing 3PC messages for all nodes (all replica instances)
    # randomly and send one more message
    #    -> not ordered (last_ordered_3pc still equal (+0, 2)) but primaries
    #       should at least send PRE-PREPAREs
    # TODO could it be not enough for waiting that at least primary
    # has sent PRE-PREPARE
    propagationTimeout = waits.expectedClientRequestPropagationTime(numNodes)
    delay_3pc_messages(txnPoolNodeSet, 0, delay=propagationTimeout * 2)
    delay_3pc_messages(txnPoolNodeSet, 1, delay=propagationTimeout * 2)
    requests = sdk_send_random_request(looper, sdk_pool_handle,
                                       sdk_wallet_client)

    def checkPrePrepareSentAtLeastByPrimary():
        # At least every primary replica must have sent a PRE-PREPARE.
        for node in txnPoolNodeSet:
            for replica in node.replicas.values():
                if replica.isPrimary:
                    assert len(replica._ordering_service.sent_preprepares)

    looper.run(
        eventually(checkPrePrepareSentAtLeastByPrimary,
                   retryWait=0.1, timeout=propagationTimeout))

    # 4 do view change
    #    -> GC shouldn't remove anything because
    #       last_ordered_3pc (+0, 1) < last message's 3pc key (+1, 1)
    #    -> viewNo = 2
    ensure_view_change_complete(looper, txnPoolNodeSet)
    batches_count += 1
    viewNoNew = checkViewNoForNodes(txnPoolNodeSet)
    # another view change could happen because of slow nodes
    assert viewNoNew - viewNo in (1, 2)
    viewNo = viewNoNew
    check_nodes_last_ordered_3pc(txnPoolNodeSet,
                                 (last_ordered_3pc[0] + 1, batches_count - 1))
    check_nodes_requests_size(txnPoolNodeSet, 1)

    # 5 reset delays and wait for replies
    #    -> new primaries should send new 3pc for last message
    #       with 3pc key (+2, 1)
    #    -> they should be ordered
    #    -> last_ordered_3pc = (+2, 1)
    reset_delays_and_process_delayeds(txnPoolNodeSet)
    sdk_get_replies(looper, [requests])
    batches_count += 1
    checkViewNoForNodes(txnPoolNodeSet, viewNo)
    last_ordered_3pc = (viewNo, batches_count)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 1)

    # 6 do view change
    #    -> GC should remove them
    ensure_view_change_complete(looper, txnPoolNodeSet)
    batches_count += 1
    viewNo = checkViewNoForNodes(txnPoolNodeSet, viewNo + 1)
    check_nodes_last_ordered_3pc(txnPoolNodeSet,
                                 (last_ordered_3pc[0] + 1, batches_count))
    check_nodes_requests_size(txnPoolNodeSet, 0)
def testPrePrepareWhenPrimaryStatusIsUnknown(tdir_for_func):
    """
    A node (D) that does not yet know whether it is primary should stash
    3-phase messages (PRE-PREPARE, PREPARE) and process them all once the
    delay is lifted, leaving no pending requests.

    NOTE(review): this test references `request` and `delayD`, whose setup
    lines are commented out below — as written it would raise NameError if
    collected and run; confirm whether the test is intentionally disabled.
    """
    nodeNames = genNodeNames(4)
    nodeReg = genNodeReg(names=nodeNames)
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            prepareNodeSet(looper, nodeSet)
            nodeA, nodeB, nodeC, nodeD = tuple(
                addNodeBack(nodeSet, looper, nodeNames[i])
                for i in range(0, 4))

            # Since primary selection is round robin, A and B will be primaries

            # Nodes C and D delays self nomination so A and B can become
            # primaries
            # nodeC.delaySelfNomination(10)
            # nodeD.delaySelfNomination(10)

            # Node D delays receiving PRIMARY messages from all nodes so it
            # will not know whether it is primary or not

            # delayD = 5
            # nodeD.nodeIbStasher.delay(delayerMsgTuple(delayD, Primary))

            checkPoolReady(looper=looper, nodes=nodeSet)

            # client1, wal = setupClient(looper, nodeSet, tmpdir=tdir_for_func)
            # request = sendRandomRequest(wal, client1)

            # TODO Rethink this
            instNo = 0

            timeout = waits.expectedClientRequestPropagationTime(len(nodeSet))
            for i in range(3):
                node = nodeSet.getNode(nodeNames[i])
                # Nodes A, B and C should have received PROPAGATE request
                # from Node D
                looper.run(
                    eventually(checkIfPropagateRecvdFromNode, node, nodeD,
                               request.identifier, request.reqId,
                               retryWait=1, timeout=timeout))

            def assert_msg_count(typ, count):
                # Count of 3-phase messages of type `typ` still stashed by
                # node D's replica for the instance under test.
                assert len(
                    getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                 typ)) == count

            # Node D should have 1 pending PRE-PREPARE request
            timeout = waits.expectedPrePrepareTime(len(nodeSet))
            looper.run(
                eventually(assert_msg_count, PrePrepare, 1, retryWait=1,
                           timeout=timeout))

            # Node D should have 2 pending PREPARE requests(from node B and C)
            timeout = waits.expectedPrepareTime(len(nodeSet))
            looper.run(
                eventually(assert_msg_count, Prepare, 2, retryWait=1,
                           timeout=timeout))

            # Its been checked above that replica stashes 3 phase messages in
            # lack of primary, now avoid delay (fix the network)
            nodeD.nodeIbStasher.reset_delays_and_process_delayeds()

            # Node D should have no pending PRE-PREPARE, PREPARE or COMMIT
            # requests
            for reqType in [PrePrepare, Prepare, Commit]:
                # The lambda captures `reqType` late, but each looper.run
                # completes before the loop advances, so the current value
                # is used on every retry.
                looper.run(
                    eventually(lambda: assertLength(
                        getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                     reqType),
                        0),
                               retryWait=1,
                               timeout=delayD))  # wait little more than delay
def testPrePrepareWhenPrimaryStatusIsUnknown(tdir_for_func):
    """
    A node (D) that does not yet know whether it is primary should stash
    3-phase messages (PRE-PREPARE, PREPARE) and process them all once the
    delay is lifted, leaving no pending requests.

    NOTE(review): this test references `request` and `delayD`, whose setup
    lines are commented out below — as written it would raise NameError if
    collected and run; confirm whether the test is intentionally disabled.
    """
    nodeNames = genNodeNames(4)
    nodeReg = genNodeReg(names=nodeNames)
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            prepareNodeSet(looper, nodeSet)
            nodeA, nodeB, nodeC, nodeD = tuple(
                addNodeBack(
                    nodeSet, looper, nodeNames[i]) for i in range(0, 4))

            # Since primary selection is round robin, A and B will be primaries

            # Nodes C and D delays self nomination so A and B can become
            # primaries
            # nodeC.delaySelfNomination(10)
            # nodeD.delaySelfNomination(10)

            # Node D delays receiving PRIMARY messages from all nodes so it
            # will not know whether it is primary or not

            # delayD = 5
            # nodeD.nodeIbStasher.delay(delayerMsgTuple(delayD, Primary))

            checkPoolReady(looper=looper, nodes=nodeSet)

            # client1, wal = setupClient(looper, nodeSet, tmpdir=tdir_for_func)
            # request = sendRandomRequest(wal, client1)

            # TODO Rethink this
            instNo = 0

            timeout = waits.expectedClientRequestPropagationTime(len(nodeSet))
            for i in range(3):
                node = nodeSet.getNode(nodeNames[i])
                # Nodes A, B and C should have received PROPAGATE request
                # from Node D
                looper.run(
                    eventually(checkIfPropagateRecvdFromNode, node, nodeD,
                               request.identifier,
                               request.reqId, retryWait=1, timeout=timeout))

            def assert_msg_count(typ, count):
                # Count of 3-phase messages of type `typ` still stashed by
                # node D's replica for the instance under test.
                assert len(getPendingRequestsForReplica(
                    nodeD.replicas[instNo], typ)) == count

            # Node D should have 1 pending PRE-PREPARE request
            timeout = waits.expectedPrePrepareTime(len(nodeSet))
            looper.run(eventually(assert_msg_count, PrePrepare, 1,
                                  retryWait=1, timeout=timeout))

            # Node D should have 2 pending PREPARE requests(from node B and C)
            timeout = waits.expectedPrepareTime(len(nodeSet))
            looper.run(eventually(assert_msg_count, Prepare, 2,
                                  retryWait=1, timeout=timeout))

            # Its been checked above that replica stashes 3 phase messages in
            # lack of primary, now avoid delay (fix the network)
            nodeD.nodeIbStasher.reset_delays_and_process_delayeds()

            # Node D should have no pending PRE-PREPARE, PREPARE or COMMIT
            # requests
            for reqType in [PrePrepare, Prepare, Commit]:
                # The lambda captures `reqType` late, but each looper.run
                # completes before the loop advances, so the current value
                # is used on every retry.
                looper.run(
                    eventually(
                        lambda: assertLength(
                            getPendingRequestsForReplica(
                                nodeD.replicas[instNo], reqType),
                            0),
                        retryWait=1,
                        timeout=delayD))  # wait little more than delay