def test_node_handles_forced_upgrade_on_propagate(looper, nodeSet, sdk_pool_handle, sdk_wallet_trustee, validUpgradeExpForceTrue):
    """
    Verifies that a POOL_UPGRADE request with force=true is handled
    immediately when the node learns about it only through a PROPAGATE
    from another node (all other inbound traffic is stashed).
    """
    lagging_node = getNonPrimaryReplicas(nodeSet, instId=0)[-1].node

    # Stash all inbound messages except PROPAGATEs from Gamma:
    # direct client requests, PROPAGATEs from Alpha/Beta, and the whole
    # 3PC traffic (PRE-PREPARE, PREPARE, COMMIT).
    lagging_node.clientIbStasher.delay(req_delay())
    for delayer in (ppgDelay(sender_filter='Alpha'),
                    ppgDelay(sender_filter='Beta'),
                    ppDelay(),
                    pDelay(),
                    cDelay()):
        lagging_node.nodeIbStasher.delay(delayer)

    sdk_send_upgrade(looper, sdk_pool_handle, sdk_wallet_trustee,
                     validUpgradeExpForceTrue)

    # The upgrade must get scheduled despite the node missing everything
    # but the PROPAGATE.
    looper.run(
        eventually(checkUpgradeScheduled,
                   [lagging_node],
                   validUpgradeExpForceTrue[VERSION],
                   retryWait=1,
                   timeout=waits.expectedUpgradeScheduled()))
def test_belated_propagate_not_processed_if_already_in_3pc_process(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    A PROPAGATE arriving late (while the request is already going through
    3PC) must not cause the request to be ordered twice: the ledger grows
    by exactly one transaction on every node.
    """
    delta = txnPoolNodeSet[3]
    ledger_size_before = delta.domainLedger.size

    # Delay PROPAGATEs from Gamma to Delta, and COMMITs on every node so
    # ordering cannot complete while the PROPAGATE is still stashed.
    delta.nodeIbStasher.delay(ppgDelay(300, 'Gamma'))
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(cDelay(300))

    signed_reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    sdk_send_signed_requests(sdk_pool_handle, signed_reqs)

    # Expected duration of one full Propagate/PrePrepare/Prepare/Commit round.
    pool_size = len(txnPoolNodeSet)
    round_timeout = (waits.expectedPropagateTime(pool_size) +
                     waits.expectedPrePrepareTime(pool_size) +
                     waits.expectedPrepareTime(pool_size) +
                     waits.expectedCommittedTime(pool_size))

    looper.runFor(round_timeout)
    # Release the belated PROPAGATE while 3PC is still stalled on COMMITs.
    delta.nodeIbStasher.reset_delays_and_process_delayeds('PROPAGATE')
    looper.runFor(round_timeout)

    # Now let COMMITs through so ordering can finish.
    for node in txnPoolNodeSet:
        node.nodeIbStasher.reset_delays_and_process_delayeds('COMMIT')
    looper.runFor(waits.expectedOrderingTime(delta.replicas.num_replicas))

    # Exactly one transaction ordered everywhere - no duplicate processing.
    for node in txnPoolNodeSet:
        assert node.domainLedger.size - ledger_size_before == 1
def setup(request, txnPoolNodeSet):
    """
    Parametrized fixture body: returns (faulty_node, client_request_received).

    Tested once when the client request is received by the faulty node and
    once when it is not. A node which is primary in neither instance is
    chosen so that the same PROPAGATEs are not requested again by the node.
    """
    faulty_node = getNonPrimaryReplicas(txnPoolNodeSet, 0)[1].node
    scenario = request.param

    if scenario == 'client_requests':
        # Long delay in PROPAGATEs only; client requests still arrive.
        faulty_node.nodeIbStasher.delay(ppgDelay(90))
        return faulty_node, True

    if scenario == 'no_client_requests':
        # Long delay in PROPAGATEs ...
        faulty_node.nodeIbStasher.delay(ppgDelay(90))
        # ... and in client requests as well.
        faulty_node.clientIbStasher.delay(req_delay(90))
        return faulty_node, False
def test_no_preprepare_requested(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle, teardown):
    """
    A node missing PROPAGATEs cannot finalise the request, hence it stashes
    the PRE-PREPARE but does not request a PRE-PREPARE upon receiving a
    PREPARE.
    """
    laggard, healthy_nodes, _, _ = split_nodes(txnPoolNodeSet)

    # Starve the node of PROPAGATEs, direct client requests, and replies
    # to any PROPAGATE message requests.
    laggard.nodeIbStasher.delay(ppgDelay())
    laggard.clientIbStasher.delay(req_delay())
    laggard.nodeIbStasher.delay(msg_rep_delay(1000, [PROPAGATE, ]))

    baseline_resp_count = count_requested_preprepare_resp(laggard)
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_client,
                                         num_reqs=4, num_batches=2)

    # The slow node has fallen behind the rest of the pool ...
    checkNodeDataForInequality(laggard, *healthy_nodes)
    # ... yet it requested no PRE-PREPAREs.
    assert count_requested_preprepare_resp(laggard) == baseline_resp_count

    laggard.nodeIbStasher.reset_delays_and_process_delayeds()

    # With delays lifted the slow node processes all requests ...
    waitNodeDataEquality(looper, laggard, *healthy_nodes)
    # ... still without ever requesting a PRE-PREPARE.
    assert count_requested_preprepare_resp(laggard) == baseline_resp_count
def test_no_preprepare_requested(looper, txnPoolNodeSet, client1, wallet1, client1Connected, teardown):
    """
    A node missing PROPAGATEs cannot finalise the request, hence it stashes
    the PRE-PREPARE but does not request a PRE-PREPARE upon receiving a
    PREPARE.
    """
    laggard, healthy_nodes, _, _ = split_nodes(txnPoolNodeSet)

    # Starve the node of PROPAGATEs and of replies to PROPAGATE requests.
    laggard.nodeIbStasher.delay(ppgDelay(20))
    laggard.nodeIbStasher.delay(msg_rep_delay(20, [PROPAGATE, ]))

    baseline_resp_count = count_requested_preprepare_resp(laggard)
    send_reqs_batches_and_get_suff_replies(looper, wallet1, client1, 4, 2)

    # The slow node has fallen behind the rest of the pool ...
    checkNodeDataForInequality(laggard, *healthy_nodes)
    # ... yet it requested no PRE-PREPAREs.
    assert count_requested_preprepare_resp(laggard) == baseline_resp_count

    laggard.nodeIbStasher.reset_delays_and_process_delayeds()

    # With delays lifted the slow node processes all requests ...
    waitNodeDataEquality(looper, laggard, *healthy_nodes)
    # ... still without ever requesting a PRE-PREPARE.
    assert count_requested_preprepare_resp(laggard) == baseline_resp_count
def test_no_preprepare_requested(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle, teardown):
    """
    A node missing PROPAGATEs cannot finalise the request, hence it stashes
    the PRE-PREPARE but does not request a PRE-PREPARE upon receiving a
    PREPARE.
    """
    laggard, healthy_nodes, _, _ = split_nodes(txnPoolNodeSet)

    # Starve the node of PROPAGATEs and of replies to PROPAGATE requests.
    laggard.nodeIbStasher.delay(ppgDelay(20))
    laggard.nodeIbStasher.delay(msg_rep_delay(20, [PROPAGATE, ]))

    baseline_resp_count = count_requested_preprepare_resp(laggard)
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_client,
                                         num_reqs=4, num_batches=2)

    # The slow node has fallen behind the rest of the pool ...
    checkNodeDataForInequality(laggard, *healthy_nodes)
    # ... yet it requested no PRE-PREPAREs.
    assert count_requested_preprepare_resp(laggard) == baseline_resp_count

    laggard.nodeIbStasher.reset_delays_and_process_delayeds()

    # With delays lifted the slow node processes all requests ...
    waitNodeDataEquality(looper, laggard, *healthy_nodes)
    # ... still without ever requesting a PRE-PREPARE.
    assert count_requested_preprepare_resp(laggard) == baseline_resp_count
def test_belated_propagate_not_processed_if_already_in_3pc_process(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    A PROPAGATE arriving late, while the request is already inside the
    3PC process, must not lead to double ordering: every node's domain
    ledger grows by exactly one transaction.
    """
    delta = txnPoolNodeSet[3]
    ledger_size_before = delta.domainLedger.size

    # Delay Gamma's PROPAGATEs to Delta, plus COMMITs everywhere so that
    # ordering stalls while the PROPAGATE is still stashed.
    delta.nodeIbStasher.delay(ppgDelay(300, 'Gamma'))
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(cDelay(300))

    signed_reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    sdk_send_signed_requests(sdk_pool_handle, signed_reqs)

    # One full consensus round worth of waiting time.
    n_nodes = len(txnPoolNodeSet)
    round_timeout = sum((waits.expectedPropagateTime(n_nodes),
                         waits.expectedPrePrepareTime(n_nodes),
                         waits.expectedPrepareTime(n_nodes),
                         waits.expectedCommittedTime(n_nodes)))

    looper.runFor(round_timeout)
    # Deliver the belated PROPAGATE while COMMITs are still stashed.
    delta.nodeIbStasher.reset_delays_and_process_delayeds('PROPAGATE')
    looper.runFor(round_timeout)

    # Release COMMITs so the pool can finish ordering.
    for node in txnPoolNodeSet:
        node.nodeIbStasher.reset_delays_and_process_delayeds('COMMIT')
    looper.runFor(waits.expectedOrderingTime(delta.replicas.num_replicas))

    # The request was ordered exactly once on every node.
    for node in txnPoolNodeSet:
        assert node.domainLedger.size - ledger_size_before == 1
def test_replica_received_preprepare_with_unknown_request(looper,
                                                          txnPoolNodeSet,
                                                          sdk_pool_handle,
                                                          sdk_wallet_steward,
                                                          chkFreqPatched,
                                                          tconf):
    """
    When two replicas receive a PRE-PREPARE referencing a request they have
    not finalised (their PROPAGATEs are delayed), the PRE-PREPARE must not
    stay stuck in prePreparesPendingFinReqs after PROPAGATE_REQUEST_DELAY
    has elapsed.
    """
    # Warm-up request so the pool is in a steady state before delaying.
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward, 1)
    slow_nodes = txnPoolNodeSet[2:]
    nodes_stashers = [n.nodeIbStasher for n in slow_nodes]
    slow_replica_1 = txnPoolNodeSet[2].master_replica
    slow_replica_2 = txnPoolNodeSet[3].master_replica
    # Delay PROPAGATEs on both slow nodes so the next request cannot be
    # finalised on them when the PRE-PREPARE arrives.
    with delay_rules(nodes_stashers, ppgDelay()):
        sdk_send_random_and_check(looper, txnPoolNodeSet,
                                  sdk_pool_handle, sdk_wallet_steward, 1)
        # Last PRE-PREPARE each slow replica processed (captured via spylog).
        params1 = slow_replica_1._ordering_service.spylog.getLastParams(OrderingService.process_preprepare)
        pp1 = params1["pre_prepare"]
        sender1 = params1["sender"]
        params2 = slow_replica_2._ordering_service.spylog.getLastParams(OrderingService.process_preprepare)
        pp2 = params2["pre_prepare"]
        sender2 = params2["sender"]
        # Give the nodes enough time to request the missing PROPAGATEs.
        looper.runFor(tconf.PROPAGATE_REQUEST_DELAY)
        # The stashed PRE-PREPAREs must no longer be pending finalisation.
        assert (pp1, sender1, set(pp1.reqIdr)) not in slow_replica_1._ordering_service.prePreparesPendingFinReqs
        assert (pp2, sender2, set(pp2.reqIdr)) not in slow_replica_2._ordering_service.prePreparesPendingFinReqs
def test_forced_upgrade_handled_once_if_request_received_after_propagate(
        looper, nodeSet, sdk_pool_handle, sdk_wallet_trustee,
        validUpgradeExpForceTrue):
    """
    Verifies that POOL_UPGRADE force=true request is handled one time in
    case the node commits the transaction to the ledger but during the
    3PC-process receives the request directly from the client after
    a PROPAGATE from some other node
    """
    slow_node = getNonPrimaryReplicas(nodeSet, instId=0)[-1].node
    # Hold back the direct client request and PROPAGATEs from Beta/Gamma
    # so that the first PROPAGATE seen comes from the remaining node.
    slow_node.clientIbStasher.delay(req_delay())
    slow_node.nodeIbStasher.delay(ppgDelay(sender_filter='Beta'))
    slow_node.nodeIbStasher.delay(ppgDelay(sender_filter='Gamma'))

    # Keep the original message handlers so the patches below can delegate
    # to them and later restore them.
    original_process_propagate = slow_node.nodeMsgRouter.routes[Propagate]
    original_process_request = slow_node.clientMsgRouter.routes[Request]

    def patched_process_propagate(msg: Propagate, frm: str):
        # After the first PROPAGATE is processed, release the stashed
        # client request and restore the original PROPAGATE handler.
        original_process_propagate(msg, frm)
        slow_node.clientIbStasher.reset_delays_and_process_delayeds()
        slow_node.nodeMsgRouter.routes[Propagate] = original_process_propagate

    def patched_process_request(request: Request, frm: str):
        # After the client request is processed, release the stashed
        # PROPAGATEs and restore the original Request handler.
        original_process_request(request, frm)
        slow_node.nodeIbStasher.reset_delays_and_process_delayeds()
        slow_node.clientMsgRouter.routes[Request] = original_process_request

    slow_node.nodeMsgRouter.routes[Propagate] = patched_process_propagate
    slow_node.clientMsgRouter.routes[Request] = patched_process_request

    init_len = len(list(slow_node.upgrader._actionLog))
    sdk_ensure_upgrade_sent(looper, sdk_pool_handle, sdk_wallet_trustee,
                            validUpgradeExpForceTrue)
    looper.runFor(waits.expectedUpgradeScheduled())
    checkUpgradeScheduled([slow_node], validUpgradeExpForceTrue[VERSION])
    if init_len == 0:
        # first upgrade - should be only one scheduled
        assert len(list(slow_node.upgrader._actionLog)) == 1
    else:
        # one upgrade was already scheduled; we should cancel it and
        # schedule a new one, so the action log should be increased by 2
        assert len(list(slow_node.upgrader._actionLog)) == init_len + 2
        assert slow_node.upgrader._actionLog.lastEvent[1] == UpgradeLog.SCHEDULED
def setup(txnPoolNodeSet):
    """
    Prepare and return a node that will be forced to request PROPAGATEs.
    """
    node_under_test = txnPoolNodeSet[-1]
    # Raise the propagate quorum a bit so that the node ends up
    # requesting PROPAGATEs.
    node_under_test.quorums.propagate = Quorum(3)
    # Delay both client requests and PROPAGATEs so that PROPAGATEs
    # will have to be requested explicitly.
    node_under_test.clientIbStasher.delay(req_delay())
    node_under_test.nodeIbStasher.delay(ppgDelay())
    return node_under_test
def test_belated_propagate_not_processed_if_already_ordered(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    A PROPAGATE delivered only after the request has already been ordered
    must be ignored: each node's domain ledger grows by exactly one
    transaction, not two.
    """
    delta = txnPoolNodeSet[3]
    ledger_size_before = delta.domainLedger.size

    # Hold back Gamma's PROPAGATEs to Delta while the request is ordered.
    delta.nodeIbStasher.delay(ppgDelay(300, 'Gamma'))
    signed_reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    sdk_send_and_check(signed_reqs, looper, txnPoolNodeSet, sdk_pool_handle)

    # Now deliver the belated PROPAGATE and give the pool time to react.
    delta.nodeIbStasher.reset_delays_and_process_delayeds()
    looper.runFor(waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)))

    # Exactly one transaction was ordered on every node.
    for node in txnPoolNodeSet:
        assert node.domainLedger.size - ledger_size_before == 1
def test_old_view_requests_processed_during_view_change(
        looper, nodeSet, client1, wallet1):
    """
    Make sure that requests sent before view change started are processed
    and replies are returned:
    - delay Propogates (to make sure that requests are not ordered before
      view change is started)
    - send requests
    - check that requests are ordered despite of view change being in
      progress
    """
    # Ensure no view change is in progress and delay PROPAGATEs so the
    # requests cannot be ordered before the view change flag is raised.
    for node in nodeSet:
        node.view_change_in_progress = False
        node.nodeIbStasher.delay(ppgDelay(3, 0))

    reqs = sendRandomRequests(wallet1, client1, 2)
    looper.runFor(1)

    # Flip all nodes into the "view change in progress" state while the
    # requests are still unordered.
    for node in nodeSet:
        node.view_change_in_progress = True

    # Requests must still be ordered and replied to.
    waitForSufficientRepliesForRequests(looper, client1, requests=reqs)
def test_replica_received_preprepare_with_unknown_request(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward,
        chkFreqPatched):
    """
    A replica that receives a PRE-PREPARE referencing a request it has not
    finalised (its PROPAGATEs are delayed) requests the missing PROPAGATEs
    exactly once, and the PRE-PREPARE does not remain pending finalisation.
    """
    # Warm-up request before introducing delays.
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward, 1)
    replica = txnPoolNodeSet[1].master_replica
    start_request_propagate_count = replica.node.spylog.count(
        Node.request_propagates)
    # Delay PROPAGATEs to node 1 so the next request is unknown to it
    # when the PRE-PREPARE arrives.
    with delay_rules(txnPoolNodeSet[1].nodeIbStasher, ppgDelay(delay=10)):
        sdk_send_random_and_check(looper, txnPoolNodeSet,
                                  sdk_pool_handle, sdk_wallet_steward, 1)
        # Last PRE-PREPARE the replica processed (captured via spylog).
        params = replica.spylog.getLastParams(Replica.processPrePrepare)
        pp = params["pre_prepare"]
        sender = params["sender"]
        # The PRE-PREPARE must not be stuck awaiting finalised requests ...
        assert (pp, sender, set(pp.reqIdr)) not in replica.prePreparesPendingFinReqs
        # ... and exactly one batch of PROPAGATEs was requested.
        assert 1 == replica.node.spylog.count(
            Node.request_propagates) - start_request_propagate_count
def test_replica_received_preprepare_with_unknown_request(looper,
                                                          txnPoolNodeSet,
                                                          sdk_pool_handle,
                                                          sdk_wallet_steward,
                                                          chkFreqPatched,
                                                          tconf):
    """
    A replica that receives a PRE-PREPARE referencing a request it has not
    finalised (its PROPAGATEs are delayed) requests the missing PROPAGATEs
    exactly once, and after PROPAGATE_REQUEST_DELAY the PRE-PREPARE is no
    longer pending finalisation.
    """
    # Warm-up request before introducing delays.
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward, 1)
    replica = txnPoolNodeSet[1].master_replica
    start_request_propagate_count = sum_of_request_propagates(txnPoolNodeSet[1])
    # Delay PROPAGATEs to node 1 so the next request is unknown to it
    # when the PRE-PREPARE arrives.
    with delay_rules(txnPoolNodeSet[1].nodeIbStasher, ppgDelay(delay=10)):
        sdk_send_random_and_check(looper, txnPoolNodeSet,
                                  sdk_pool_handle, sdk_wallet_steward, 1)
        # Last PRE-PREPARE the replica processed (captured via spylog).
        params = replica._ordering_service.spylog.getLastParams(OrderingService.process_preprepare)
        pp = params["pre_prepare"]
        sender = params["sender"]
        # Give the node enough time to request the missing PROPAGATEs.
        looper.runFor(tconf.PROPAGATE_REQUEST_DELAY)
        # The PRE-PREPARE must not be stuck awaiting finalised requests ...
        assert (pp, sender, set(pp.reqIdr)) not in replica._ordering_service.prePreparesPendingFinReqs
        # ... and exactly one batch of PROPAGATEs was requested.
        assert 1 == sum_of_request_propagates(txnPoolNodeSet[1]) - start_request_propagate_count
def test_old_view_requests_processed_during_view_change(looper,
                                                        txnPoolNodeSet,
                                                        sdk_wallet_handle,
                                                        sdk_wallet_client):
    """
    Make sure that requests sent before view change started are processed
    and replies are returned:
    - delay Propogates (to make sure that requests are not ordered before
      view change is started)
    - send requests
    - check that requests are ordered despite of view change being in
      progress

    NOTE(review): the fixture parameter `sdk_wallet_handle` is passed where
    sibling tests pass `sdk_pool_handle` (second argument of
    sdk_send_random_requests) - likely a misnamed fixture; verify which
    fixture pytest actually injects here before relying on this test.
    """
    # Ensure no view change is in progress and delay PROPAGATEs so the
    # requests cannot be ordered before the view change flag is raised.
    for node in txnPoolNodeSet:
        node.view_change_in_progress = False
        node.nodeIbStasher.delay(ppgDelay(3, 0))

    requests = sdk_send_random_requests(looper, sdk_wallet_handle,
                                        sdk_wallet_client, 2)
    looper.runFor(1)

    # Flip all nodes into the "view change in progress" state while the
    # requests are still unordered.
    for node in txnPoolNodeSet:
        node.view_change_in_progress = True

    # Requests must still be ordered and replied to.
    sdk_get_replies(looper, requests)
def test_node_handles_forced_upgrade_on_client_request(
        looper, nodeSet, sdk_pool_handle, sdk_wallet_trustee,
        validUpgradeExpForceTrue):
    """
    Verifies that a POOL_UPGRADE request with force=true is handled
    immediately when the node receives it directly from the client,
    with all node-to-node traffic stashed.
    """
    lagging_node = getNonPrimaryReplicas(nodeSet, instId=0)[-1].node

    # Stash every node-to-node message (PROPAGATE, PRE-PREPARE, PREPARE,
    # COMMIT) so only the direct client request gets through.
    for delayer in (ppgDelay(), ppDelay(), pDelay(), cDelay()):
        lagging_node.nodeIbStasher.delay(delayer)

    sdk_send_upgrade(looper, sdk_pool_handle, sdk_wallet_trustee,
                     validUpgradeExpForceTrue)

    # The upgrade must still get scheduled on the isolated node.
    looper.run(
        eventually(checkUpgradeScheduled,
                   [lagging_node],
                   validUpgradeExpForceTrue[VERSION],
                   retryWait=1,
                   timeout=waits.expectedUpgradeScheduled()))
def test_view_change_add_one_node_uncommitted_by_next_primary(
        looper, tdir, tconf, allPluginsPath, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_client, sdk_wallet_steward):
    """
    A Node txn that is propagated only to the next primary (and hence
    uncommitted pool-wide) is applied by that primary in the new view,
    and the pool recovers fully after one more view change.
    """
    # 1. Pre-requisites: viewNo=2, Primary is Node3
    for viewNo in range(1, 3):
        trigger_view_change(txnPoolNodeSet)
        waitForViewChange(looper, txnPoolNodeSet, viewNo)
        ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30)

    # 2. Add Steward for new Node
    new_steward_wallet_handle = sdk_add_new_nym(looper,
                                                sdk_pool_handle,
                                                sdk_wallet_steward,
                                                alias="testClientSteward" + randomString(3),
                                                role=STEWARD_STRING)

    # 3. Send txn to add Node5
    # It will not be proposed and ordered by the current Primary, but will
    # be proposed by the next one in the new view.
    # Make sure that the request is propagated by the next Primary.
    old_state_root_hash = txnPoolNodeSet[0].stateRootHash(
        ledgerId=POOL_LEDGER_ID, isCommitted=False)
    primary_node = getPrimaryReplica(txnPoolNodeSet).node
    next_primary = txnPoolNodeSet[-1]
    # Hold back PROPAGATEs on the current primary so it never proposes
    # the Node txn.
    with delay_rules_without_processing(primary_node.nodeIbStasher,
                                        ppgDelay()):
        sdk_add_new_node(looper, sdk_pool_handle, new_steward_wallet_handle,
                         new_node_name="Psi", tdir=tdir, tconf=tconf,
                         allPluginsPath=allPluginsPath, autoStart=True,
                         nodeClass=TestNode, do_post_node_creation=None,
                         services=[VALIDATOR], wait_till_added=False)
        looper.run(eventually(check_node_txn_propagated, [next_primary]))
        check_node_txn_not_applied(txnPoolNodeSet, old_state_root_hash)

        # 4. Trigger view change to view
        # Make sure that only the next Primary (Node4) finishes View Change
        # to view=3
        slow_nodes = txnPoolNodeSet[:3]
        fast_nodes = [next_primary]
        slow_stashers = [slow_node.nodeIbStasher for slow_node in slow_nodes]
        # Stall NEW_VIEW handling on the slow nodes so only Node4 completes
        # the view change.
        with delay_rules_without_processing(
                slow_stashers,
                nv_delay(),
                msg_rep_delay(types_to_delay=[NEW_VIEW])):
            trigger_view_change(txnPoolNodeSet)
            waitForViewChange(looper, txnPoolNodeSet, 3)

            # view change is finished on Node4 only
            looper.run(eventually(check_view_change_done, fast_nodes, 3))
            for n in slow_nodes:
                assert n.master_replica._consensus_data.waiting_for_new_view

            # wait till fast nodes apply the Node txn in the new View
            # (Node4 creates a new batch with it)
            looper.run(eventually(check_node_txn_applied, fast_nodes,
                                  old_state_root_hash))
            check_node_txn_not_applied(slow_nodes, old_state_root_hash)

    # 5. Trigger view change to view=4, and make sure it's finished properly
    trigger_view_change(txnPoolNodeSet)
    waitForViewChange(looper, txnPoolNodeSet, 4)
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=35)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
def setup(txnPoolNodeSet):
    """
    Pick a node that is primary in neither instance, give it a long
    PROPAGATE delay, and return it.
    """
    node_under_test = getNonPrimaryReplicas(txnPoolNodeSet, 0)[1].node
    node_under_test.nodeIbStasher.delay(ppgDelay(90))
    return node_under_test