Example #1
def test_slow_catchup_while_ordering(tdir, tconf, looper, txnPoolNodeSet,
                                     sdk_pool_handle, sdk_wallet_client):
    lagging_node = txnPoolNodeSet[-1]
    other_lagging_node = txnPoolNodeSet[-2]
    other_nodes = txnPoolNodeSet[:-1]
    other_stashers = [node.nodeIbStasher for node in other_nodes]

    def lagging_node_state() -> NodeLeecherService.State:
        return lagging_node.ledgerManager._node_leecher._state

    def check_lagging_node_is_not_syncing_audit():
        assert lagging_node_state() != NodeLeecherService.State.SyncingAudit

    # Prevent lagging node from ordering
    with delay_rules(lagging_node.nodeIbStasher, ppDelay(), pDelay(),
                     cDelay()):
        # Order request on all nodes except lagging one
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, 1)

        # Prevent lagging node from catching up domain ledger (and finishing catchup)
        with delay_rules(other_stashers, delay_domain_ledger_catchup()):
            # Start catchup on lagging node
            lagging_node.ledgerManager.start_catchup()
            assert lagging_node_state() == NodeLeecherService.State.SyncingAudit

            # Ensure that audit ledger is caught up by lagging node
            looper.run(eventually(check_lagging_node_is_not_syncing_audit))
            assert lagging_node_state() != NodeLeecherService.State.Idle

            # Order one more request on all nodes except lagging one
            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, 1)

        # Now the lagging node can catch up the domain ledger, which contains more
        # transactions than it did when the audit ledger was caught up

    # Now the delayed 3PC messages reach the lagging node, so any transactions missed
    # during catchup can be ordered; ensure that all nodes have the same data after that
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    # Ensure that the pool is still functional even if we disable some other node
    # (this would not be the case if the old lagging node were nonfunctional)
    with delay_rules(other_lagging_node.nodeIbStasher, ppDelay(), pDelay(),
                     cDelay()):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, 1)

    # Ensure that all nodes will eventually have same data
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
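
These tests gate a node's message processing by wrapping its nodeIbStasher in delay_rules(...) with delayers such as ppDelay(), pDelay() and cDelay(). A minimal sketch of that stash-and-release pattern, assuming nothing about the real plenum helpers beyond their context-manager shape (SimpleStasher and simple_delay_rules are hypothetical names):

from contextlib import contextmanager


class SimpleStasher:
    """Hypothetical stand-in for a node's inbox stasher."""

    def __init__(self):
        self.delayers = []   # predicates deciding which messages to hold back
        self.stashed = []    # messages held back while some delayer matches them

    def deliver(self, msg, process):
        if any(rule(msg) for rule in self.delayers):
            self.stashed.append(msg)
        else:
            process(msg)

    def release(self, process):
        for msg in self.stashed:
            process(msg)
        self.stashed.clear()


@contextmanager
def simple_delay_rules(stasher, *rules):
    # install the delay rules for the duration of the block, then remove them;
    # the real helpers either reprocess or drop the stashed messages on exit
    # (delay_rules vs delay_rules_without_processing)
    stasher.delayers.extend(rules)
    try:
        yield
    finally:
        for rule in rules:
            stasher.delayers.remove(rule)
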
def test_commits_recvd_first(looper, txnPoolNodeSet, sdk_wallet_client,
                             sdk_pool_handle):
    slow_node = [r.node for r in getNonPrimaryReplicas(txnPoolNodeSet, 0)][-1]
    other_nodes = [n for n in txnPoolNodeSet if n != slow_node]
    delay = 50
    slow_node.nodeIbStasher.delay(ppDelay(delay, 0))
    slow_node.nodeIbStasher.delay(pDelay(delay, 0))

    sdk_send_batches_of_random_and_check(looper,
                                         txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         num_reqs=20,
                                         num_batches=4)

    assert not slow_node.master_replica.prePrepares
    assert not slow_node.master_replica.prepares
    assert not slow_node.master_replica.commits
    assert len(slow_node.master_replica.commitsWaitingForPrepare) > 0

    slow_node.reset_delays_and_process_delayeds()
    waitNodeDataEquality(looper, slow_node, *other_nodes)
    assert check_if_all_equal_in_list(
        [n.master_replica.ordered for n in txnPoolNodeSet])

    assert slow_node.master_replica.prePrepares
    assert slow_node.master_replica.prepares
    assert slow_node.master_replica.commits
    assert not slow_node.master_replica.commitsWaitingForPrepare
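
The slow node above only catches up once reset_delays_and_process_delayeds() is called. In terms of the hypothetical SimpleStasher sketch earlier, that call amounts to clearing every delay rule and replaying the stashed messages:

def reset_delays_and_process_delayeds(stasher, process):
    # assumes the SimpleStasher sketch above: drop all delay rules and
    # replay whatever was stashed so the node can resume ordering
    stasher.delayers.clear()
    stasher.release(process)
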
def test_backup_can_order_after_catchup(txnPoolNodeSet, looper,
                                        sdk_pool_handle, sdk_wallet_client):
    # We expect that after the view change Gamma will be primary on the backup instance
    delayed_node = txnPoolNodeSet[-2]
    with delay_rules_without_processing(delayed_node.nodeIbStasher,
                                        pDelay(instId=MASTER_REPLICA_INDEX),
                                        cDelay(instId=MASTER_REPLICA_INDEX),
                                        ppDelay(instId=MASTER_REPLICA_INDEX)):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, REQUEST_COUNT)
        with delay_rules_without_processing(
            [n.nodeIbStasher for n in txnPoolNodeSet],
                old_view_pp_request_delay()):
            ensure_view_change(looper, txnPoolNodeSet)
            ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
            assert delayed_node.replicas._replicas[BACKUP_INST_ID].isPrimary
            # Check that the backup cannot order
            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, REQUEST_COUNT)
            for n in txnPoolNodeSet:
                assert n.replicas._replicas[BACKUP_INST_ID].last_ordered_3pc[
                    1] == 0
            # Forcing catchup
            delayed_node.start_catchup()
            ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

            # Check that the backup can order after catchup
            b_pp_seq_no_before = delayed_node.replicas._replicas[
                BACKUP_INST_ID].last_ordered_3pc[1]
            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, REQUEST_COUNT)
            assert delayed_node.replicas._replicas[BACKUP_INST_ID].last_ordered_3pc[1] == \
                   b_pp_seq_no_before + REQUEST_COUNT
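
The final assertion compares last_ordered_3pc on the backup instance before and after sending REQUEST_COUNT more requests; the value is a (view_no, pp_seq_no) tuple. A small standalone check in the same spirit, assuming one request per 3PC batch (assert_ordered_progress is a hypothetical helper, not part of the test framework):

def assert_ordered_progress(before_3pc, after_3pc, new_batches):
    # view number never goes backwards; pp_seq_no advances by the number of
    # newly ordered batches (here: one batch per request, by assumption)
    assert after_3pc[0] >= before_3pc[0]
    assert after_3pc[1] == before_3pc[1] + new_batches


assert_ordered_progress((0, 3), (0, 4), new_batches=1)
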
Example #4
def test_pp_obsolescence_check_fail_for_delayed(tdir, tconf, looper,
                                                txnPoolNodeSet,
                                                sdk_pool_handle,
                                                sdk_wallet_client):

    delay = PATCHED_ACCEPTABLE_DEVIATION_PREPREPARE_SECS + 1
    lagging_node = txnPoolNodeSet[-1]

    # Prevent lagging node from ordering
    with delay_rules(lagging_node.nodeIbStasher, ppDelay(), pDelay(),
                     cDelay()):
        # Order request on all nodes except lagging one
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, 1)
        looper.run(asyncio.sleep(delay))

    # Now the delayed 3PC messages reach the lagging node, so any delayed transactions
    # can be processed (the PrePrepare would be discarded but requested after that);
    # ensure that all nodes will have the same data after that
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    pp_count = get_count(lagging_node.master_replica,
                         lagging_node.master_replica.processPrePrepare)

    assert pp_count > 0
    assert get_timestamp_suspicion_count(lagging_node) == pp_count
Example #5
def do_view_change_with_unaligned_prepare_certificates(
        slow_nodes, nodes, looper, sdk_pool_handle, sdk_wallet_client):
    """
    Perform a view change with some nodes reaching a lower last prepared certificate than others.
    With the current implementation of view change this can result in the view change taking a long time.
    """
    fast_nodes = [n for n in nodes if n not in slow_nodes]

    all_stashers = [n.nodeIbStasher for n in nodes]
    slow_stashers = [n.nodeIbStasher for n in slow_nodes]

    # Delay some PREPAREs and all COMMITs
    with delay_rules(slow_stashers, pDelay()):
        with delay_rules(all_stashers, cDelay()):
            # Send request
            request = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)

            # Wait until this request is prepared on fast nodes
            looper.run(eventually(check_last_prepared_certificate, fast_nodes, (0, 1)))
            # Make sure it's not prepared on slow nodes
            looper.run(eventually(check_last_prepared_certificate, slow_nodes, None))

            # Trigger view change
            for n in nodes:
                n.view_changer.on_master_degradation()

        # Now commits are processed
        # Wait until view change is complete
        looper.run(eventually(check_view_change_done, nodes, 1, timeout=60))

    # Finish request gracefully
    sdk_get_reply(looper, request)
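
looper.run(eventually(...)) is the retry primitive used throughout these tests: re-run a check until it stops raising or a timeout expires. A synchronous, simplified sketch of the same idea (poll_until is hypothetical; the real eventually is coroutine-based and driven by the looper):

import time


def poll_until(check, timeout=15.0, retry_wait=0.5):
    # re-run `check` until it passes; re-raise its last AssertionError on timeout
    deadline = time.monotonic() + timeout
    while True:
        try:
            return check()
        except AssertionError:
            if time.monotonic() >= deadline:
                raise
            time.sleep(retry_wait)


# e.g. poll_until(lambda: check_last_prepared_certificate(fast_nodes, (0, 1)))
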
def test_clearing_forwarded_preprepared_request(looper, chkFreqPatched,
                                                reqs_for_checkpoint,
                                                txnPoolNodeSet,
                                                sdk_pool_handle,
                                                sdk_wallet_steward):
    # Case when the backup ordered correctly but the primary had problems.
    # As a result, the master will execute the caught-up txns and they will be
    # removed from the request queues
    behind_node = txnPoolNodeSet[-1]

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_steward,
                                         CHK_FREQ, CHK_FREQ)
    with delay_rules(behind_node.nodeIbStasher,
                     pDelay(delay=sys.maxsize, instId=0),
                     cDelay(delay=sys.maxsize, instId=0)):
        count = behind_node.spylog.count(behind_node.allLedgersCaughtUp)

        sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool_handle,
                                   sdk_wallet_steward, req_num, req_num)

        looper.run(eventually(node_caughtup, behind_node, count, retryWait=1))

    assert len(behind_node.requests) == 0
    assert all([
        len(q) == 0 for r in behind_node.replicas.values()
        for q in r.requestQueues.values()
    ])
    assert len(behind_node.clientAuthNr._verified_reqs) == 0
    assert len(behind_node.requestSender) == 0
def test_deletion_non_forwarded_request(
        looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_steward, tconf, tdir, allPluginsPath):
    behind_node = txnPoolNodeSet[-1]
    [behind_node.replicas.values()[1].discard_req_key(1, key) for key in behind_node.requests]
    behind_node.requests.clear()

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_steward, CHK_FREQ, CHK_FREQ)
    behind_node.quorums.propagate = Quorum(len(txnPoolNodeSet) + 1)

    with delay_rules(behind_node.nodeIbStasher,
                     ppDelay(delay=sys.maxsize),
                     pDelay(delay=sys.maxsize),
                     cDelay(delay=sys.maxsize)):
        count = behind_node.spylog.count(behind_node.allLedgersCaughtUp)
        sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool_handle,
                                   sdk_wallet_steward, req_num, req_num)
        looper.run(eventually(node_caughtup, behind_node, count, retryWait=1))

    # We clear caught-up requests
    looper.run(eventually(lambda: assertExp(len(behind_node.requests) == 0)))
    assert all([len(q) == 0 for r in behind_node.replicas.values() for q in r._ordering_service.requestQueues.values()])
    assert len(behind_node.clientAuthNr._verified_reqs) == 0
    assert len(behind_node.requestSender) == 0
def test_revert_xfer_with_fees_before_catchup(looper, helpers,
                                              nodeSetWithIntegratedTokenPlugin,
                                              sdk_pool_handle, fees,
                                              xfer_mint_tokens,
                                              xfer_addresses):
    nodes = nodeSetWithIntegratedTokenPlugin
    node_stashers = [n.nodeIbStasher for n in nodes]
    helpers.general.do_set_fees(fees)
    [address_giver, address_receiver] = xfer_addresses
    inputs = helpers.general.get_utxo_addresses([address_giver])[0]
    outputs = [{
        ADDRESS: address_receiver,
        AMOUNT: 1000 - fees[XFER_PUBLIC_FEES_ALIAS]
    }]
    request = helpers.request.transfer(inputs, outputs)
    with delay_rules_without_processing(node_stashers, cDelay(), pDelay()):
        helpers.sdk.send_request_objects([request])
        looper.runFor(waits.expectedPrePrepareTime(len(nodes)))
        for n in nodes:
            n.start_catchup()
        for n in nodes:
            looper.run(
                eventually(lambda: assertExp(n.mode == Mode.participating)))
        for n in nodes:
            looper.run(
                eventually(check_state, n, True, retryWait=0.2, timeout=15))
    ensure_all_nodes_have_same_data(looper, nodes)
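
The XFER output above is computed as 1000 - fees[XFER_PUBLIC_FEES_ALIAS], which conserves value (sum of inputs equals sum of outputs plus the fee) assuming the input UTXO holds 1000 tokens. A worked instance with an assumed fee of 8 (illustrative numbers only; transfer_output_amount is a hypothetical helper):

def transfer_output_amount(input_total, fee):
    # amount that can be paid out after covering the fee from the same inputs
    assert input_total > fee, "inputs must cover the fee"
    return input_total - fee


assert transfer_output_amount(1000, 8) == 992
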
def test_check_cdp_pp_storages(looper, txnPoolNodeSet, sdk_pool_handle,
                               sdk_wallet_client):
    def check_all_empty(replica):
        assert not bool(replica._consensus_data.preprepared)
        assert not bool(replica._consensus_data.prepared)

    def check_preprepared_not_empty(replica):
        assert bool(replica._consensus_data.preprepared)

    def check_prepared_not_empty(replica):
        assert bool(replica._consensus_data.prepared)

    def operation_for_replicas(operation, node_set=txnPoolNodeSet):
        for node in node_set:
            operation(node.master_replica)

    node_stashers = [n.nodeIbStasher for n in txnPoolNodeSet]

    with delay_rules(node_stashers, pDelay()):
        with delay_rules(node_stashers, ppDelay()):
            sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
            looper.run(
                eventually(operation_for_replicas, check_all_empty,
                           txnPoolNodeSet[1:]))
            looper.run(
                eventually(operation_for_replicas, check_preprepared_not_empty,
                           txnPoolNodeSet[0:1]))
        looper.run(
            eventually(operation_for_replicas, check_preprepared_not_empty,
                       txnPoolNodeSet))
    looper.run(
        eventually(operation_for_replicas, check_prepared_not_empty,
                   txnPoolNodeSet))
Example #10
def test_freeing_forwarded_not_preprepared_request(
        looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_steward, tconf, tdir, allPluginsPath):
    behind_node = txnPoolNodeSet[-1]
    behind_node.requests.clear()

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_steward,
                                         CHK_FREQ, CHK_FREQ)
    with delay_rules(
            behind_node.nodeIbStasher,
            chk_delay(delay=sys.maxsize,
                      instId=behind_node.replicas.values()[-1])):
        with delay_rules(behind_node.nodeIbStasher, ppDelay(delay=sys.maxsize),
                         pDelay(delay=sys.maxsize), cDelay(delay=sys.maxsize)):
            count = behind_node.spylog.count(behind_node.allLedgersCaughtUp)
            sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool_handle,
                                       sdk_wallet_steward, req_num, req_num)
            looper.run(
                eventually(node_caughtup, behind_node, count, retryWait=1))
            looper.run(
                eventually(
                    lambda: assertExp(len(behind_node.requests) == req_num)))

    # We execute caught-up requests
    looper.run(
        eventually(lambda: assertExp(len(behind_node.requests) == req_num)))
    assert all(r.executed for r in behind_node.requests.values()
               if behind_node.seqNoDB.get(r.request.key)[1])
Example #11
def test_check_cdp_pp_storages(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward):
    def check_all_empty(replica, reverse=False):
        check_preprepared_empty(replica, reverse)
        check_prepared_empty(replica, reverse)

    def check_preprepared_empty(replica, reverse=False):
        statement_pp = bool(replica._consensus_data.preprepared)
        statement_pp ^= reverse
        assert not statement_pp

    def check_prepared_empty(replica, reverse=False):
        statement_p = bool(replica._consensus_data.prepared)
        statement_p ^= reverse
        assert not statement_p

    def operation_for_replicas(operation, node_set=txnPoolNodeSet, reverse=False):
        for node in node_set:
            operation(node.master_replica, reverse)

    node_stashers = [n.nodeIbStasher for n in txnPoolNodeSet]

    with delay_rules(node_stashers, cDelay()):
        with delay_rules(node_stashers, pDelay()):
            with delay_rules(node_stashers, ppDelay()):
                sdk_add_new_nym_without_waiting(looper, sdk_pool_handle, sdk_wallet_steward)
                looper.run(eventually(operation_for_replicas, check_all_empty, txnPoolNodeSet[1:]))
                looper.run(eventually(operation_for_replicas, check_preprepared_empty, txnPoolNodeSet[0:1], True))
            looper.run(eventually(operation_for_replicas, check_preprepared_empty, txnPoolNodeSet, True))
        looper.run(eventually(operation_for_replicas, check_prepared_empty, txnPoolNodeSet, True))
    looper.run(eventually(operation_for_replicas, check_all_empty, txnPoolNodeSet, True))
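
The reverse flag in this example uses XOR to turn an emptiness assertion into a non-emptiness assertion without duplicating the check. A self-contained illustration of that toggle:

def assert_empty(collection, reverse=False):
    statement = bool(collection)   # True when the collection is non-empty
    statement ^= reverse           # reverse=True inverts the expectation
    assert not statement


assert_empty([])                    # passes: must be empty
assert_empty([1, 2], reverse=True)  # passes: must be non-empty
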
def test_new_primary_lagging_behind(looper, txnPoolNodeSet, sdk_wallet_client,
                                    sdk_pool_handle, tconf):
    initial_view_no = checkViewNoForNodes(txnPoolNodeSet)
    next_primary_name = get_next_primary_name(txnPoolNodeSet,
                                              initial_view_no + 1)
    next_primary = [n for n in txnPoolNodeSet
                    if n.name == next_primary_name][0]
    other_nodes = [n for n in txnPoolNodeSet if n != next_primary]
    expected_primary_name = get_next_primary_name(txnPoolNodeSet,
                                                  initial_view_no + 2)
    # Next primary cannot stabilize 1 checkpoint
    with delay_rules(next_primary.nodeIbStasher, cDelay(), pDelay()):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, CHK_FREQ)
        ensure_view_change(looper, txnPoolNodeSet)
        looper.run(
            eventually(check_not_in_view_change,
                       txnPoolNodeSet,
                       timeout=2 * tconf.NEW_VIEW_TIMEOUT))
        ensureElectionsDone(looper=looper,
                            nodes=other_nodes,
                            customTimeout=2 * tconf.NEW_VIEW_TIMEOUT,
                            instances_list=[0, 1])

    assert next_primary_name != expected_primary_name
    assert checkViewNoForNodes(txnPoolNodeSet) == initial_view_no + 2

    # send CHK_FREQ reqs so that slow node will start catch-up
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, CHK_FREQ)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=30)
def test_deletion_non_forwarded_request(
        looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_steward, tconf, tdir, allPluginsPath):
    behind_node = txnPoolNodeSet[-1]
    [behind_node.replicas.values()[1].discard_req_key(1, key) for key in behind_node.requests]
    behind_node.requests.clear()

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_steward, CHK_FREQ, CHK_FREQ)
    behind_node.quorums.propagate = Quorum(len(txnPoolNodeSet) + 1)

    with delay_rules(behind_node.nodeIbStasher,
                     ppDelay(delay=sys.maxsize),
                     pDelay(delay=sys.maxsize),
                     cDelay(delay=sys.maxsize)):
        count = behind_node.spylog.count(behind_node.allLedgersCaughtUp)
        sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool_handle,
                                   sdk_wallet_steward, req_num, req_num)
        looper.run(eventually(node_caughtup, behind_node, count, retryWait=1))

    # We clear caught-up requests
    assert len(behind_node.requests) == 0
    assert all([len(q) == 0 for r in behind_node.replicas.values() for q in r.requestQueues.values()])
    assert len(behind_node.clientAuthNr._verified_reqs) == 0
    assert len(behind_node.requestSender) == 0
def test_primary_receives_delayed_prepares(looper, txnPoolNodeSet,
                                           sdk_wallet_client,
                                           sdk_pool_handle):
    """
    Primary gets all PREPAREs after COMMITs
    """
    delay = 50
    primary_node = get_master_primary_node(txnPoolNodeSet)
    other_nodes = [n for n in txnPoolNodeSet if n != primary_node]
    primary_node.nodeIbStasher.delay(pDelay(delay, 0))

    sdk_send_random_and_check(looper,
                              txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              count=10)

    for node in other_nodes:
        assert node.master_replica.prePrepares
        assert node.master_replica.prepares
        assert node.master_replica.commits

    assert primary_node.master_replica.sentPrePrepares
    assert not primary_node.master_replica.prepares
    assert primary_node.master_replica.commits
Example #15
def do_view_change_with_unaligned_prepare_certificates(
        slow_nodes, nodes, looper, sdk_pool_handle, sdk_wallet_client):
    """
    Perform a view change with some nodes reaching a lower last prepared certificate than others.
    With the current implementation of view change this can result in the view change taking a long time.
    """
    fast_nodes = [n for n in nodes if n not in slow_nodes]

    all_stashers = [n.nodeIbStasher for n in nodes]
    slow_stashers = [n.nodeIbStasher for n in slow_nodes]

    # Delay some PREPAREs and all COMMITs
    with delay_rules(slow_stashers, pDelay()):
        with delay_rules(all_stashers, cDelay()):
            # Send request
            request = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)

            # Wait until this request is prepared on fast nodes
            looper.run(eventually(check_last_prepared_certificate, fast_nodes, (0, 1)))
            # Make sure it's not prepared on slow nodes
            looper.run(eventually(check_last_prepared_certificate, slow_nodes, None))

            # Trigger view change
            for n in nodes:
                n.view_changer.on_master_degradation()

        # Now commits are processed
        # Wait until view change is complete
        looper.run(eventually(check_view_change_done, nodes, 1, timeout=60))

    # Finish request gracefully
    sdk_get_reply(looper, request)
Example #16
def test_dequeue_and_validate_commits(looper, txnPoolNodeSet,
                                      sdk_wallet_client, sdk_pool_handle):
    slow_node = [r.node for r in getNonPrimaryReplicas(txnPoolNodeSet, 0)][-1]
    other_nodes = [n for n in txnPoolNodeSet if n != slow_node]
    delay = 50
    with delay_rules(slow_node.nodeIbStasher, pDelay(delay),
                     msg_rep_delay(delay, [PREPARE, PREPREPARE])):
        with delay_rules(slow_node.nodeIbStasher, ppDelay(delay)):

            sdk_send_batches_of_random_and_check(looper,
                                                 txnPoolNodeSet,
                                                 sdk_pool_handle,
                                                 sdk_wallet_client,
                                                 num_reqs=1,
                                                 num_batches=1)

            assert not slow_node.master_replica._ordering_service.prePrepares
            assert not slow_node.master_replica._ordering_service.prepares
            assert not slow_node.master_replica._ordering_service.commits
            assert len(slow_node.master_replica._ordering_service.
                       commitsWaitingForPrepare) > 0

        waitNodeDataEquality(looper, slow_node, *other_nodes)
        assert check_if_all_equal_in_list([
            n.master_replica._ordering_service.ordered for n in txnPoolNodeSet
        ])

        assert slow_node.master_replica._ordering_service.prePrepares
        assert slow_node.master_replica._ordering_service.prepares
        assert slow_node.master_replica._ordering_service.commits
        assert not slow_node.master_replica._ordering_service.commitsWaitingForPrepare

        assert all(slow_node.master_replica.last_ordered_3pc ==
                   n.master_replica.last_ordered_3pc for n in other_nodes)
def test_primary_recvs_3phase_message_outside_watermarks(perf_chk_patched, chkFreqPatched, looper, txnPoolNodeSet,
                                                         sdk_pool_handle, sdk_wallet_client, reqs_for_logsize):
    """
    The primary of one instance starts getting a lot of requests, more than its
    log size, and queues them up since they would go beyond its watermarks. This
    happens because the other nodes are slow in processing its PRE-PREPAREs.
    Eventually this primary will send PRE-PREPAREs for all requests and those
    requests will complete.
    """
    tconf = perf_chk_patched
    delay = 2
    instId = 0
    reqs_to_send = 2 * reqs_for_logsize + 1
    logger.debug('Will send {} requests'.format(reqs_to_send))

    npr = getNonPrimaryReplicas(txnPoolNodeSet, instId)
    pr = getPrimaryReplica(txnPoolNodeSet, instId)
    orderedCount = pr.stats.get(TPCStat.OrderSent)

    for r in npr:
        r.node.nodeIbStasher.delay(ppDelay(delay, instId))
        r.node.nodeIbStasher.delay(pDelay(delay, instId))

    tm_exec_1_batch = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))
    batch_count = math.ceil(reqs_to_send / tconf.Max3PCBatchSize)
    total_timeout = (tm_exec_1_batch + delay) * batch_count

    def chk():
        assert orderedCount + batch_count == pr.stats.get(TPCStat.OrderSent)

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, reqs_to_send)
    looper.run(eventually(chk, retryWait=1, timeout=total_timeout))
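
The timeout budget above scales with the number of 3PC batches: batch_count = ceil(reqs_to_send / Max3PCBatchSize) and total_timeout = (tm_exec_1_batch + delay) * batch_count. A worked instance with assumed values (reqs_for_logsize, Max3PCBatchSize and the per-batch execution time are placeholders, not the real fixture values):

import math

reqs_for_logsize = 10                    # assumed
reqs_to_send = 2 * reqs_for_logsize + 1  # 21
max_3pc_batch_size = 3                   # assumed
tm_exec_1_batch = 15                     # assumed, seconds
delay = 2                                # seconds, as in the test above

batch_count = math.ceil(reqs_to_send / max_3pc_batch_size)  # 7
total_timeout = (tm_exec_1_batch + delay) * batch_count     # 119 seconds
assert (batch_count, total_timeout) == (7, 119)
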
def test_primary_recvs_3phase_message_outside_watermarks(perf_chk_patched, chkFreqPatched, looper, txnPoolNodeSet,
                                                         sdk_pool_handle, sdk_wallet_client, reqs_for_logsize):
    """
    The primary of one instance starts getting a lot of requests, more than its
    log size, and queues them up since they would go beyond its watermarks. This
    happens because the other nodes are slow in processing its PRE-PREPAREs.
    Eventually this primary will send PRE-PREPAREs for all requests and those
    requests will complete.
    """
    tconf = perf_chk_patched
    delay = 5
    instId = 0
    reqs_to_send = 2 * reqs_for_logsize + 1
    logger.debug('Will send {} requests'.format(reqs_to_send))

    npr = getNonPrimaryReplicas(txnPoolNodeSet, instId)
    pr = getPrimaryReplica(txnPoolNodeSet, instId)
    from plenum.server.replica import TPCStat
    orderedCount = pr.stats.get(TPCStat.OrderSent)

    for r in npr:
        r.node.nodeIbStasher.delay(ppDelay(delay, instId))
        r.node.nodeIbStasher.delay(pDelay(delay, instId))

    tm_exec_1_batch = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))
    batch_count = math.ceil(reqs_to_send / tconf.Max3PCBatchSize)
    total_timeout = (tm_exec_1_batch + delay) * batch_count

    def chk():
        assert orderedCount + batch_count == pr.stats.get(TPCStat.OrderSent)

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, reqs_to_send)
    looper.run(eventually(chk, retryWait=1, timeout=total_timeout))
def test_node_handles_forced_upgrade_on_propagate(looper, nodeSet,
                                                  sdk_pool_handle,
                                                  sdk_wallet_trustee,
                                                  validUpgradeExpForceTrue):
    """
    Verifies that a POOL_UPGRADE force=true request is handled immediately when
    the node receives it in a PROPAGATE from any other node
    """
    slow_node = getNonPrimaryReplicas(nodeSet, instId=0)[-1].node

    # Stash all except PROPAGATEs from Gamma
    slow_node.clientIbStasher.delay(req_delay())
    slow_node.nodeIbStasher.delay(ppgDelay(sender_filter='Alpha'))
    slow_node.nodeIbStasher.delay(ppgDelay(sender_filter='Beta'))
    slow_node.nodeIbStasher.delay(ppDelay())
    slow_node.nodeIbStasher.delay(pDelay())
    slow_node.nodeIbStasher.delay(cDelay())

    sdk_send_upgrade(looper, sdk_pool_handle, sdk_wallet_trustee,
                     validUpgradeExpForceTrue)

    looper.run(
        eventually(checkUpgradeScheduled, [slow_node],
                   validUpgradeExpForceTrue[VERSION],
                   retryWait=1,
                   timeout=waits.expectedUpgradeScheduled()))
Example #20
def test_freeing_forwarded_preprepared_request(looper, chkFreqPatched,
                                               reqs_for_checkpoint,
                                               txnPoolNodeSet, sdk_pool_handle,
                                               sdk_wallet_steward):
    # Case when both the backup and the primary had problems
    behind_node = txnPoolNodeSet[-1]

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_steward,
                                         CHK_FREQ, CHK_FREQ)
    with delay_rules(
            behind_node.nodeIbStasher,
            pDelay(delay=sys.maxsize),
            cDelay(delay=sys.maxsize),
    ):
        count = behind_node.spylog.count(behind_node.allLedgersCaughtUp)

        sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool_handle,
                                   sdk_wallet_steward, req_num, req_num)

        looper.run(eventually(node_caughtup, behind_node, count, retryWait=1))

    looper.run(
        eventually(lambda: assertExp(len(behind_node.requests) == req_num)))
    assert all(r.executed for r in behind_node.requests.values()
               if behind_node.seqNoDB.get(r.request.key)[1])

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_steward,
                                         CHK_FREQ, CHK_FREQ)

    # Master and backup replicas do not stash new requests and successfully order them
    assert len(behind_node.requests) == req_num
def test_revert_nym_with_fees_before_catchup(looper, helpers,
                                             nodeSetWithIntegratedTokenPlugin,
                                             fees_set, fees, xfer_mint_tokens,
                                             xfer_addresses):
    nodes = nodeSetWithIntegratedTokenPlugin
    current_amount = get_amount_from_token_txn(xfer_mint_tokens)
    seq_no = get_seq_no(xfer_mint_tokens)
    lagging_node = nodes[-1]
    current_amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, xfer_addresses, current_amount)
    with delay_rules_without_processing(lagging_node.nodeIbStasher, cDelay(),
                                        pDelay()):
        current_amount, seq_no, _ = send_and_check_nym_with_fees(
            helpers, fees_set, seq_no, looper, xfer_addresses, current_amount)
        looper.runFor(waits.expectedPrePrepareTime(len(nodes)))
        lagging_node.start_catchup()
        for n in nodes:
            looper.run(
                eventually(lambda: assertExp(n.mode == Mode.participating)))
        for n in nodes:
            looper.run(
                eventually(check_state, n, True, retryWait=0.2, timeout=15))
    ensure_all_nodes_have_same_data(looper, nodes)
    current_amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, xfer_addresses, current_amount)
    ensure_all_nodes_have_same_data(looper, nodes)
def test_no_propagate_request_on_different_prepares_on_backup_before_vc(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    '''
    1. Send a random request
    2. Make 2 nodes slow in getting prepares on the backup instance
    3. Send a random request
    4. Do a view change
    5. Reset delays
    => we expect that all nodes and all instances have the same last ordered
    '''
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    slow_instance = 1
    slow_nodes = txnPoolNodeSet[1:3]
    fast_nodes = [n for n in txnPoolNodeSet if n not in slow_nodes]
    nodes_stashers = [n.nodeIbStasher for n in slow_nodes]
    old_last_ordered = txnPoolNodeSet[0].master_replica.last_ordered_3pc
    with delay_rules(nodes_stashers, pDelay(instId=slow_instance)):
        with delay_rules(nodes_stashers, ppDelay(instId=slow_instance)):
            # send one request
            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, 1)
            old_view_no = txnPoolNodeSet[0].viewNo
            looper.run(
                eventually(is_prepared,
                           fast_nodes,
                           2,
                           slow_instance))

            # trigger view change on all nodes
            ensure_view_change(looper, txnPoolNodeSet)
            # wait for view change done on all nodes
            ensureElectionsDone(looper, txnPoolNodeSet)

    primary = getPrimaryReplica(txnPoolNodeSet, slow_instance).node
    non_primaries = [n for n in txnPoolNodeSet if n is not primary]

    check_last_ordered(non_primaries,
                       slow_instance,
                       (old_view_no, old_last_ordered[1] + 1))

    # Backup primary replica must not advance last_ordered_3pc
    # up to the master's value
    check_last_ordered([primary],
                       slow_instance,
                       (old_view_no, old_last_ordered[1]))

    check_last_ordered(txnPoolNodeSet,
                       txnPoolNodeSet[0].master_replica.instId,
                       (old_last_ordered[0], old_last_ordered[1] + 1))

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    looper.run(
        eventually(check_last_ordered,
                   txnPoolNodeSet,
                   slow_instance,
                   (txnPoolNodeSet[0].viewNo, 1)))
    assert all(0 == node.spylog.count(node.request_propagates)
               for node in txnPoolNodeSet)
def test_freeing_forwarded_preprepared_request(
        looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_steward):
    # Case when both the backup and the primary had problems
    behind_node = txnPoolNodeSet[-1]

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_steward, CHK_FREQ, CHK_FREQ)
    with delay_rules(behind_node.nodeIbStasher,
                     pDelay(delay=sys.maxsize),
                     cDelay(delay=sys.maxsize), ):
        count = behind_node.spylog.count(behind_node.allLedgersCaughtUp)

        sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool_handle,
                                   sdk_wallet_steward, req_num, req_num)

        looper.run(eventually(node_caughtup, behind_node, count, retryWait=1))

    assert len(behind_node.requests) == req_num
    assert all(r.executed for r in behind_node.requests.values() if behind_node.seqNoDB.get(r.request.key)[1])

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_steward, CHK_FREQ, CHK_FREQ)

    # Master and backup replicas do not stash new requests and successfully order them
    assert len(behind_node.requests) == req_num
def test_commits_recvd_first(looper, txnPoolNodeSet,
                             sdk_wallet_client, sdk_pool_handle):
    slow_node = [r.node for r in getNonPrimaryReplicas(txnPoolNodeSet, 0)][-1]
    other_nodes = [n for n in txnPoolNodeSet if n != slow_node]
    delay = 50
    slow_node.nodeIbStasher.delay(ppDelay(delay, 0))
    slow_node.nodeIbStasher.delay(pDelay(delay, 0))

    sdk_send_batches_of_random_and_check(looper,
                                         txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         num_reqs=20,
                                         num_batches=4)

    assert not slow_node.master_replica.prePrepares
    assert not slow_node.master_replica.prepares
    assert not slow_node.master_replica.commits
    assert len(slow_node.master_replica.commitsWaitingForPrepare) > 0

    slow_node.reset_delays_and_process_delayeds()
    waitNodeDataEquality(looper, slow_node, *other_nodes)
    assert check_if_all_equal_in_list([n.master_replica.ordered
                                       for n in txnPoolNodeSet])

    assert slow_node.master_replica.prePrepares
    assert slow_node.master_replica.prepares
    assert slow_node.master_replica.commits
    assert not slow_node.master_replica.commitsWaitingForPrepare
Example #25
def setup(request, looper, txnPoolNodeSet, client1, wallet1, client1Connected):
    slow_node = getNonPrimaryReplicas(txnPoolNodeSet, 0)[1].node
    fast_nodes = [n for n in txnPoolNodeSet if n != slow_node]
    slow_node.nodeIbStasher.delay(pDelay(100, 0))
    slow_node.nodeIbStasher.delay(cDelay(100, 0))
    if request.param == 'all':
        slow_node.nodeIbStasher.delay(ppDelay(100, 0))
    return slow_node, fast_nodes
def test_view_change_after_max_catchup_rounds(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client):
    """
    The node should do only a fixed number of catchup rounds. For this, delay
    Prepares and Commits for 2 non-primary nodes by a large amount, which is
    equivalent to losing the Prepares and Commits. Make sure these 2 nodes have
    a different last prepared certificate from the other two. Then do a view
    change, make sure the view change completes, and check that the pool does
    not process the requests that were prepared by only a subset of the nodes.
    """
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_client, 2 * 3, 3)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    ledger_summary = txnPoolNodeSet[0].ledger_summary

    slow_nodes = [r.node for r in getNonPrimaryReplicas(
        txnPoolNodeSet, 0)[-2:]]
    fast_nodes = [n for n in txnPoolNodeSet if n not in slow_nodes]

    # Make nodes slow to process Prepares and Commits
    for node in slow_nodes:
        node.nodeIbStasher.delay(pDelay(120, 0))
        node.nodeIbStasher.delay(cDelay(120, 0))

    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 5)
    looper.runFor(3)

    ensure_view_change(looper, nodes=txnPoolNodeSet)

    def last_prepared(nodes):
        lst = [n.master_replica.last_prepared_certificate_in_view()
               for n in nodes]
        # All nodes have same last prepared
        assert check_if_all_equal_in_list(lst)
        return lst[0]

    last_prepared_slow = last_prepared(slow_nodes)
    last_prepared_fast = last_prepared(fast_nodes)

    # Check that the `slow_nodes` and `fast_nodes` sets have different last_prepared
    assert last_prepared_fast != last_prepared_slow

    # View change complete
    ensureElectionsDone(looper, txnPoolNodeSet)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    # The requests which were prepared by only a subset of the nodes were
    # not ordered
    assert txnPoolNodeSet[0].ledger_summary == ledger_summary

    for node in slow_nodes:
        node.nodeIbStasher.reset_delays_and_process_delayeds()

    # Make sure pool is functional
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_client, 10, 2)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    last_prepared(txnPoolNodeSet)
def test_revert_auth_rule_changing(looper, txnPoolNodeSet, sdk_wallet_trustee,
                                   sdk_wallet_steward, sdk_pool_handle):
    node_stashers = [n.nodeIbStasher for n in txnPoolNodeSet]
    wh, _ = sdk_wallet_trustee
    new_steward_did, new_steward_verkey = create_verkey_did(looper, wh)
    new_steward_did2, new_steward_verkey2 = create_verkey_did(looper, wh)
    """We try to change rule for adding new steward. For this case we """
    changed_constraint = AuthConstraint(role=STEWARD, sig_count=1)
    action = AuthActionAdd(txn_type=NYM, field=ROLE, value=STEWARD)
    with delay_rules_without_processing(node_stashers, pDelay(), cDelay()):
        sdk_send_and_check_auth_rule_request(
            looper,
            sdk_wallet_trustee,
            sdk_pool_handle,
            auth_action=ADD_PREFIX,
            auth_type=action.txn_type,
            field=action.field,
            new_value=action.value,
            old_value=None,
            constraint=changed_constraint.as_dict,
            no_wait=True)
        looper.runFor(waits.expectedPrePrepareTime(len(txnPoolNodeSet)))
        """
        Try to add a new steward using an existing trustee.
        Validation should raise an exception because we changed the uncommitted state
        by adding a new rule that "only a steward can add a new steward".
        """
        with pytest.raises(RequestRejectedException,
                           match="TRUSTEE can not do this action"):
            sdk_add_new_nym(looper,
                            sdk_pool_handle,
                            sdk_wallet_trustee,
                            'newSteward1',
                            STEWARD_STRING,
                            dest=new_steward_did,
                            verkey=new_steward_verkey)
        looper.runFor(waits.expectedPrePrepareTime(len(txnPoolNodeSet)))
        """
        Catchup should revert the config_state and discard the rule change
        """
        for n in txnPoolNodeSet:
            n.start_catchup()
        for n in txnPoolNodeSet:
            looper.run(
                eventually(lambda: assertExp(n.mode == Mode.participating)))
    """
    Try to create a new steward using a steward.
    We cannot do this because the AUTH_RULE txn was reverted.
    """
    with pytest.raises(RequestRejectedException,
                       match="STEWARD can not do this action"):
        sdk_add_new_nym(looper,
                        sdk_pool_handle,
                        sdk_wallet_steward,
                        'newSteward2',
                        STEWARD_STRING,
                        dest=new_steward_did2,
                        verkey=new_steward_verkey2)
Example #28
def test_view_change_after_max_catchup_rounds(txnPoolNodeSet, looper, wallet1,
                                              client1, client1Connected):
    """
    The node should do only a fixed number of catchup rounds. For this, delay
    Prepares and Commits for 2 non-primary nodes by a large amount, which is
    equivalent to losing the Prepares and Commits. Make sure these 2 nodes have
    a different last prepared certificate from the other two. Then do a view
    change, make sure the view change completes, and check that the pool does
    not process the requests that were prepared by only a subset of the nodes.
    """
    send_reqs_batches_and_get_suff_replies(looper, wallet1, client1, 2 * 3, 3)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    ledger_summary = txnPoolNodeSet[0].elector.ledger_summary

    slow_nodes = [
        r.node for r in getNonPrimaryReplicas(txnPoolNodeSet, 0)[-2:]
    ]
    fast_nodes = [n for n in txnPoolNodeSet if n not in slow_nodes]

    # Make nodes slow to process Prepares and Commits
    for node in slow_nodes:
        node.nodeIbStasher.delay(pDelay(120, 0))
        node.nodeIbStasher.delay(cDelay(120, 0))

    sendRandomRequests(wallet1, client1, 5)
    looper.runFor(3)

    ensure_view_change(looper, nodes=txnPoolNodeSet)

    def last_prepared(nodes):
        lst = [
            n.master_replica.last_prepared_certificate_in_view() for n in nodes
        ]
        # All nodes have same last prepared
        assert check_if_all_equal_in_list(lst)
        return lst[0]

    last_prepared_slow = last_prepared(slow_nodes)
    last_prepared_fast = last_prepared(fast_nodes)

    # Check that the `slow_nodes` and `fast_nodes` sets have different last_prepared
    assert last_prepared_fast != last_prepared_slow

    # View change complete
    ensureElectionsDone(looper, txnPoolNodeSet)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    # The requests which were prepared by only a subset of the nodes were
    # not ordered
    assert txnPoolNodeSet[0].elector.ledger_summary == ledger_summary

    for node in slow_nodes:
        node.nodeIbStasher.reset_delays_and_process_delayeds()

    # Make sure pool is functional
    ensure_pool_functional(looper, txnPoolNodeSet, wallet1, client1)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    last_prepared(txnPoolNodeSet)
def test_no_propagate_request_on_different_prepares_on_backup_before_vc(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    '''
    Send a random request and do a view change; fast_nodes (2, 3 - with the
    primary backup replica) will have prepared or sent a pre-prepare on the
    backup replicas while slow_nodes have not, and the transaction will be
    ordered on all master replicas. Check last_ordered after the view change
    and after one more request.
    '''
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    slow_instance = 1
    slow_nodes = txnPoolNodeSet[1:3]
    fast_nodes = [n for n in txnPoolNodeSet if n not in slow_nodes]
    nodes_stashers = [n.nodeIbStasher for n in slow_nodes]
    old_last_ordered = txnPoolNodeSet[0].master_replica.last_ordered_3pc
    with delay_rules(nodes_stashers, pDelay(instId=slow_instance)):
        with delay_rules(nodes_stashers, ppDelay(instId=slow_instance)):
            # send one request
            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, 1)
            old_view_no = txnPoolNodeSet[0].viewNo
            looper.run(
                eventually(is_prepared,
                           fast_nodes,
                           2,
                           slow_instance))

            # trigger view change on all nodes
            ensure_view_change(looper, txnPoolNodeSet)
            # wait for view change done on all nodes
            ensureElectionsDone(looper, txnPoolNodeSet)

    primary = getPrimaryReplica(txnPoolNodeSet, slow_instance).node
    non_primaries = [n for n in txnPoolNodeSet if n is not primary]

    check_last_ordered(non_primaries,
                       slow_instance,
                       (old_view_no, old_last_ordered[1] + 1))

    # Backup primary replica must not advance last_ordered_3pc
    # up to the master's value
    check_last_ordered([primary],
                       slow_instance,
                       (old_view_no, old_last_ordered[1]))

    check_last_ordered(txnPoolNodeSet,
                       txnPoolNodeSet[0].master_replica.instId,
                       (old_last_ordered[0], old_last_ordered[1] + 1))

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    looper.run(
        eventually(check_last_ordered,
                   txnPoolNodeSet,
                   slow_instance,
                   (txnPoolNodeSet[0].viewNo, 1)))
    assert all(0 == node.spylog.count(node.request_propagates)
               for node in txnPoolNodeSet)
def test_re_order_pre_prepares(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle):
    # 0. use new 3PC validator
    for n in txnPoolNodeSet:
        ordering_service = n.master_replica._ordering_service
        ordering_service._validator = OrderingServiceMsgValidator(
            ordering_service._data)

    # 1. drop Prepares and Commits on 4thNode
    # Order a couple of requests on Nodes 1-3
    lagging_node = txnPoolNodeSet[-1]
    other_nodes = txnPoolNodeSet[:-1]
    with delay_rules_without_processing(lagging_node.nodeIbStasher, cDelay(),
                                        pDelay()):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, 3)
        assert all(n.master_last_ordered_3PC == (0, 3) for n in other_nodes)

    # 2. simulate view change start so that
    # all PrePrepares/Prepares/Commits are cleared
    # and uncommitted txns are reverted
    for n in txnPoolNodeSet:
        n.replicas.send_to_internal_bus(ViewChangeStarted(view_no=1))
        master_ordering_service = n.master_replica._ordering_service
        assert not master_ordering_service.prePrepares
        assert not master_ordering_service.prepares
        assert not master_ordering_service.commits
        assert master_ordering_service.old_view_preprepares
        ledger = n.db_manager.ledgers[DOMAIN_LEDGER_ID]
        state = n.db_manager.states[DOMAIN_LEDGER_ID]
        assert len(ledger.uncommittedTxns) == 0
        assert ledger.uncommitted_root_hash == ledger.tree.root_hash
        assert state.committedHead == state.head

    # 3. Simulate View Change finish to re-order the same PrePrepare
    assert lagging_node.master_last_ordered_3PC == (0, 0)
    new_master = txnPoolNodeSet[1]
    batches = sorted([
        preprepare_to_batch_id(pp) for _, pp in new_master.master_replica.
        _ordering_service.old_view_preprepares.items()
    ])
    new_view_msg = NewViewCheckpointsApplied(view_no=0,
                                             view_changes=[],
                                             checkpoint=None,
                                             batches=batches)
    for n in txnPoolNodeSet:
        n.master_replica._consensus_data.prev_view_prepare_cert = batches[
            -1].pp_seq_no
        n.master_replica._ordering_service._bus.send(new_view_msg)

    # 4. Make sure that Nodes 1-3 (which already ordered the requests) send Prepares and Commits so that
    # the requests are eventually ordered on Node4 as well
    waitNodeDataEquality(looper, lagging_node, *other_nodes)
    assert lagging_node.master_last_ordered_3PC == (0, 4)

    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
def testNonPrimaryRecvs3PhaseMessageOutsideWatermarks(chkFreqPatched, looper,
                                                      txnPoolNodeSet, client1,
                                                      wallet1,
                                                      client1Connected,
                                                      reqs_for_logsize):
    """
    A node is slow in processing PRE-PREPAREs and PREPAREs such that a lot of
    requests happen and the slow node starts getting 3-phase messages outside
    of its watermarks. Check that it queues up requests outside its watermarks
    and, once it has received a stable checkpoint, processes more requests. It
    sends the other nodes 3-phase messages older than their stable checkpoint,
    so they should discard them.
    """
    delay = 15
    instId = 1
    reqsToSend = reqs_for_logsize + 2
    npr = getNonPrimaryReplicas(txnPoolNodeSet, instId)
    slowReplica = npr[0]
    slowNode = slowReplica.node
    slowNode.nodeIbStasher.delay(ppDelay(delay, instId))
    slowNode.nodeIbStasher.delay(pDelay(delay, instId))

    def discardCounts(replicas, pat):
        counts = {}
        for r in replicas:
            counts[r.name] = countDiscarded(r, pat)
        return counts

    oldStashCount = slowReplica.spylog.count(
        TestReplica.stashOutsideWatermarks.__name__)
    oldDiscardCounts = discardCounts(
        [n.replicas[instId] for n in txnPoolNodeSet if n != slowNode],
        'achieved stable checkpoint')

    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, reqsToSend,
                                        1)
    timeout = waits.expectedPoolGetReadyTimeout(len(txnPoolNodeSet))
    looper.run(
        eventually(checkNodeDataForEquality,
                   slowNode,
                   *[_ for _ in txnPoolNodeSet if _ != slowNode],
                   retryWait=1,
                   timeout=timeout))
    newStashCount = slowReplica.spylog.count(
        TestReplica.stashOutsideWatermarks.__name__)
    assert newStashCount > oldStashCount

    def chk():
        counts = discardCounts(
            [n.replicas[instId] for n in txnPoolNodeSet if n != slowNode],
            'achieved stable checkpoint')
        for nm, count in counts.items():
            assert count > oldDiscardCounts[nm]

    timeout = waits.expectedNodeToNodeMessageDeliveryTime() * \
        len(txnPoolNodeSet) + delay
    looper.run(eventually(chk, retryWait=1, timeout=timeout))
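
The watermarks mentioned in the docstring define the window of pp_seq_no values a replica is willing to process, roughly (h, h + LOG_SIZE]. A simplified illustration of that window check, under assumed parameter values (not the replica's actual code):

def in_watermark_window(pp_seq_no, low_watermark, log_size):
    # a 3-phase message outside this window is stashed, and later discarded
    # once a stable checkpoint has moved the window past it
    return low_watermark < pp_seq_no <= low_watermark + log_size


assert in_watermark_window(5, 0, 300)
assert not in_watermark_window(305, 0, 300)
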
Example #32
def test_node_request_preprepare(looper, txnPoolNodeSet, client1,
                                 wallet1, client1Connected, teardown):
    """
    Node requests PRE-PREPARE only once after getting PREPAREs.
    """
    slow_node, other_nodes, primary_node, \
        other_primary_nodes = split_nodes(txnPoolNodeSet)
    # Drop PrePrepares and Prepares
    slow_node.nodeIbStasher.delay(ppDelay(300, 0))
    slow_node.nodeIbStasher.delay(pDelay(300, 0))

    send_reqs_batches_and_get_suff_replies(looper, wallet1, client1, 10, 5)
    slow_node.nodeIbStasher.drop_delayeds()
    slow_node.nodeIbStasher.resetDelays()

    old_count_req = count_requested_preprepare_req(slow_node)
    old_count_resp = count_requested_preprepare_resp(slow_node)

    def chk(increase=True):
        # Method is called
        assert count_requested_preprepare_req(slow_node) > old_count_req
        # Requesting Preprepare
        assert count_requested_preprepare_resp(
            slow_node) - old_count_resp == (1 if increase else 0)

    for pp in primary_node.master_replica.sentPrePrepares.values():
        for rep in [n.master_replica for n in other_primary_nodes]:
            prepare = Prepare(rep.instId,
                              pp.viewNo,
                              pp.ppSeqNo,
                              pp.ppTime,
                              pp.digest,
                              pp.stateRootHash,
                              pp.txnRootHash
                              )
            rep.send(prepare)

        looper.run(eventually(chk, True, retryWait=1))

        old_count_resp = count_requested_preprepare_resp(slow_node)

        prepare = Prepare(rep.instId,
                          pp.viewNo,
                          pp.ppSeqNo,
                          pp.ppTime,
                          pp.digest,
                          pp.stateRootHash,
                          pp.txnRootHash
                          )
        rep.send(prepare)

        looper.run(eventually(chk, False, retryWait=1))

        old_count_req = count_requested_preprepare_req(slow_node)

        old_count_resp = count_requested_preprepare_resp(slow_node)
Example #33
def test_revert_for_all_after_view_change(looper, helpers,
                                          nodeSetWithIntegratedTokenPlugin,
                                          sdk_pool_handle, fees_set,
                                          mint_tokens, addresses, fees):
    node_set = nodeSetWithIntegratedTokenPlugin
    current_amount = get_amount_from_token_txn(mint_tokens)
    seq_no = get_seq_no(mint_tokens)
    reverted_node = nodeSetWithIntegratedTokenPlugin[-1]

    current_amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, addresses, current_amount)
    current_amount, seq_no, _ = send_and_check_transfer(
        helpers, addresses, fees, looper, current_amount, seq_no)

    ensure_all_nodes_have_same_data(looper, node_set)

    with delay_rules([n.nodeIbStasher for n in node_set], cDelay(), pDelay()):
        len_batches_before = len(reverted_node.master_replica.batches)
        current_amount, seq_no, resp1 = send_and_check_transfer(
            helpers,
            addresses,
            fees,
            looper,
            current_amount,
            seq_no,
            check_reply=False)
        current_amount, seq_no, resp2 = send_and_check_nym_with_fees(
            helpers,
            fees_set,
            seq_no,
            looper,
            addresses,
            current_amount,
            check_reply=False)
        looper.runFor(
            waits.expectedPrePrepareTime(
                len(nodeSetWithIntegratedTokenPlugin)))
        len_batches_after = len(reverted_node.master_replica.batches)
        """
        Checks, that we have a 2 new batches
        """
        assert len_batches_after - len_batches_before == 2
        for n in node_set:
            n.view_changer.on_master_degradation()

        ensure_view_change_complete(looper, nodeSetWithIntegratedTokenPlugin)

        looper.run(
            eventually(
                lambda: assertExp(reverted_node.mode == Mode.participating)))
    ensure_all_nodes_have_same_data(looper, node_set)
    sdk_get_and_check_replies(looper, resp1)
    sdk_get_and_check_replies(looper, resp2)
    send_and_check_nym_with_fees(helpers, fees_set, seq_no, looper, addresses,
                                 current_amount)
    ensure_all_nodes_have_same_data(looper, node_set)
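The test above relies on batches that were applied but never gathered their delayed Prepares and Commits being reverted during the view change and re-ordered afterwards. A conceptual sketch of that apply/commit/revert bookkeeping (plain Python, not plenum's replica code; class and method names are placeholders):

class UncommittedBatches:
    # Illustrative only: apply batches optimistically, revert them on a view change.

    def __init__(self):
        self.committed = []
        self.uncommitted = []

    def apply(self, batch):
        # A 3PC batch is applied as soon as it is pre-prepared.
        self.uncommitted.append(batch)

    def commit_next(self):
        # Once enough Commits arrive, the oldest uncommitted batch becomes committed.
        self.committed.append(self.uncommitted.pop(0))

    def revert_on_view_change(self):
        # Batches that never gathered Commits are thrown away (in reverse order of
        # application) and are expected to be re-ordered in the new view.
        reverted = list(reversed(self.uncommitted))
        self.uncommitted.clear()
        return reverted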
def make_master_replica_lag(node):

    # class AbysmalBox(list):
    #     def append(self, object) -> None:
    #         pass
    #
    # node.replicas._master_replica.inBox = AbysmalBox()
    node.nodeIbStasher.delay(ppDelay(1200, 0))
    node.nodeIbStasher.delay(pDelay(1200, 0))
    node.nodeIbStasher.delay(cDelay(1200, 0))
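make_master_replica_lag above installs open-ended delays directly on the node's stasher; the same effect can be scoped with the delay_rules context manager used elsewhere in these examples, so the rules disappear when the block exits. A sketch, assuming the import paths plenum's test suite commonly uses for these helpers (the wrapper name and the action callback are placeholders):

from plenum.test.delayers import cDelay, pDelay, ppDelay
from plenum.test.stasher import delay_rules

def with_lagging_master_replica(node, action):
    # Delay PRE-PREPARE, PREPARE and COMMIT for the master instance (instId=0)
    # only while `action` runs; the delay rules are removed on exit.
    with delay_rules(node.nodeIbStasher, ppDelay(1200, 0), pDelay(1200, 0), cDelay(1200, 0)):
        action()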
def test_backups_dont_order_while_reordering(txnPoolNodeSet,
                                             sdk_pool_handle,
                                             sdk_wallet_client,
                                             looper):
    """
    This test needs to show that for now we stop ordering on backups
    until master in reordering state after view_change
    Steps:
    1. Delay ordering on master replica for collecting requests to reorder after VC
    2. Make sure that master didn't order
    3. Delay old_view_pp_request and force VC
    4. Ensure that all backup replica on all nodes cannot order
       because primary waiting for reordering on master
    """

    def check_pp_count(node, expected_count, inst_id=0):
        assert node.replicas._replicas[inst_id].last_ordered_3pc[1] == expected_count, \
            "master last ordered: {}, backup_last_ordered: {}".format(node.master_replica._ordering_service.batches,
                                                                      node.replicas._replicas[
                                                                          1]._ordering_service.batches)

    # We expect that after VC Gamma will be primary on backup
    delayed_node = txnPoolNodeSet[-2]
    fast_nodes = [n for n in txnPoolNodeSet if n != delayed_node]
    master_pp_seq_no_before = delayed_node.master_replica.last_ordered_3pc[1]
    with delay_rules_without_processing(delayed_node.nodeIbStasher,
                                        pDelay(instId=MASTER_REPLICA_INDEX),
                                        cDelay(instId=MASTER_REPLICA_INDEX),
                                        msg_req_delay(),
                                        msg_rep_delay(),
                                        ppDelay(instId=MASTER_REPLICA_INDEX)):
        sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, REQS_FOR_REORDERING)
        looper.run(eventually(check_pp_count, delayed_node, REQS_FOR_REORDERING, BACKUP_INST_ID))
        assert delayed_node.master_replica.last_ordered_3pc[1] == master_pp_seq_no_before
        with delay_rules([n.nodeIbStasher for n in txnPoolNodeSet], old_view_pp_request_delay()):
            ensure_view_change(looper, txnPoolNodeSet)

            # check that view change is finished on all nodes
            looper.run(eventually(check_not_in_view_change, txnPoolNodeSet))

            # check that the delayed node is selected as backup primary on all fast nodes
            # but not on the delayed node itself
            def check_backup_primaries():
                assert delayed_node.replicas[BACKUP_INST_ID]._consensus_data.primary_name is None
                assert delayed_node.master_replica.last_ordered_3pc[1] == master_pp_seq_no_before
                assert all(
                    n.replicas[BACKUP_INST_ID]._consensus_data.primary_name == generateName(delayed_node.name,
                                                                                            instId=BACKUP_INST_ID)
                    for n in fast_nodes
                )

            looper.run(eventually(check_backup_primaries))

            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, REQS_FOR_REORDERING)
            for node in txnPoolNodeSet:
                assert node.replicas._replicas[BACKUP_INST_ID].last_ordered_3pc[1] == 0
def setup(request, looper, txnPoolNodeSet):
    slow_node = getNonPrimaryReplicas(txnPoolNodeSet, 0)[1].node
    fast_nodes = [n for n in txnPoolNodeSet if n != slow_node]
    # Delay catchup replies so that the test gets time to make the check;
    # these delays are reset after the check
    slow_node.nodeIbStasher.delay(cr_delay(100))
    slow_node.nodeIbStasher.delay(pDelay(100, 0))
    slow_node.nodeIbStasher.delay(cDelay(100, 0))
    if request.param == 'all':
        slow_node.nodeIbStasher.delay(ppDelay(100, 0))
    return slow_node, fast_nodes
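The setup helper above reads request.param, so it is meant to be used as a parametrized pytest fixture: the value 'all' additionally delays PRE-PREPAREs, while other values leave them undelayed. A sketch of the usual wiring (the second parameter label below is a hypothetical placeholder):

import pytest

@pytest.fixture(params=['all', 'no_pp'])  # 'all' comes from the code above; 'no_pp' is illustrative
def setup(request, looper, txnPoolNodeSet):
    # Body as in the helper above: delay catchup replies, Prepares and Commits on the
    # slow node, and also PrePrepares when request.param == 'all'.
    ...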
Example #38
def testOrderingWhenPrePrepareNotReceived(looper, txnPoolNodeSet,
                                          sdk_wallet_client, sdk_pool_handle):
    """
    Send commits but delay pre-prepare and prepares such that enough
    commits are received, now the request should not be ordered until
    pre-prepare is received and ordering should just happen once,
    """
    delay = 10
    non_prim_reps = getNonPrimaryReplicas(txnPoolNodeSet, 0)

    slow_rep = non_prim_reps[0]
    slow_node = slow_rep.node
    slow_node.nodeIbStasher.delay(ppDelay(delay, 0))
    slow_node.nodeIbStasher.delay(pDelay(delay, 0))

    stash_pp = []
    stash_p = []
    orig_pp_method = slow_rep._ordering_service.process_preprepare
    orig_p_method = slow_rep._ordering_service.process_prepare

    def patched_pp(self, msg, sender):
        stash_pp.append((msg, sender))

    def patched_p(self, msg, sender):
        stash_p.append((msg, sender))

    slow_rep._ordering_service.process_preprepare = \
        types.MethodType(patched_pp, slow_rep)
    slow_rep._ordering_service.process_prepare = \
        types.MethodType(patched_p, slow_rep)

    def chk1():
        assert len(slow_rep._ordering_service.commitsWaitingForPrepare) > 0

    sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet)) + delay
    looper.run(eventually(chk1, retryWait=1, timeout=timeout))

    for m, s in stash_pp:
        orig_pp_method(m, s)

    for m, s in stash_p:
        orig_p_method(m, s)

    def chk2():
        assert len(slow_rep._ordering_service.commitsWaitingForPrepare) == 0
        assert slow_rep._ordering_service.spylog.count(
            slow_rep._ordering_service._do_order.__name__) == 1

    timeout = waits.expectedOrderingTime(len(non_prim_reps) + 1) + 2 * delay
    looper.run(eventually(chk2, retryWait=1, timeout=timeout))
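The test above temporarily swaps the ordering service's handlers for stashing stubs via types.MethodType and later replays the stashed messages through the saved original bound methods. A self-contained sketch of that stash-and-replay pattern on a plain object (illustrative only, not plenum API):

import types

class Handler:
    def process(self, msg, sender):
        print('processing', msg, 'from', sender)

handler = Handler()
stash = []
orig_process = handler.process                # keep the original bound method

def stashing_process(self, msg, sender):
    # Do not handle the message now; remember it for later replay.
    stash.append((msg, sender))

# Patch the instance (not the class), as the test patches slow_rep's handlers.
handler.process = types.MethodType(stashing_process, handler)

handler.process('PREPREPARE', 'Alpha')        # goes into the stash, nothing is processed yet

handler.process = orig_process                # restore the original handler
for msg, sender in stash:                     # replay everything that was stashed
    orig_process(msg, sender)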
Example #39
def scenario_txns_during_view_change(looper,
                                     helpers,
                                     nodes,
                                     io_addresses,
                                     send_txns,
                                     send_txns_invalid=None):
    lagging_node = nodes[-1]
    rest_nodes = nodes[:-1]

    def send_txns_invalid_default():
        # TODO non-public API is used
        addr = helpers.wallet.address_map[io_addresses()[0][0]]
        seq_no = list(addr.outputs[0])[0]
        assert addr.outputs[0][seq_no] > 0

        # de-sync client-server utxos states
        addr.outputs[0][seq_no] += 1
        with pytest.raises(RequestRejectedException,
                           match='Insufficient funds'):
            send_txns()
        # sync back client-server utxos states
        addr.outputs[0][seq_no] -= 1

    # Send transactions
    send_txns()
    ensure_all_nodes_have_same_data(looper, nodes)

    # Lag one node (delay Prepare and Commit messages for lagging_node)
    with delay_rules(lagging_node.nodeIbStasher, pDelay(), cDelay()):
        # Send more transactions
        send_txns()
        ensure_all_nodes_have_same_data(looper, rest_nodes)

        # Send invalid transactions
        (send_txns_invalid or send_txns_invalid_default)()
        ensure_all_nodes_have_same_data(looper, rest_nodes)

        # Initiate view change
        # Wait until view change is finished and check that needed transactions are written.
        ensure_view_change(looper, nodes)
        looper.run(eventually(check_not_in_view_change, nodes))
    ensureElectionsDone(looper, nodes)

    # Reset delays
    # Make sure that all nodes have equal state
    # (expecting that lagging_node caught up missed ones)
    ensure_all_nodes_have_same_data(looper, nodes)

    # make sure the pool is functional
    send_txns()
    ensure_all_nodes_have_same_data(looper, nodes)
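send_txns_invalid_default above deliberately de-syncs the client's UTXO view so the pool rejects the transfer, and asserts the rejection with pytest.raises(..., match=...). A self-contained sketch of that expect-a-rejection pattern (the exception class and sender below are stand-ins for illustration):

import pytest

class RequestRejectedException(Exception):
    # Stand-in for the SDK exception used above; illustrative only.
    pass

def send_insufficient_funds_txn():
    # A hypothetical sender whose request the pool would reject.
    raise RequestRejectedException('client request invalid: Insufficient funds')

def test_rejection_is_detected():
    # The rejection is expected, so it is asserted rather than treated as a failure.
    with pytest.raises(RequestRejectedException, match='Insufficient funds'):
        send_insufficient_funds_txn()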
def testOrderingWhenPrePrepareNotReceived(looper, txnPoolNodeSet,
                                          sdk_wallet_client, sdk_pool_handle):
    """
    Send commits but delay pre-prepare and prepares such that enough
    commits are received, now the request should not be ordered until
    pre-prepare is received and ordering should just happen once,
    """
    delay = 10
    non_prim_reps = getNonPrimaryReplicas(txnPoolNodeSet, 0)

    slow_rep = non_prim_reps[0]
    slow_node = slow_rep.node
    slow_node.nodeIbStasher.delay(ppDelay(delay, 0))
    slow_node.nodeIbStasher.delay(pDelay(delay, 0))

    stash_pp = []
    stash_p = []
    orig_pp_method = slow_rep.processPrePrepare
    orig_p_method = slow_rep.processPrepare

    def patched_pp(self, msg, sender):
        stash_pp.append((msg, sender))

    def patched_p(self, msg, sender):
        stash_p.append((msg, sender))

    slow_rep.processPrePrepare = \
        types.MethodType(patched_pp, slow_rep)
    slow_rep.processPrepare = \
        types.MethodType(patched_p, slow_rep)

    def chk1():
        assert len(slow_rep.commitsWaitingForPrepare) > 0

    sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet)) + delay
    looper.run(eventually(chk1, retryWait=1, timeout=timeout))

    for m, s in stash_pp:
        orig_pp_method(m, s)

    for m, s in stash_p:
        orig_p_method(m, s)

    def chk2():
        assert len(slow_rep.commitsWaitingForPrepare) == 0
        assert slow_rep.spylog.count(slow_rep.doOrder.__name__) == 1

    timeout = waits.expectedOrderingTime(len(non_prim_reps) + 1) + 2 * delay
    looper.run(eventually(chk2, retryWait=1, timeout=timeout))
def test_freeing_forwarded_not_preprepared_request(
        looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_steward, tconf, tdir, allPluginsPath):
    behind_node = txnPoolNodeSet[-1]
    behind_node.requests.clear()

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_steward, CHK_FREQ, CHK_FREQ)
    with delay_rules(behind_node.nodeIbStasher,
                     ppDelay(delay=sys.maxsize),
                     pDelay(delay=sys.maxsize),
                     cDelay(delay=sys.maxsize)):
        count = behind_node.spylog.count(behind_node.allLedgersCaughtUp)
        sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool_handle,
                                   sdk_wallet_steward, req_num, req_num)
        looper.run(eventually(node_caughtup, behind_node, count, retryWait=1))

    # Caught-up requests are executed
    assert len(behind_node.requests) == req_num
    assert all(r.executed for r in behind_node.requests.values() if behind_node.seqNoDB.get(r.request.key)[1])
def test_clearing_forwarded_preprepared_request(
        looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_steward):
    # Case when the backup ordered correctly, but the primary had problems.
    # As a result, the master will execute the caught-up txns and they will be
    # removed from the request queues.
    behind_node = txnPoolNodeSet[-1]

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_steward, CHK_FREQ, CHK_FREQ)
    with delay_rules(behind_node.nodeIbStasher,
                     pDelay(delay=sys.maxsize, instId=0),
                     cDelay(delay=sys.maxsize, instId=0)):
        count = behind_node.spylog.count(behind_node.allLedgersCaughtUp)

        sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool_handle,
                                   sdk_wallet_steward, req_num, req_num)

        looper.run(eventually(node_caughtup, behind_node, count, retryWait=1))

    assert len(behind_node.requests) == 0
    assert all([len(q) == 0 for r in behind_node.replicas.values() for q in r.requestQueues.values()])
    assert len(behind_node.clientAuthNr._verified_reqs) == 0
    assert len(behind_node.requestSender) == 0
def make_master_replica_lag(node):
    node.nodeIbStasher.delay(ppDelay(1200, 0))
    node.nodeIbStasher.delay(pDelay(1200, 0))
    node.nodeIbStasher.delay(cDelay(1200, 0))
def test_node_request_preprepare(looper, txnPoolNodeSet,
                                 sdk_wallet_client, sdk_pool_handle,
                                 teardown):
    """
    Node requests PRE-PREPARE only once after getting PREPAREs.
    """
    slow_node, other_nodes, primary_node, \
    other_primary_nodes = split_nodes(txnPoolNodeSet)
    # Drop PrePrepares and Prepares
    slow_node.nodeIbStasher.delay(ppDelay(300, 0))
    slow_node.nodeIbStasher.delay(pDelay(300, 0))

    sdk_send_batches_of_random_and_check(looper,
                                         txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         num_reqs=10,
                                         num_batches=5)
    slow_node.nodeIbStasher.drop_delayeds()
    slow_node.nodeIbStasher.resetDelays()

    old_count_req = count_requested_preprepare_req(slow_node)
    old_count_resp = count_requested_preprepare_resp(slow_node)

    def chk(increase=True):
        # The PRE-PREPARE request method was called
        assert count_requested_preprepare_req(slow_node) > old_count_req
        # A PRE-PREPARE response was requested exactly once more (or not at all)
        assert count_requested_preprepare_resp(
            slow_node) - old_count_resp == (1 if increase else 0)

    for pp in primary_node.master_replica.sentPrePrepares.values():
        for rep in [n.master_replica for n in other_primary_nodes]:
            prepare = Prepare(rep.instId,
                              pp.viewNo,
                              pp.ppSeqNo,
                              pp.ppTime,
                              pp.digest,
                              pp.stateRootHash,
                              pp.txnRootHash
                              )
            rep.send(prepare)

        looper.run(eventually(chk, True, retryWait=1))

        old_count_resp = count_requested_preprepare_resp(slow_node)

        prepare = Prepare(rep.instId,
                          pp.viewNo,
                          pp.ppSeqNo,
                          pp.ppTime,
                          pp.digest,
                          pp.stateRootHash,
                          pp.txnRootHash
                          )
        rep.send(prepare)

        looper.run(eventually(chk, False, retryWait=1))

        old_count_req = count_requested_preprepare_req(slow_node)

        old_count_resp = count_requested_preprepare_resp(slow_node)
def test_non_primary_recvs_3phase_message_outside_watermarks(
        chkFreqPatched, reqs_for_logsize, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    A node is slow in receiving PRE-PREPAREs and PREPAREs. A lot of requests
    are sent and the slow node has started receiving COMMITs outside of its
    watermarks and so stashes them. Also this node is slow in receiving
    CHECKPOINTs. So a catch-up does not occur on it.

    Then the slow node eventually receives the sent PRE-PREPAREs and PREPAREs
    and so orders the 3PC-batches between its watermarks. The other nodes
    discard the COMMITs from the slow node since they have already achieved
    stable checkpoints for these COMMITs.

    After that the slow node eventually receives the sent CHECKPOINTs from
    the other nodes and so stabilizes its own completed checkpoints and updates its
    watermarks. A catch-up is not triggered because no received checkpoints are
    stashed. Now that the watermarks have been updated, the slow node
    processes 3PC-messages stashed earlier and its ledger becomes equal to the
    ledgers of the other nodes.
    """
    backupInstId = 1
    npr = getNonPrimaryReplicas(txnPoolNodeSet, backupInstId)

    slowReplica = npr[0]
    slowNode = slowReplica.node

    slowNode.nodeIbStasher.delay(ppDelay(300, backupInstId))
    slowNode.nodeIbStasher.delay(pDelay(300, backupInstId))
    slowNode.nodeIbStasher.delay(chk_delay(300))

    initialDomainLedgerSize = slowNode.domainLedger.size
    oldStashCount = slowReplica.spylog.count(TestReplica.stashOutsideWatermarks.__name__)

    # 1. Send requests more than fit between the watermarks on the slow node
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, reqs_for_logsize + 2)

    # Verify that the slow node stashes the batches outside of its watermarks
    newStashCount = slowReplica.spylog.count(TestReplica.stashOutsideWatermarks.__name__)
    assert newStashCount > oldStashCount

    oldDiscardCounts = discardCounts([n.replicas[backupInstId] for n in txnPoolNodeSet if n != slowNode],
                                     'achieved stable checkpoint')

    # 2. Deliver the sent PREPREPAREs and PREPAREs to the slow node
    slowNode.nodeIbStasher.reset_delays_and_process_delayeds(PREPREPARE, PREPARE)

    # Verify that the slow node orders the 3PC-batches between its watermarks
    # but no more.
    looper.runFor(waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)))

    checkNodeDataForInequality(slowNode, *[n for n in txnPoolNodeSet if n != slowNode])
    assert slowNode.domainLedger.size - initialDomainLedgerSize == reqs_for_logsize

    # Also verify that the other nodes discard the COMMITs from the slow node
    # since they have already achieved stable checkpoints for these COMMITs.
    counts = discardCounts(
        [n.replicas[backupInstId] for n in txnPoolNodeSet if n != slowNode],
        'achieved stable checkpoint')
    for nm, count in counts.items():
        assert count > oldDiscardCounts[nm]

    oldCatchupTimes = slowNode.spylog.count(Node.start_catchup)

    # 3. Deliver the sent CHECKPOINTs to the slow node
    slowNode.nodeIbStasher.reset_delays_and_process_delayeds(CHECKPOINT)

    # Verify that the slow node processes 3PC-messages stashed earlier and its
    # ledger becomes equal to the ledgers of the other nodes while a catch-up
    # is not made.
    waitNodeDataEquality(looper, slowNode, *[n for n in txnPoolNodeSet if n != slowNode])
    assert slowNode.domainLedger.size - initialDomainLedgerSize == reqs_for_logsize + 2
    newCatchupTimes = slowNode.spylog.count(Node.start_catchup)
    assert newCatchupTimes == oldCatchupTimes
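The watermark behaviour described in the docstring above amounts to a sliding window (h, H]: 3PC messages whose ppSeqNo falls above H are stashed, and stabilising a checkpoint advances the window so the stashed messages can finally be processed. A conceptual sketch (plain Python, not plenum's replica implementation; all names are placeholders):

class WatermarkWindow:
    # Illustrative sliding window over 3PC sequence numbers.

    def __init__(self, log_size):
        self.h = 0                  # low watermark
        self.log_size = log_size    # window width, so H = h + log_size
        self.stashed = []           # messages received outside the window

    @property
    def H(self):
        return self.h + self.log_size

    def accept(self, pp_seq_no, msg):
        if self.h < pp_seq_no <= self.H:
            return [msg]                        # inside the window: process now
        self.stashed.append((pp_seq_no, msg))   # outside: stash, like stashOutsideWatermarks
        return []

    def stabilize_checkpoint(self, stable_seq_no):
        # Advancing the low watermark (what a stable checkpoint does) releases the
        # stashed messages that now fall inside the window.
        self.h = stable_seq_no
        ready = [m for s, m in self.stashed if self.h < s <= self.H]
        self.stashed = [(s, m) for s, m in self.stashed if s > self.H or s <= self.h]
        return ready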