Code example #1
def test_apply_data(fake_node,
                    fake_node_observer,
                    txnPoolNodeSet,
                    observed_data_msgs):
    '''
    - Create a Node (do not add it to the pool) with Observer.
    - Send txns and get ObservedData msgs from Validators.
    - Apply the ObservedData msgs on the Observer and make sure that it becomes synced with the pool.
    '''

    # check that Observer is not synced with the pool
    checkNodeDataForInequality(fake_node,
                               *txnPoolNodeSet,
                               exclude_from_check=['check_last_ordered_3pc',
                                                   'check_seqno_db'])

    # emulate sending of ObservedData from each Node
    for observed_data_msg in observed_data_msgs:
        for node in txnPoolNodeSet:
            fake_node_observer.apply_data(observed_data_msg, node.name)

    # check that Observer is synced with the pool
    checkNodeDataForEquality(fake_node,
                             *txnPoolNodeSet,
                             exclude_from_check=['check_last_ordered_3pc',
                                                 'check_primaries',
                                                 'check_last_ordered_3pc_backup'])
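
# --- Illustrative addendum (not the plenum API; names below are invented) ---
# A rough mental model of what apply_data achieves: the observer collects
# identical ObservedData batches from distinct validators and applies each
# batch once a quorum of senders has reported it, which is why feeding it the
# messages from every node above leaves it synced with the pool.
from collections import defaultdict

QUORUM = 2  # hypothetical quorum size, for illustration only

class ToyObserver:
    def __init__(self):
        self._senders = defaultdict(set)  # batch -> names of reporting nodes
        self.ledger = []                  # batches applied so far, in order

    def apply_data(self, observed_batch, sender_name):
        self._senders[observed_batch].add(sender_name)
        # Apply exactly once, when the quorum is first reached
        if len(self._senders[observed_batch]) == QUORUM:
            self.ledger.append(observed_batch)

toy = ToyObserver()
for batch in ("txn_batch_1", "txn_batch_2"):
    for node_name in ("Alpha", "Beta", "Gamma"):
        toy.apply_data(batch, node_name)
assert toy.ledger == ["txn_batch_1", "txn_batch_2"]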
Code example #2
def test_no_preprepare_requested(looper, txnPoolNodeSet, client1,
                                 wallet1, client1Connected, teardown):
    """
    A node that misses PROPAGATEs cannot finalise the request, so it stashes
    the PRE-PREPARE but does not request the PRE-PREPARE on receiving a PREPARE
    """
    slow_node, other_nodes, _, _ = split_nodes(txnPoolNodeSet)
    slow_node.nodeIbStasher.delay(ppgDelay(20))
    slow_node.nodeIbStasher.delay(msg_rep_delay(20, [PROPAGATE, ]))

    old_count_resp = count_requested_preprepare_resp(slow_node)
    send_reqs_batches_and_get_suff_replies(looper, wallet1, client1, 4, 2)

    # The slow node is behind
    checkNodeDataForInequality(slow_node, *other_nodes)

    # PRE-PREPAREs were not requested
    assert count_requested_preprepare_resp(slow_node) == old_count_resp

    slow_node.nodeIbStasher.reset_delays_and_process_delayeds()

    # The slow node has processed all requests
    waitNodeDataEquality(looper, slow_node, *other_nodes)

    # PRE-PREPAREs were not requested
    assert count_requested_preprepare_resp(slow_node) == old_count_resp
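
# --- Illustrative addendum (sketch of the rule under test, not plenum code) ---
# On receiving a PREPARE for a PRE-PREPARE it lacks, a replica asks the other
# nodes for that PRE-PREPARE only if the underlying request is already
# finalised (i.e. enough PROPAGATEs have arrived); otherwise it just stashes.
def should_request_preprepare(have_preprepare: bool,
                              request_finalised: bool) -> bool:
    if have_preprepare:
        return False          # nothing is missing
    return request_finalised  # missing PROPAGATEs -> stash only, no request

# The slow node above delays PROPAGATEs, so its requests are not finalised
# and the requested-PRE-PREPARE counter never moves:
assert should_request_preprepare(have_preprepare=False,
                                 request_finalised=False) is False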
Code example #3
def test_catch_up_after_demoted(txnPoolNodeSet,
                                nodeSetWithNodeAddedAfterSomeTxns):
    logger.info(
        "1. add a new node after sending some txns and check that catch-up "
        "is done (the new node is up to date)")
    looper, newNode, client, wallet, newStewardClient, \
    newStewardWallet = nodeSetWithNodeAddedAfterSomeTxns
    waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:4])

    logger.info("2. turn the new node off (demote)")
    node_data = {ALIAS: newNode.name, SERVICES: []}
    updateNodeData(looper, newStewardClient, newStewardWallet, newNode,
                   node_data)

    logger.info("3. send more requests, "
                "so that the new node's state is outdated")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
    checkNodeDataForInequality(newNode, *txnPoolNodeSet[:-1])

    logger.info("4. turn the new node on")
    node_data = {ALIAS: newNode.name, SERVICES: [VALIDATOR]}
    updateNodeData(looper, newStewardClient, newStewardWallet, newNode,
                   node_data)

    logger.info("5. make sure catch-up is done "
                "(the new node is up to date again)")
    waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:-1])

    logger.info("6. send more requests and make sure "
                "that the new node participates in processing them")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 10)
    waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:-1])
Code example #4
def test_apply_data(fake_node,
                    fake_node_observer,
                    txnPoolNodeSet,
                    observed_data_msgs):
    '''
    - Create a Node (do not add it to the pool) with Observer.
    - Send txns and get ObservedData msgs from Validators.
    - Apply the ObservedData msgs on the Observer and make sure that it becomes synced with the pool.
    '''

    # check that Observer is not synced with the pool
    checkNodeDataForInequality(fake_node,
                               *txnPoolNodeSet,
                               exclude_from_check=['check_last_ordered_3pc',
                                                   'check_seqno_db'])

    # emulate sending of ObservedData from each Node
    for observed_data_msg in observed_data_msgs:
        for node in txnPoolNodeSet:
            fake_node_observer.apply_data(observed_data_msg, node.name)

    # check that Observer is synced with the pool
    checkNodeDataForEquality(fake_node,
                             *txnPoolNodeSet,
                             exclude_from_check=['check_last_ordered_3pc'])
Code example #5
def test_no_preprepare_requested(looper, txnPoolNodeSet, sdk_wallet_client,
                                 sdk_pool_handle, teardown):
    """
    A node that misses PROPAGATEs cannot finalise the request, so it stashes
    the PRE-PREPARE but does not request the PRE-PREPARE on receiving a PREPARE
    """
    slow_node, other_nodes, _, _ = split_nodes(txnPoolNodeSet)
    slow_node.nodeIbStasher.delay(ppgDelay())
    slow_node.clientIbStasher.delay(req_delay())
    slow_node.nodeIbStasher.delay(msg_rep_delay(1000, [
        PROPAGATE,
    ]))

    old_count_resp = count_requested_preprepare_resp(slow_node)

    sdk_send_batches_of_random_and_check(looper,
                                         txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         num_reqs=4,
                                         num_batches=2)

    # The slow node is behind
    checkNodeDataForInequality(slow_node, *other_nodes)

    # PRE-PREPAREs were not requested
    assert count_requested_preprepare_resp(slow_node) == old_count_resp

    slow_node.nodeIbStasher.reset_delays_and_process_delayeds()

    # The slow node has processed all requests
    waitNodeDataEquality(looper, slow_node, *other_nodes)

    # PRE-PREPAREs were not requested
    assert count_requested_preprepare_resp(slow_node) == old_count_resp
Code example #6
def test_no_preprepare_requested(looper, txnPoolNodeSet,
                                 sdk_wallet_client, sdk_pool_handle,
                                 teardown):
    """
    A node that misses PROPAGATEs cannot finalise the request, so it stashes
    the PRE-PREPARE but does not request the PRE-PREPARE on receiving a PREPARE
    """
    slow_node, other_nodes, _, _ = split_nodes(txnPoolNodeSet)
    slow_node.nodeIbStasher.delay(ppgDelay(20))
    slow_node.nodeIbStasher.delay(msg_rep_delay(20, [PROPAGATE, ]))

    old_count_resp = count_requested_preprepare_resp(slow_node)

    sdk_send_batches_of_random_and_check(looper,
                                         txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         num_reqs=4,
                                         num_batches=2)

    # The slow node is behind
    checkNodeDataForInequality(slow_node, *other_nodes)

    # PRE-PREPAREs were not requested
    assert count_requested_preprepare_resp(slow_node) == old_count_resp

    slow_node.nodeIbStasher.reset_delays_and_process_delayeds()

    # The slow node has processed all requests
    waitNodeDataEquality(looper, slow_node, *other_nodes)

    # PRE-PREPAREs were not requested
    assert count_requested_preprepare_resp(slow_node) == old_count_resp
Code example #7
def test_lag_size_for_catchup(looper, chkFreqPatched, reqs_for_checkpoint,
                              txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client):
    """
    Verifies that if the stored own checkpoints have aligned bounds then
    the master replica lag which makes the node perform catch-up is
    Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 quorumed stashed received
    checkpoints.
    """
    slow_node = getNonPrimaryReplicas(txnPoolNodeSet, 0)[-1].node
    other_nodes = [n for n in txnPoolNodeSet if n != slow_node]

    # The master replica of the slow node stops receiving 3PC-messages
    slow_node.master_replica.threePhaseRouter.extend((
        (PrePrepare, lambda *x, **y: None),
        (Prepare, lambda *x, **y: None),
        (Commit, lambda *x, **y: None),
    ))

    completed_catchups_before_reqs = get_number_of_completed_catchups(
        slow_node)

    # Send requests for the slow node's master replica to get
    # Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP quorumed stashed checkpoints
    # from others
    send_reqs_batches_and_get_suff_replies(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client,
        Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP * reqs_for_checkpoint)

    # Give time for the slow node to catch up if it is going to do it
    looper.runFor(
        waits.expectedPoolConsistencyProof(len(txnPoolNodeSet)) +
        waits.expectedPoolCatchupTime(len(txnPoolNodeSet)))

    checkNodeDataForInequality(slow_node, *other_nodes)

    # Verify that the slow node has not caught up
    assert get_number_of_completed_catchups(
        slow_node) == completed_catchups_before_reqs

    # Send more requests for the slow node's master replica to reach
    # Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 quorumed stashed
    # checkpoints from others
    send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet,
                                           sdk_pool_handle, sdk_wallet_client,
                                           reqs_for_checkpoint)

    waitNodeDataEquality(looper,
                         slow_node,
                         *other_nodes,
                         exclude_from_check=['check_last_ordered_3pc_backup'])

    # Verify that the slow node has caught up
    assert get_number_of_completed_catchups(
        slow_node) > completed_catchups_before_reqs
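
# --- Illustrative addendum (assumed constants, not read from plenum config) ---
# A quick arithmetic check of the quantities above: the two send phases
# together supply exactly STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 checkpoints'
# worth of requests, which is the lag that finally triggers catch-up.
CHK_FREQ_ASSUMED = 5                    # 3PC-batches per checkpoint (assumed)
MAX_3PC_BATCH_SIZE_ASSUMED = 1          # requests per 3PC-batch (assumed)
STASHED_BEFORE_CATCHUP_ASSUMED = 1      # stand-in for Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP

reqs_per_checkpoint = CHK_FREQ_ASSUMED * MAX_3PC_BATCH_SIZE_ASSUMED
first_phase = STASHED_BEFORE_CATCHUP_ASSUMED * reqs_per_checkpoint  # no catch-up yet
second_phase = reqs_per_checkpoint                                  # crosses the threshold
assert first_phase + second_phase == \
    (STASHED_BEFORE_CATCHUP_ASSUMED + 1) * reqs_per_checkpoint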
Code example #8
def test_lag_size_for_catchup(
        looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_client):
    """
    Verifies that if the stored own checkpoints have aligned bounds then
    the master replica lag which makes the node perform catch-up is
    Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 quorumed stashed received
    checkpoints.
    """
    slow_node = getNonPrimaryReplicas(txnPoolNodeSet, 0)[-1].node
    other_nodes = [n for n in txnPoolNodeSet if n != slow_node]

    # The master replica of the slow node stops receiving 3PC-messages
    slow_node.master_replica.threePhaseRouter.extend(
        (
            (PrePrepare, lambda *x, **y: None),
            (Prepare, lambda *x, **y: None),
            (Commit, lambda *x, **y: None),
        )
    )

    completed_catchups_before_reqs = get_number_of_completed_catchups(slow_node)

    # Send requests for the slow node's master replica to get
    # Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP quorumed stashed checkpoints
    # from others
    send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet,
                                           sdk_pool_handle,
                                           sdk_wallet_client,
                                           Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP *
                                           reqs_for_checkpoint)

    # Give time for the slow node to catch up if it is going to do it
    looper.runFor(waits.expectedPoolConsistencyProof(len(txnPoolNodeSet)) +
                  waits.expectedPoolCatchupTime(len(txnPoolNodeSet)))

    checkNodeDataForInequality(slow_node, *other_nodes)

    # Verify that the slow node has not caught up
    assert get_number_of_completed_catchups(slow_node) == completed_catchups_before_reqs

    # Send more requests for the slow node's master replica to reach
    # Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 quorumed stashed
    # checkpoints from others
    send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet,
                                           sdk_pool_handle,
                                           sdk_wallet_client,
                                           reqs_for_checkpoint)

    waitNodeDataEquality(looper, slow_node, *other_nodes)

    # Verify that the slow node has caught up
    assert get_number_of_completed_catchups(slow_node) > completed_catchups_before_reqs
Code example #9
def test_catch_up_after_demoted(txnPoolNodeSet,
                                sdk_node_set_with_node_added_after_some_txns,
                                sdk_wallet_client):
    logger.info(
        "1. add a new node after sending some txns and check that catch-up "
        "is done (the new node is up to date)")
    looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \
        sdk_node_set_with_node_added_after_some_txns
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:4])

    logger.info("2. turn the new node off (demote)")
    node_dest = hexToFriendly(new_node.nodestack.verhex)
    sdk_send_update_node(looper,
                         new_steward_wallet_handle,
                         sdk_pool_handle,
                         node_dest,
                         new_node.name,
                         None,
                         None,
                         None,
                         None,
                         services=[])

    logger.info("3. send more requests, "
                "so that the new node's state is outdated")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 5)
    checkNodeDataForInequality(new_node, *txnPoolNodeSet[:-1])

    logger.info("4. turn the new node on")
    sdk_send_update_node(looper,
                         new_steward_wallet_handle,
                         sdk_pool_handle,
                         node_dest,
                         new_node.name,
                         None,
                         None,
                         None,
                         None,
                         services=[VALIDATOR])

    logger.info("5. make sure catch-up is done "
                "(the new node is up to date again)")
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])

    logger.info("6. send more requests and make sure "
                "that the new node participates in processing them")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet_handle, 10)
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
Code example #10
def test_catch_up_after_demoted(
        txnPoolNodeSet,
        sdk_node_set_with_node_added_after_some_txns,
        sdk_wallet_client):
    logger.info(
        "1. add a new node after sending some txns and check that catch-up "
        "is done (the new node is up to date)")
    looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \
        sdk_node_set_with_node_added_after_some_txns
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:4])

    logger.info("2. turn the new node off (demote)")
    node_dest = hexToFriendly(new_node.nodestack.verhex)
    sdk_send_update_node(looper, new_steward_wallet_handle,
                         sdk_pool_handle,
                         node_dest, new_node.name,
                         None, None,
                         None, None,
                         services=[])

    logger.info("3. send more requests, "
                "so that the new node's state is outdated")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 5)
    checkNodeDataForInequality(new_node, *txnPoolNodeSet[:-1])

    logger.info("4. turn the new node on")
    sdk_send_update_node(looper, new_steward_wallet_handle,
                         sdk_pool_handle,
                         node_dest, new_node.name,
                         None, None,
                         None, None,
                         services=[VALIDATOR])

    logger.info("5. make sure catch-up is done "
                "(the new node is up to date again)")
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])

    logger.info("6. send more requests and make sure "
                "that the new node participates in processing them")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet_handle, 10)
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
Code example #11
def test_non_primary_recvs_3phase_message_outside_watermarks(
        chkFreqPatched, reqs_for_logsize, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    A node is slow in receiving PRE-PREPAREs and PREPAREs. A lot of requests
    are sent and the slow node has started receiving COMMITs outside of its
    watermarks and so stashes them. Also this node is slow in receiving
    CHECKPOINTs. So a catch-up does not occur on it.

    Then the slow node eventually receives the sent PRE-PREPAREs and PREPAREs
    and so orders the 3PC-batches between its watermarks. The other nodes
    discard the COMMITs from the slow node since they have already achieved
    stable checkpoints for these COMMITs.

    After that the slow node eventually receives the sent CHECKPOINTs from
    the other nodes and so stabilizes its own completed checkpoints and updates its
    watermarks. A catch-up is not triggered because no received checkpoints are
    stashed. Since now the watermarks have been updated, the slow node
    processes 3PC-messages stashed earlier and its ledger becomes equal to the
    ledgers of the other nodes.
    """
    backupInstId = 1
    npr = getNonPrimaryReplicas(txnPoolNodeSet, backupInstId)

    slowReplica = npr[0]
    slowNode = slowReplica.node

    slowNode.nodeIbStasher.delay(ppDelay(300, backupInstId))
    slowNode.nodeIbStasher.delay(pDelay(300, backupInstId))
    slowNode.nodeIbStasher.delay(chk_delay(300))

    initialDomainLedgerSize = slowNode.domainLedger.size
    oldStashCount = slowReplica.stasher.num_stashed_watermarks
    slowReplica.H = LOG_SIZE
    # 1. Send more requests than fit between the watermarks on the slow node
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, reqs_for_logsize + 2)

    # Verify that the slow node stashes the batches outside of its watermarks
    newStashCount = slowReplica.stasher.num_stashed_watermarks
    assert newStashCount > oldStashCount

    oldDiscardCounts = discardCounts([n.replicas[backupInstId] for n in txnPoolNodeSet if n != slowNode],
                                     'achieved stable checkpoint')

    # 2. Deliver the sent PREPREPAREs and PREPAREs to the slow node
    slowNode.nodeIbStasher.reset_delays_and_process_delayeds(PREPREPARE, PREPARE)

    # Verify that the slow node orders the 3PC-batches between its watermarks
    # but no more.
    looper.runFor(waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)))

    checkNodeDataForInequality(slowNode, *[n for n in txnPoolNodeSet if n != slowNode])
    assert slowNode.domainLedger.size - initialDomainLedgerSize == reqs_for_logsize

    # Also verify that the other nodes discard the COMMITs from the slow node
    # since they have already achieved stable checkpoints for these COMMITs.
    counts = discardCounts(
        [n.replicas[backupInstId] for n in txnPoolNodeSet if n != slowNode],
        'achieved stable checkpoint')
    for nm, count in counts.items():
        assert count > oldDiscardCounts[nm]

    oldCatchupTimes = slowNode.spylog.count(Node.start_catchup)

    # 3. Deliver the sent CHECKPOINTs to the slow node
    slowNode.nodeIbStasher.reset_delays_and_process_delayeds(CHECKPOINT)

    # Verify that the slow node processes 3PC-messages stashed earlier and its
    # ledger becomes equal to the ledgers of the other nodes while a catch-up
    # is not made.
    waitNodeDataEquality(looper, slowNode, *[n for n in txnPoolNodeSet if n != slowNode])
    assert slowNode.domainLedger.size - initialDomainLedgerSize == reqs_for_logsize + 2
    newCatchupTimes = slowNode.spylog.count(Node.start_catchup)
    assert newCatchupTimes == oldCatchupTimes
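
# --- Illustrative addendum (toy watermark rule, not the plenum Replica) ---
# The stashing the test observes follows from the watermark window (h, H]:
# a 3PC-message whose sequence number lies outside the window is stashed, and
# becomes processable only after a stable checkpoint advances the watermarks.
LOG_SIZE_ASSUMED = 5  # assumed window size; plenum derives it from checkpoint config

def classify_3pc(seq_no: int, h: int, H: int) -> str:
    return "process" if h < seq_no <= H else "stash"

h, H = 0, LOG_SIZE_ASSUMED
assert classify_3pc(3, h, H) == "process"
assert classify_3pc(LOG_SIZE_ASSUMED + 2, h, H) == "stash"  # outside the watermarks

# After a stable checkpoint at seq_no == LOG_SIZE_ASSUMED the window slides
# forward and the previously stashed message can be processed.
h, H = LOG_SIZE_ASSUMED, 2 * LOG_SIZE_ASSUMED
assert classify_3pc(LOG_SIZE_ASSUMED + 2, h, H) == "process"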
Code example #12
def test_complete_short_checkpoint_not_included_in_lag_for_catchup(
        looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_steward, sdk_wallet_client,
        tdir, tconf, allPluginsPath):
    """
    Verifies that if the first stored own checkpoint has an unaligned lower
    bound (this means that it was started after a catch-up), is complete
    and there is a quorumed stashed checkpoint from other replicas with
    the same end then this stashed checkpoint is not included into the lag
    for a catch-up, i.e. in such a case the lag which makes the node perform
    catch-up is Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 2 quorumed stashed
    received checkpoints.
    """
    max_batch_size = chkFreqPatched.Max3PCBatchSize

    _, new_node = sdk_add_new_steward_and_node(
        looper, sdk_pool_handle, sdk_wallet_steward,
        'EpsilonSteward', 'Epsilon', tdir, tconf,
        allPluginsPath=allPluginsPath)
    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
    # Epsilon did not participate in ordering of the batch with EpsilonSteward
    # NYM transaction and the batch with Epsilon NODE transaction.
    # Epsilon got these transactions via catch-up.

    # To complete the first checkpoint, send requests for 1 checkpoint minus
    # 2 3PC-batches (since there are already 2 3PC-batches in the first
    # checkpoint: with EpsilonSteward NYM transaction and with Epsilon NODE
    # transaction). This checkpoint has an unaligned lower bound
    # on the new node replicas, so it will not be stabilized on them.
    send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet,
                                           sdk_pool_handle,
                                           sdk_wallet_client,
                                           reqs_for_checkpoint - 2 * max_batch_size)

    # The master replica of the new node stops receiving 3PC-messages
    new_node.master_replica.threePhaseRouter.extend(
        (
            (PrePrepare, lambda *x, **y: None),
            (Prepare, lambda *x, **y: None),
            (Commit, lambda *x, **y: None),
        )
    )

    completed_catchups_before_reqs = get_number_of_completed_catchups(new_node)

    # Send requests for the new node's master replica to reach
    # Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 quorumed stashed
    # checkpoints from others
    send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet,
                                           sdk_pool_handle,
                                           sdk_wallet_client,
                                           Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP *
                                           reqs_for_checkpoint)

    # Give time for the new node to catch up if it is going to do it
    looper.runFor(waits.expectedPoolConsistencyProof(len(txnPoolNodeSet)) +
                  waits.expectedPoolCatchupTime(len(txnPoolNodeSet)))

    checkNodeDataForInequality(new_node, *txnPoolNodeSet[:-1])

    # Verify that the new node has not caught up
    assert get_number_of_completed_catchups(new_node) == completed_catchups_before_reqs

    # Send more requests for the new node's master replica to reach
    # Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 2 quorumed stashed
    # checkpoints from others
    send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet,
                                           sdk_pool_handle,
                                           sdk_wallet_client,
                                           reqs_for_checkpoint)

    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])

    # Verify that the new node has caught up
    assert get_number_of_completed_catchups(new_node) > completed_catchups_before_reqs
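
# --- Illustrative addendum (toy model of the lag rule, not plenum code) ---
# Per the docstring above and the one in
# test_incomplete_short_checkpoint_included_in_lag_for_catchup below, a
# quorumed stashed checkpoint that ends where the node's own first checkpoint
# ends is excluded from the lag only when that first checkpoint is complete
# and has an unaligned lower bound.
def catchup_lag_threshold(stashed_before_catchup: int,
                          first_own_unaligned: bool,
                          first_own_complete: bool) -> int:
    base = stashed_before_catchup + 1
    if first_own_unaligned and first_own_complete:
        return base + 1  # the matching stashed checkpoint does not count
    return base

# Complete unaligned first checkpoint (this test): threshold is "+ 2".
assert catchup_lag_threshold(1, True, True) == 3
# Incomplete first checkpoint (the test below): threshold stays at "+ 1".
assert catchup_lag_threshold(1, True, False) == 2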
Code example #13
def test_non_primary_recvs_3phase_message_outside_watermarks(
        chkFreqPatched, reqs_for_logsize, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    A node is slow in receiving PRE-PREPAREs and PREPAREs. A lot of requests
    are sent and the slow node has started receiving COMMITs outside of its
    watermarks and so stashes them. Also this node is slow in receiving
    CHECKPOINTs. So a catch-up does not occur on it.

    Then the slow node eventually receives the sent PRE-PREPAREs and PREPAREs
    and so orders the 3PC-batches between its watermarks. The other nodes
    discard the COMMITs from the slow node since they have already achieved
    stable checkpoints for these COMMITs.

    After that the slow node eventually receives the sent CHECKPOINTs from
    the other nodes and so stabilizes its own completed checkpoints and updates its
    watermarks. A catch-up is not triggered because no received checkpoints are
    stashed. Since now the watermarks have been updated, the slow node
    processes 3PC-messages stashed earlier and its ledger becomes equal to the
    ledgers of the other nodes.
    """
    backupInstId = 1
    npr = getNonPrimaryReplicas(txnPoolNodeSet, backupInstId)

    slowReplica = npr[0]
    slowNode = slowReplica.node

    slowNode.nodeIbStasher.delay(ppDelay(300, backupInstId))
    slowNode.nodeIbStasher.delay(pDelay(300, backupInstId))
    slowNode.nodeIbStasher.delay(chk_delay(300))

    initialDomainLedgerSize = slowNode.domainLedger.size
    oldStashCount = slowReplica.spylog.count(TestReplica.stashOutsideWatermarks.__name__)

    # 1. Send more requests than fit between the watermarks on the slow node
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, reqs_for_logsize + 2)

    # Verify that the slow node stashes the batches outside of its watermarks
    newStashCount = slowReplica.spylog.count(TestReplica.stashOutsideWatermarks.__name__)
    assert newStashCount > oldStashCount

    oldDiscardCounts = discardCounts([n.replicas[backupInstId] for n in txnPoolNodeSet if n != slowNode],
                                     'achieved stable checkpoint')

    # 2. Deliver the sent PREPREPAREs and PREPAREs to the slow node
    slowNode.nodeIbStasher.reset_delays_and_process_delayeds(PREPREPARE, PREPARE)

    # Verify that the slow node orders the 3PC-batches between its watermarks
    # but no more.
    looper.runFor(waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)))

    checkNodeDataForInequality(slowNode, *[n for n in txnPoolNodeSet if n != slowNode])
    assert slowNode.domainLedger.size - initialDomainLedgerSize == reqs_for_logsize

    # Also verify that the other nodes discard the COMMITs from the slow node
    # since they have already achieved stable checkpoints for these COMMITs.
    counts = discardCounts(
        [n.replicas[backupInstId] for n in txnPoolNodeSet if n != slowNode],
        'achieved stable checkpoint')
    for nm, count in counts.items():
        assert count > oldDiscardCounts[nm]

    oldCatchupTimes = slowNode.spylog.count(Node.start_catchup)

    # 3. Deliver the sent CHECKPOINTs to the slow node
    slowNode.nodeIbStasher.reset_delays_and_process_delayeds(CHECKPOINT)

    # Verify that the slow node processes 3PC-messages stashed earlier and its
    # ledger becomes equal to the ledgers of the other nodes while a catch-up
    # is not made.
    waitNodeDataEquality(looper, slowNode, *[n for n in txnPoolNodeSet if n != slowNode])
    assert slowNode.domainLedger.size - initialDomainLedgerSize == reqs_for_logsize + 2
    newCatchupTimes = slowNode.spylog.count(Node.start_catchup)
    assert newCatchupTimes == oldCatchupTimes


def test_incomplete_short_checkpoint_included_in_lag_for_catchup(
        looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_steward, sdk_wallet_client,
        tdir, tconf, allPluginsPath):
    """
    Verifies that if the first stored own checkpoint has an unaligned lower
    bound (this means that it was started after a catch-up), is incomplete
    and there is a quorumed stashed checkpoint from other replicas with
    the same end then this stashed checkpoint is included into the lag
    for a catch-up, i.e. in such a case the lag which makes the node perform
    catch-up is Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 quorumed stashed
    received checkpoints.
    """
    max_batch_size = chkFreqPatched.Max3PCBatchSize

    _, new_node = sdk_add_new_steward_and_node(
        looper, sdk_pool_handle, sdk_wallet_steward,
        'EpsilonSteward', 'Epsilon', tdir, tconf,
        allPluginsPath=allPluginsPath)
    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
    # Epsilon did not participate in ordering of the batch with EpsilonSteward
    # NYM transaction and the batch with Epsilon NODE transaction.
    # Epsilon got these transactions via catch-up.

    # Send some requests but not enough to complete the first checkpoint.
    # Note that there are already 2 3PC-batches in the first checkpoint:
    # with EpsilonSteward NYM transaction and with Epsilon NODE transaction.
    # This checkpoint has an unaligned lower bound on the new node replicas.
    send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet,
                                           sdk_pool_handle,
                                           sdk_wallet_client,
                                           reqs_for_checkpoint - 3 * max_batch_size)

    # The master replica of the new node stops receiving 3PC-messages
    new_node.master_replica.threePhaseRouter.extend(
        (
            (PrePrepare, lambda *x, **y: None),
            (Prepare, lambda *x, **y: None),
            (Commit, lambda *x, **y: None),
        )
    )

    completed_catchups_before_reqs = get_number_of_completed_catchups(new_node)

    # Send requests for the new node's master replica to reach
    # Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP quorumed stashed
    # checkpoints from others
    send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet,
                                           sdk_pool_handle,
                                           sdk_wallet_client,
                                           (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP - 1) *
                                           reqs_for_checkpoint + max_batch_size)

    # Give time for the new node to catch up if it is going to do it
    looper.runFor(waits.expectedPoolConsistencyProof(len(txnPoolNodeSet)) +
                  waits.expectedPoolCatchupTime(len(txnPoolNodeSet)))

    checkNodeDataForInequality(new_node, *txnPoolNodeSet[:-1])

    # Verify that the new node has not caught up
    assert get_number_of_completed_catchups(new_node) == completed_catchups_before_reqs

    # Send more requests for the new node's master replica to reach
    # Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1 quorumed stashed
    # checkpoints from others
    send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet,
                                           sdk_pool_handle,
                                           sdk_wallet_client,
                                           reqs_for_checkpoint)

    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])

    # Verify that the new node has caught up
    assert get_number_of_completed_catchups(new_node) > completed_catchups_before_reqs