# NOTE: import paths below follow indy-plenum's test-helper layout; the exact
# module locations are an assumption and may need adjusting to the local tree.
from stp_core.loop.eventually import eventually

from plenum.test.delayers import ppDelay, pDelay, cDelay
from plenum.test.helper import sdk_send_random_and_check, \
    sendReqsToNodesAndVerifySuffReplies
from plenum.test.node_catchup.helper import waitNodeDataEquality, \
    waitNodeDataInequality
from plenum.test.pool_transactions.helper import \
    disconnect_node_and_ensure_disconnected, \
    reconnect_node_and_ensure_connected


def test_node_catchup_causes_no_desync(looper, txnPoolNodeSet, sdk_pool_handle,
                                       sdk_wallet_client, monkeypatch,
                                       chkFreqPatched, reqs_for_checkpoint):
    """
    Checks that transactions received by catchup do not
    break performance monitoring
    """

    max_batch_size = chkFreqPatched.Max3PCBatchSize
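    # reqs_for_checkpoint is assumed to be the number of requests that fills
    # a whole checkpoint (CHK_FREQ batches of Max3PCBatchSize requests each
    # under the patched config), so reqs_for_checkpoint - max_batch_size
    # stays one batch short of stabilizing a checkpoint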
    lagging_node = get_any_non_primary_node(txnPoolNodeSet)
    rest_nodes = set(txnPoolNodeSet).difference({lagging_node})

    # Make the master replica lag by delaying all 3PC messages sent to it
    make_master_replica_lag(lagging_node)
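    # Also keep the master replica from requesting the 3PC messages it is
    # missing, which would otherwise defeat the artificial lag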
    monkeypatch.setattr(lagging_node.master_replica,
                        '_request_missing_three_phase_messages',
                        lambda *x, **y: None)

    # Send some requests and check that all replicas except the master
    # executed them
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client,
                              reqs_for_checkpoint - max_batch_size)
    waitNodeDataInequality(looper, lagging_node, *rest_nodes)
    looper.run(eventually(backup_replicas_run_forward, lagging_node))

    assert not lagging_node.monitor.isMasterDegraded()

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client,
                              reqs_for_checkpoint + max_batch_size)
    # Crossing the checkpoint boundary while the master replica lags behind
    # makes the node start catchup; check that catchup completes
    waitNodeDataEquality(looper, lagging_node, *rest_nodes)

    # Release the delayed messages so the lagging node can process whatever
    # is still queued for it
    lagging_node.reset_delays_and_process_delayeds()

    # Send some more requests to ensure that backup and master replicas
    # are in the same state
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client,
                              reqs_for_checkpoint - max_batch_size)
    looper.run(eventually(replicas_synced, lagging_node))

    # Check that master is not considered to be degraded
    assert not lagging_node.monitor.isMasterDegraded()
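

# Minimal sketches of the helpers this module relies on, modeled on
# indy-plenum's test utilities. The delay values, the internal helper name
# _backups_match_master, and the exact attribute names (node.replicas
# iteration, isPrimary) are assumptions, not the canonical implementations.

def get_any_non_primary_node(nodes):
    # Any node whose master replica is not the primary will do
    return next(node for node in nodes
                if not node.master_replica.isPrimary)


def make_master_replica_lag(node):
    # Delay PRE-PREPAREs, PREPAREs and COMMITs addressed to the master
    # replica (instance 0) long enough that it cannot order anything
    # for the duration of the test
    node.nodeIbStasher.delay(ppDelay(1200, 0))
    node.nodeIbStasher.delay(pDelay(1200, 0))
    node.nodeIbStasher.delay(cDelay(1200, 0))


def _backups_match_master(node):
    # True iff every backup replica has ordered exactly as far as the master
    last_ordered_by_master = node.master_replica.last_ordered_3pc
    comparison_results = {replica.last_ordered_3pc == last_ordered_by_master
                          for replica in node.replicas
                          if not replica.isMaster}
    assert len(comparison_results) == 1
    return comparison_results.pop()


def backup_replicas_run_forward(node):
    # The backups keep ordering while the master replica lags behind
    assert not _backups_match_master(node)


def replicas_synced(node):
    # After catchup the master replica has caught up with the backups
    assert _backups_match_master(node)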


def test_node_catchup_causes_no_desync_after_restart(looper, txnPoolNodeSet,
                                                     client1, wallet1,
                                                     client1Connected,
                                                     monkeypatch):
    """
    Legacy client API variant: restarts the lagging node so that it has to
    catch up, and checks that the transactions received by catchup do not
    break performance monitoring
    """

    client, wallet = client1, wallet1
    lagging_node = get_any_non_primary_node(txnPoolNodeSet)
    rest_nodes = set(txnPoolNodeSet).difference({lagging_node})

    # Make the master replica lag by delaying all 3PC messages sent to it
    make_master_replica_lag(lagging_node)
    monkeypatch.setattr(lagging_node.master_replica,
                        '_request_missing_three_phase_messages',
                        lambda *x, **y: None)

    # Send some requests and check that all replicas except the master
    # executed them
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
    waitNodeDataInequality(looper, lagging_node, *rest_nodes)
    looper.run(eventually(backup_replicas_run_forward, lagging_node))

    # Disconnect the lagging node, send some more requests, and bring it
    # back. After the restart it should be far enough behind to have to
    # perform catchup
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            lagging_node,
                                            stopNode=False)
    looper.removeProdable(lagging_node)
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
    looper.add(lagging_node)
    reconnect_node_and_ensure_connected(looper, txnPoolNodeSet, lagging_node)

    # Check that catchup is done
    waitNodeDataEquality(looper, lagging_node, *rest_nodes)

    # Send some more requests to ensure that backup and master replicas
    # are in the same state
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
    looper.run(eventually(replicas_synced, lagging_node))

    # Check that master is not considered to be degraded
    assert not lagging_node.monitor.isMasterDegraded()
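

# A minimal sketch of the checkpoint fixtures the first test depends on,
# assuming they are not already supplied by a conftest (in indy-plenum they
# come from plenum/test/checkpoints/conftest.py). The CHK_FREQ and
# Max3PCBatchSize values are illustrative, and `tconf` is assumed to be the
# standard plenum per-test config fixture.
import pytest


@pytest.fixture(scope="module")
def chkFreqPatched(tconf, request):
    old_chk_freq = tconf.CHK_FREQ
    old_max_batch_size = tconf.Max3PCBatchSize
    tconf.CHK_FREQ = 2
    tconf.Max3PCBatchSize = 3

    def restore():
        tconf.CHK_FREQ = old_chk_freq
        tconf.Max3PCBatchSize = old_max_batch_size

    request.addfinalizer(restore)
    return tconf


@pytest.fixture(scope="module")
def reqs_for_checkpoint(chkFreqPatched):
    # A checkpoint stabilizes after CHK_FREQ batches of at most
    # Max3PCBatchSize requests each
    return chkFreqPatched.CHK_FREQ * chkFreqPatched.Max3PCBatchSize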