def test_backup_can_order_after_catchup(txnPoolNodeSet, looper,
                                        sdk_pool_handle, sdk_wallet_client):
    # We expect that after the view change, Gamma will be the primary of the backup instance
    delayed_node = txnPoolNodeSet[-2]
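    # Delay 3PC messages (PrePrepare/Prepare/Commit) for the master instance only,
    # so delayed_node's master replica stalls while its backup replica keeps going;
    # delay_rules_without_processing drops the stashed messages on exit instead of
    # replaying them.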
    with delay_rules_without_processing(delayed_node.nodeIbStasher,
                                        pDelay(instId=MASTER_REPLICA_INDEX),
                                        cDelay(instId=MASTER_REPLICA_INDEX),
                                        ppDelay(instId=MASTER_REPLICA_INDEX)):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, REQUEST_COUNT)
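        # old_view_pp_request_delay() delays OLD_VIEW_PREPREPARE_REQ on every node,
        # so after the view change nobody can fetch PrePrepares from the previous
        # view and the master instance stays in the reordering phase.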
        with delay_rules_without_processing(
                [n.nodeIbStasher for n in txnPoolNodeSet],
                old_view_pp_request_delay()):
            ensure_view_change(looper, txnPoolNodeSet)
            ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
            assert delayed_node.replicas._replicas[BACKUP_INST_ID].isPrimary
            # Check that the backup cannot order
            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, REQUEST_COUNT)
            for n in txnPoolNodeSet:
                assert n.replicas._replicas[BACKUP_INST_ID].last_ordered_3pc[1] == 0
            # Forcing catchup
            delayed_node.start_catchup()
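            # Catchup lets delayed_node pull the missed txns from the ledger, so
            # the backup primary can start ordering again.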
            ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

            # Check that the backup can order after catchup
            b_pp_seq_no_before = delayed_node.replicas._replicas[
                BACKUP_INST_ID].last_ordered_3pc[1]
            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, REQUEST_COUNT)
            assert delayed_node.replicas._replicas[BACKUP_INST_ID].last_ordered_3pc[1] == \
                   b_pp_seq_no_before + REQUEST_COUNT


def test_backups_dont_order_while_reordering(txnPoolNodeSet,
                                             sdk_pool_handle,
                                             sdk_wallet_client,
                                             looper):
    """
    This test needs to show that for now we stop ordering on backups
    until master in reordering state after view_change
    Steps:
    1. Delay ordering on master replica for collecting requests to reorder after VC
    2. Make sure that master didn't order
    3. Delay old_view_pp_request and force VC
    4. Ensure that all backup replica on all nodes cannot order
       because primary waiting for reordering on master
    """

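    # Helper: check that replica inst_id on the given node has last ordered
    # pp_seq_no equal to expected_count.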
    def check_pp_count(node, expected_count, inst_id=0):
        assert node.replicas._replicas[inst_id].last_ordered_3pc[1] == expected_count, \
            "master batches: {}, backup batches: {}".format(
                node.master_replica._ordering_service.batches,
                node.replicas._replicas[BACKUP_INST_ID]._ordering_service.batches)

    # We expect that after the view change, Gamma will be the primary of the backup instance
    delayed_node = txnPoolNodeSet[-2]
    fast_nodes = [n for n in txnPoolNodeSet if n != delayed_node]
    master_pp_seq_no_before = delayed_node.master_replica.last_ordered_3pc[1]
    with delay_rules_without_processing(delayed_node.nodeIbStasher,
                                        pDelay(instId=MASTER_REPLICA_INDEX),
                                        cDelay(instId=MASTER_REPLICA_INDEX),
                                        msg_req_delay(),
                                        msg_rep_delay(),
                                        ppDelay(instId=MASTER_REPLICA_INDEX)):
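        # MessageReq/MessageRep are delayed as well, so delayed_node cannot
        # re-request the dropped 3PC messages and its master replica stays behind.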
        sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, REQS_FOR_REORDERING)
        looper.run(eventually(check_pp_count, delayed_node, REQS_FOR_REORDERING, BACKUP_INST_ID))
        assert delayed_node.master_replica.last_ordered_3pc[1] == master_pp_seq_no_before
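        # Note: plain delay_rules (unlike delay_rules_without_processing) replays
        # the stashed messages once the rules are removed.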
        with delay_rules([n.nodeIbStasher for n in txnPoolNodeSet], old_view_pp_request_delay()):
            ensure_view_change(looper, txnPoolNodeSet)

            # check that view change is finished on all nodes
            looper.run(eventually(check_not_in_view_change, txnPoolNodeSet))

            # check that delayed_node is selected as backup primary on all fast nodes,
            # but not on delayed_node itself
            def check_backup_primaries():
                assert delayed_node.replicas[BACKUP_INST_ID]._consensus_data.primary_name is None
                assert delayed_node.master_replica.last_ordered_3pc[1] == master_pp_seq_no_before
                assert all(
                    n.replicas[BACKUP_INST_ID]._consensus_data.primary_name == generateName(delayed_node.name,
                                                                                            instId=BACKUP_INST_ID)
                    for n in fast_nodes
                )

            looper.run(eventually(check_backup_primaries))

            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, REQS_FOR_REORDERING)
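            # No backup replica may order while the master is still reordering
            # the batches from the old view.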
            for node in txnPoolNodeSet:
                assert node.replicas._replicas[BACKUP_INST_ID].last_ordered_3pc[1] == 0


def test_old_view_pre_prepare_reply_processing(looper, txnPoolNodeSet, tconf,
                                               allPluginsPath, sdk_pool_handle,
                                               sdk_wallet_steward,
                                               monkeypatch):
    """
    Test steps:
    Delay PrePrepares on `slow_node` (without processing)
    Delay receiving of OldViewPrePrepareRequest on all nodes but `malicious_node`
    Patch sending for OldViewPrePrepareReply on the `malicious_node` to send an invalid PrePrepare
    Start a view change
    Make sure it's finished on all nodes excluding `slow_node`
    Make sure that the lagging node received OldViewPrePrepareReply from the malicious node
    Reset delay for OldViewPrePrepareRequest  on other nodes
    Make sure the pool is functional and all nodes have same data
    """
    start_view_no = txnPoolNodeSet[0].viewNo
    slow_node = txnPoolNodeSet[-2]
    malicious_node = txnPoolNodeSet[-1]
    other_nodes = [
        n for n in txnPoolNodeSet if n not in [slow_node, malicious_node]
    ]
    ensureElectionsDone(looper,
                        txnPoolNodeSet,
                        customTimeout=tconf.NEW_VIEW_TIMEOUT)
    timeout = waits.expectedPoolCatchupTime(nodeCount=len(txnPoolNodeSet))
    ensure_all_nodes_have_same_data(looper,
                                    txnPoolNodeSet,
                                    custom_timeout=timeout)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 1)

    with delay_rules_without_processing(
            slow_node.nodeIbStasher, ppDelay(),
            msg_rep_delay(types_to_delay=[PREPREPARE])):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_steward, 1)
    with delay_rules([n.nodeIbStasher for n in other_nodes],
                     old_view_pp_request_delay()):
        old_sender = malicious_node.master_replica._ordering_service._send

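        # Corrupt the digest of the first PrePrepare in the first
        # OldViewPrePrepareReply being sent; monkeypatch.undo() ensures only
        # this single reply is malformed.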
        def patched_sender(msg, dst=None, stat=None):
            if isinstance(msg, OldViewPrePrepareReply) and msg.preprepares:
                pp_dict = msg.preprepares[0]._asdict()
                pp_dict["digest"] = "incorrect_digest"
                pp = PrePrepare(**pp_dict)
                msg.preprepares[0] = pp
                monkeypatch.undo()
            old_sender(msg, dst, stat)

        monkeypatch.setattr(malicious_node.master_replica._ordering_service,
                            '_send', patched_sender)
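        # Make _validate_applied_pre_prepare a no-op on the slow node, so
        # validation of applied PrePrepares is skipped there.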
        monkeypatch.setattr(slow_node.master_replica._ordering_service,
                            '_validate_applied_pre_prepare',
                            lambda a, b, c: None)
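        # Remember how many times process_old_view_preprepare_reply has been
        # called so far; spylog counts invocations of the spied method.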
        process_old_pp_num = slow_node.master_replica._ordering_service.spylog.count(
            OrderingService.process_old_view_preprepare_reply)

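        # Trigger a view change on every node by reporting master degradation.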
        for n in txnPoolNodeSet:
            n.view_changer.on_master_degradation()

        waitForViewChange(looper,
                          other_nodes + [malicious_node],
                          expectedViewNo=start_view_no + 1)

        ensureElectionsDone(looper=looper,
                            nodes=other_nodes + [malicious_node],
                            instances_list=[0, 1, 2])
        ensure_all_nodes_have_same_data(looper,
                                        nodes=other_nodes + [malicious_node])

        def chk():
            assert process_old_pp_num + 1 == slow_node.master_replica._ordering_service.spylog.count(
                OrderingService.process_old_view_preprepare_reply)

        looper.run(eventually(chk))

    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward,
                               sdk_pool_handle)