def test_check_cdp_pp_storages(looper, txnPoolNodeSet, sdk_pool_handle,
                               sdk_wallet_client):
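    """
    Check how the replica's consensus-data storages are filled: with both
    PRE-PREPAREs and PREPAREs delayed, only the primary holds a preprepared
    entry; releasing the PRE-PREPAREs fills `preprepared` on all nodes, and
    releasing the PREPAREs then fills `prepared` as well.
    """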
    def check_all_empty(replica):
        assert not replica._consensus_data.preprepared
        assert not replica._consensus_data.prepared

    def check_preprepared_not_empty(replica):
        assert replica._consensus_data.preprepared

    def check_prepared_not_empty(replica):
        assert replica._consensus_data.prepared

    def operation_for_replicas(operation, node_set=txnPoolNodeSet):
        for node in node_set:
            operation(node.master_replica)

    node_stashers = [n.nodeIbStasher for n in txnPoolNodeSet]

    with delay_rules(node_stashers, pDelay()):
        with delay_rules(node_stashers, ppDelay()):
            sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
            looper.run(
                eventually(operation_for_replicas, check_all_empty,
                           txnPoolNodeSet[1:]))
            looper.run(
                eventually(operation_for_replicas, check_preprepared_not_empty,
                           txnPoolNodeSet[0:1]))
        looper.run(
            eventually(operation_for_replicas, check_preprepared_not_empty,
                       txnPoolNodeSet))
    looper.run(
        eventually(operation_for_replicas, check_prepared_not_empty,
                   txnPoolNodeSet))
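
All of these snippets lean on plenum's stasher test helpers: `delay_rules`
installs a delay rule on each given `nodeIbStasher` for the duration of the
`with` block and replays the stashed messages on exit. A minimal sketch of
the pattern, assuming the usual indy-plenum helper locations for the imports:

from plenum.test.delayers import pDelay, ppDelay
from plenum.test.stasher import delay_rules


def hold_and_release_three_phase(nodes, send_request, check):
    stashers = [n.nodeIbStasher for n in nodes]
    with delay_rules(stashers, pDelay()):       # stash incoming PREPAREs
        with delay_rules(stashers, ppDelay()):  # stash incoming PRE-PREPAREs
            send_request()
        # leaving the inner block replays the stashed PRE-PREPAREs
    # leaving the outer block replays the stashed PREPAREs
    check()
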
def test_view_change_with_different_prepare_certificate(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    Check that a node that has no pre-prepare but does have a quorum of
    prepares doesn't use this transaction as the last one in its prepare
    certificate
    """
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    slow_node = txnPoolNodeSet[-1]
    # Delay PRE-PREPAREs and message responses that carry PRE-PREPAREs.
    with delay_rules(slow_node.nodeIbStasher, ppDelay(delay=sys.maxsize)):
        with delay_rules(
                slow_node.nodeIbStasher,
                msg_rep_delay(delay=sys.maxsize, types_to_delay=[
                    PREPREPARE,
                ])):
            last_ordered = slow_node.master_replica.last_ordered_3pc
            sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
            looper.run(
                eventually(check_prepare_certificate, txnPoolNodeSet[0:-1],
                           last_ordered[1] + 1))

            for n in txnPoolNodeSet:
                n.view_changer.on_master_degradation()
            assert slow_node.master_replica.last_prepared_certificate_in_view() == \
                   (0, last_ordered[1])
            ensureElectionsDone(looper, txnPoolNodeSet)
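
`check_prepare_certificate` is not shown in this listing; a plausible sketch
of what it asserts, judging from how the test above calls it (illustrative
only, not the actual helper):

def check_prepare_certificate(nodes, pp_seq_no):
    # Hypothetical: each node's master replica should have collected a
    # prepare certificate for the given ppSeqNo.
    for node in nodes:
        replica = node.master_replica
        assert replica.last_prepared_certificate_in_view()[1] == pp_seq_no
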
def testOrderingCase1(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle):
    """
    Scenario -> PRE-PREPARE not received by the replica, request not received
    for ordering by the replica, but enough commits received to start
    ordering. The replica queues up the request so that an order can be
    triggered once a PRE-PREPARE is received or the request is received for
    ordering.
    https://www.pivotaltracker.com/story/show/125239401

    Reproduced by - picking a node with no primary replica, making the
    replica ignore the forwarded request, and delaying reception of
    PRE-PREPAREs long enough that enough COMMITs arrive to trigger ordering.
    """
    delay = 10
    replica = getNonPrimaryReplicas(txnPoolNodeSet, instId=0)[0]
    delaysPrePrepareProcessing(replica.node, delay=delay, instId=0)

    def doNotProcessReqDigest(self, _):
        pass

    patchedMethod = types.MethodType(doNotProcessReqDigest, replica)
    replica.processRequest = patchedMethod

    def chk(n):
        assert replica.spylog.count(replica.doOrder.__name__) == n

    sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
    timeout = delay - 5
    looper.run(eventually(chk, 0, retryWait=1, timeout=timeout))
    timeout = delay + 5
    looper.run(eventually(chk, 1, retryWait=1, timeout=timeout))
def test_nodes_with_bad_clock(tconf, looper, txnPoolNodeSet,
                              sdk_wallet_client, sdk_pool_handle):
    """
    All nodes have bad clocks but they eventually get repaired; an example is
    nodes being cut off from an NTP server for some time, or NTP sync being
    disabled and later re-enabled without a node restart
    """
    sdk_send_random_and_check(looper,
                              txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              count=Max3PCBatchSize * 3)
    ledger_sizes = {node.name: node.domainLedger.size for node in
                    txnPoolNodeSet}
    susp_counts = {node.name: get_timestamp_suspicion_count(node) for node in
                   txnPoolNodeSet}
    for node in txnPoolNodeSet:
        make_clock_faulty(
            node,
            clock_slow_by_sec=node.config.ACCEPTABLE_DEVIATION_PREPREPARE_SECS +
                              randint(5, 15),
            ppr_always_wrong=False)

    for _ in range(5):
        sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
        looper.runFor(.2)

    # Let some time pass
    looper.runFor(3)

    def chk():
        for node in txnPoolNodeSet:
            # Each node raises suspicion
            assert get_timestamp_suspicion_count(node) > susp_counts[node.name]
            # Ledger does not change
            assert node.domainLedger.size == ledger_sizes[node.name]

    looper.run(eventually(chk, retryWait=1))

    # Fix clocks
    for node in txnPoolNodeSet:
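        # Rebind utc_epoch to the real clock, undoing the skew installed by
        # make_clock_faulty above.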
        def utc_epoch(self) -> int:
            return get_utc_epoch()

        node.utc_epoch = types.MethodType(utc_epoch, node)

    # Let some more time pass
    looper.runFor(3)

    # All nodes reply
    sdk_send_random_and_check(looper,
                              txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              count=Max3PCBatchSize * 2)
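
`make_clock_faulty` is a helper that is not shown in this listing; a rough
sketch of the part this test relies on, assuming it skews `utc_epoch` the
same way the loop above unskews it (illustrative only):

def make_clock_faulty(node, clock_slow_by_sec, ppr_always_wrong):
    # Hypothetical sketch: make the node report a lagging clock.
    # ppr_always_wrong toggles extra faulty behaviour not sketched here.
    def faulty_utc_epoch(self) -> int:
        return get_utc_epoch() - clock_slow_by_sec

    node.utc_epoch = types.MethodType(faulty_utc_epoch, node)
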
def testOrderingWhenPrePrepareNotReceived(looper, txnPoolNodeSet,
                                          sdk_wallet_client, sdk_pool_handle):
    """
    Send commits but delay pre-prepares and prepares so that enough commits
    are received; the request should not be ordered until the pre-prepare is
    received, and ordering should happen exactly once.
    """
    delay = 10
    non_prim_reps = getNonPrimaryReplicas(txnPoolNodeSet, 0)

    slow_rep = non_prim_reps[0]
    slow_node = slow_rep.node
    slow_node.nodeIbStasher.delay(ppDelay(delay, 0))
    slow_node.nodeIbStasher.delay(pDelay(delay, 0))

    stash_pp = []
    stash_p = []
    orig_pp_method = slow_rep._ordering_service.process_preprepare
    orig_p_method = slow_rep._ordering_service.process_prepare

    def patched_pp(self, msg, sender):
        stash_pp.append((msg, sender))

    def patched_p(self, msg, sender):
        stash_p.append((msg, sender))

    slow_rep._ordering_service.process_preprepare = \
        types.MethodType(patched_pp, slow_rep)
    slow_rep._ordering_service.process_prepare = \
        types.MethodType(patched_p, slow_rep)

    def chk1():
        assert len(slow_rep._ordering_service.commitsWaitingForPrepare) > 0

    sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet)) + delay
    looper.run(eventually(chk1, retryWait=1, timeout=timeout))

    for m, s in stash_pp:
        orig_pp_method(m, s)

    for m, s in stash_p:
        orig_p_method(m, s)

    def chk2():
        assert len(slow_rep._ordering_service.commitsWaitingForPrepare) == 0
        assert slow_rep._ordering_service.spylog.count(
            slow_rep._ordering_service._do_order.__name__) == 1

    timeout = waits.expectedOrderingTime(len(non_prim_reps) + 1) + 2 * delay
    looper.run(eventually(chk2, retryWait=1, timeout=timeout))
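
The variant below is the same test written against the legacy replica API,
where message processing, `commitsWaitingForPrepare`, and the ordering
spylog live directly on the replica (as `processPrePrepare`,
`processPrepare`, and `doOrder`) rather than on its `_ordering_service`.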
def testOrderingWhenPrePrepareNotReceived(looper, txnPoolNodeSet,
                                          sdk_wallet_client, sdk_pool_handle):
    """
    Send commits but delay pre-prepares and prepares so that enough commits
    are received; the request should not be ordered until the pre-prepare is
    received, and ordering should happen exactly once.
    """
    delay = 10
    non_prim_reps = getNonPrimaryReplicas(txnPoolNodeSet, 0)

    slow_rep = non_prim_reps[0]
    slow_node = slow_rep.node
    slow_node.nodeIbStasher.delay(ppDelay(delay, 0))
    slow_node.nodeIbStasher.delay(pDelay(delay, 0))

    stash_pp = []
    stash_p = []
    orig_pp_method = slow_rep.processPrePrepare
    orig_p_method = slow_rep.processPrepare

    def patched_pp(self, msg, sender):
        stash_pp.append((msg, sender))

    def patched_p(self, msg, sender):
        stash_p.append((msg, sender))

    slow_rep.processPrePrepare = \
        types.MethodType(patched_pp, slow_rep)
    slow_rep.processPrepare = \
        types.MethodType(patched_p, slow_rep)

    def chk1():
        assert len(slow_rep.commitsWaitingForPrepare) > 0

    sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet)) + delay
    looper.run(eventually(chk1, retryWait=1, timeout=timeout))

    for m, s in stash_pp:
        orig_pp_method(m, s)

    for m, s in stash_p:
        orig_p_method(m, s)

    def chk2():
        assert len(slow_rep.commitsWaitingForPrepare) == 0
        assert slow_rep.spylog.count(slow_rep.doOrder.__name__) == 1

    timeout = waits.expectedOrderingTime(len(non_prim_reps) + 1) + 2 * delay
    looper.run(eventually(chk2, retryWait=1, timeout=timeout))
def do_view_change_with_unaligned_prepare_certificates(
        slow_nodes, nodes, looper, sdk_pool_handle, sdk_wallet_client):
    """
    Perform a view change with some nodes reaching a lower last prepared
    certificate than others. With the current implementation of view change
    this can result in the view change taking a long time.
    """
    fast_nodes = [n for n in nodes if n not in slow_nodes]

    all_stashers = [n.nodeIbStasher for n in nodes]
    slow_stashers = [n.nodeIbStasher for n in slow_nodes]

    # Delay some PREPAREs and all COMMITs
    with delay_rules(slow_stashers, pDelay()):
        with delay_rules(all_stashers, cDelay()):
            # Send request
            request = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)

            # Wait until this request is prepared on fast nodes
            looper.run(eventually(check_last_prepared_certificate, fast_nodes, (0, 1)))
            # Make sure it's not prepared on slow nodes
            looper.run(eventually(check_last_prepared_certificate, slow_nodes, None))

            # Trigger view change
            for n in nodes:
                n.view_changer.on_master_degradation()

        # Now commits are processed
        # Wait until view change is complete
        looper.run(eventually(check_view_change_done, nodes, 1, timeout=60))

    # Finish request gracefully
    sdk_get_reply(looper, request)
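
`check_last_prepared_certificate` is not shown in this listing; given that it
is called with an expected `(view_no, pp_seq_no)` tuple or `None`, a
plausible sketch (illustrative only, not the actual helper):

def check_last_prepared_certificate(nodes, expected):
    # Hypothetical: every node's master replica should report the expected
    # last prepared certificate (None if nothing has been prepared yet).
    for node in nodes:
        assert node.master_replica.last_prepared_certificate_in_view() == expected
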
def requests(looper, sdk_wallet_client, sdk_pool_handle):
    requests = []
    for i in range(5):
        req = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
        req, _ = sdk_get_reply(looper, req, timeout=sdk_eval_timeout(1, 4))
        requests.append(req)
    return requests
def do_view_change_with_propagate_primary_on_one_delayed_node(
        slow_node, nodes, looper, sdk_pool_handle, sdk_wallet_client):

    slow_stasher = slow_node.nodeIbStasher

    fast_nodes = [n for n in nodes if n != slow_node]

    stashers = [n.nodeIbStasher for n in nodes]

    # Get last prepared certificate in pool
    lpc = last_prepared_certificate(nodes)
    # Get pool current view no
    view_no = lpc[0]

    with delay_rules(slow_stasher, icDelay()):
        with delay_rules(slow_stasher, vcd_delay()):
            with delay_rules(stashers, cDelay()):
                # Send request
                request = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)

                # Wait until this request is prepared on N-f nodes
                looper.run(eventually(check_last_prepared_certificate_on_quorum, nodes, (lpc[0], lpc[1] + 1)))

                # Trigger view change
                for n in nodes:
                    n.view_changer.on_master_degradation()

                # Wait until view change is completed on all nodes except slow one
                waitForViewChange(looper,
                                  fast_nodes,
                                  expectedViewNo=view_no + 1,
                                  customTimeout=waits.expectedPoolViewChangeStartedTimeout(len(nodes)))
                wait_for_elections_done_on_given_nodes(looper,
                                                       fast_nodes,
                                                       getRequiredInstances(len(nodes)),
                                                       timeout=waits.expectedPoolElectionTimeout(len(nodes)))

            # Now all the nodes receive Commits
            # The slow node will accept Commits and order the 3PC-batch in the old view
            looper.runFor(waits.expectedOrderingTime(getNoInstances(len(nodes))))

        # Now slow node receives ViewChangeDones
        waitForViewChange(looper,
                          [slow_node],
                          expectedViewNo=view_no + 1,
                          customTimeout=waits.expectedPoolViewChangeStartedTimeout(len(nodes)))
        wait_for_elections_done_on_given_nodes(looper,
                                               [slow_node],
                                               getRequiredInstances(len(nodes)),
                                               timeout=waits.expectedPoolElectionTimeout(len(nodes)))

    # Now the slow node receives InstanceChanges but discards them because it
    # has already started propagating the primary for the same view.

    # Finish request gracefully
    sdk_get_reply(looper, request)
def testReplyWhenRequestAlreadyExecuted(looper, txnPoolNodeSet, sdk_pool_handle,
                                        sdk_wallet_client, sent1):
    """
    When a request has already been executed, the previously executed reply
    is sent again to the client. No acknowledgement is sent for a repeated
    request.
    """
    sdk_get_and_check_replies(looper, sent1)
    req = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
    sdk_get_and_check_replies(looper, [req])
def testMsgFromInstanceDelay(configNodeSet, looper,
                             sdk_pool_handle, sdk_wallet_client):
    A, B, C, D = configNodeSet

    sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)

    def getCommits(node: TestNode, instId: int):
        replica = node.replicas[instId]  # type: Replica
        return list(replica.commits.values())

    def checkPresence():
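        # Messages from A's instance-0 replica are delayed, so C's and D's
        # instance-0 commit certificates lack A's vote while their
        # instance-1 certificates still include it.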
        for node in [C, D]:
            commReqs = getCommits(node, 0)
            assert len(commReqs) > 0
            assert Replica.generateName(A.name, 0) not in commReqs[0][0]
            commReqs = getCommits(node, 1)
            assert len(commReqs) > 0
            assert Replica.generateName(A.name, 1) in commReqs[0][0]

    numOfNodes = len(configNodeSet)
    timeout = waits.expectedClientRequestPropagationTime(numOfNodes)
    looper.run(eventually(checkPresence, retryWait=.5, timeout=timeout))
def do_view_change_with_pending_request_and_one_fast_node(
        fast_node, nodes, looper, sdk_pool_handle, sdk_wallet_client):
    """
    Perform a view change while processing a request, with one node receiving
    commits much sooner than the others. With the current implementation of
    view change this will result in a corrupted state on the fast node.
    """

    fast_stasher = fast_node.nodeIbStasher

    slow_nodes = [n for n in nodes if n != fast_node]
    slow_stashers = [n.nodeIbStasher for n in slow_nodes]

    # Get last prepared certificate in pool
    lpc = last_prepared_certificate(nodes)
    # Get pool current view no
    view_no = lpc[0]

    # Delay all COMMITs
    with delay_rules(slow_stashers, cDelay()):
        with delay_rules(fast_stasher, cDelay()):
            # Send request
            request = sdk_send_random_request(looper, sdk_pool_handle,
                                              sdk_wallet_client)

            # Wait until this request is prepared on N-f nodes
            looper.run(
                eventually(check_last_prepared_certificate_on_quorum, nodes,
                           (lpc[0], lpc[1] + 1)))

            # Trigger view change
            for n in nodes:
                n.view_changer.on_master_degradation()

        # Now commits are processed on fast node
        # Wait until view change is complete
        looper.run(
            eventually(check_view_change_done, nodes, view_no + 1, timeout=60))

    # Finish request gracefully
    sdk_get_reply(looper, request)
def test_unstash_three_phase_msg_after_catchup(txnPoolNodeSet, looper, tconf,
                                               sdk_pool_handle,
                                               sdk_wallet_steward):
    """
    1. Delay Commit on Node4
    2. Order 1 req
    3. Delay Commit on all nodes
    4. Order 1 req
    5. Delay CatchupRep on Node4
    6. Delay Ledger Status on Nodes1-3
    7. Start catch-up on all nodes
    8. Wait until Node4 has stashed CatchupReps
    9. Reset delaying of Commits on all nodes
    10. Reset Ledger Status on Nodes1-3
    11. Check that messages are stashed while Node4 is still syncing
    12. Reset CatchupRep on Node4
    13. Check that all nodes ordered the requests
    """
    slow_node = txnPoolNodeSet[-1]
    fast_nodes = txnPoolNodeSet[:-1]
    view_no = txnPoolNodeSet[0].viewNo
    old_stashed = slow_node.master_replica.stasher.stash_size(STASH_VIEW_3PC)
    last_ordered = txnPoolNodeSet[0].master_replica.last_ordered_3pc
    batches_count = last_ordered[1]

    with delay_rules(
        [n.nodeIbStasher for n in txnPoolNodeSet],
            msg_rep_delay(types_to_delay=[PREPREPARE, PREPARE, COMMIT])):

        # Delay Commit messages for slow_node.
        slow_node.nodeIbStasher.delay(cDelay(sys.maxsize))
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_steward, 1)
        batches_count += 1

        # Delay Commit messages for fast_nodes.
        for n in fast_nodes:
            n.nodeIbStasher.delay(cDelay(sys.maxsize))

        request2 = sdk_send_random_request(looper, sdk_pool_handle,
                                           sdk_wallet_steward)
        batches_count += 1

        def check_commits(commit_key):
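            # With COMMITs delayed everywhere, each replica should hold only
            # its own commit vote for this 3PC key.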
            for n in fast_nodes:
                for r in n.replicas.values():
                    assert commit_key in r._ordering_service.commits
                    assert len(
                        r._ordering_service.commits[commit_key].voters) == 1

        looper.run(
            eventually(check_commits,
                       (view_no, last_ordered[1] + batches_count)))

        # Delay CatchupRep messages for the slow_node.
        with delay_rules([slow_node.nodeIbStasher], cr_delay()):
            with delay_rules([n.nodeIbStasher for n in fast_nodes],
                             msg_rep_delay(types_to_delay=[LEDGER_STATUS])):

                for n in txnPoolNodeSet:
                    n.start_catchup()
                looper.run(
                    eventually(
                        lambda: assertExp(slow_node.mode == Mode.discovering)))

                # Reset delay Commit messages for all nodes.
                for n in txnPoolNodeSet:
                    n.nodeIbStasher.reset_delays_and_process_delayeds(COMMIT)

                assert slow_node.mode == Mode.discovering
                looper.run(
                    eventually(_check_nodes_stashed, fast_nodes, old_stashed,
                               len(txnPoolNodeSet) - 1))
                looper.run(
                    eventually(_check_nodes_stashed, [slow_node], old_stashed,
                               (len(txnPoolNodeSet) - 1) * 2))

        sdk_get_and_check_replies(looper, [request2])
        _check_nodes_stashed(fast_nodes, old_stashed, 0)
        assert get_pp_seq_no(txnPoolNodeSet) == batches_count

    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward,
                               sdk_pool_handle)
def test_unstash_three_phase_msg_after_catchup_in_view_change(
        txnPoolNodeSet, looper, tconf, sdk_pool_handle, sdk_wallet_steward):
    """
    1. Delay Commit on Node4
    2. Order 1 req
    3. Delay Commit on all nodes
    4. Order 1 req
    5. Delay CatchupRep on Node4
    6. Delay Ledger Status and ViewChangeDones on Nodes1-3
    7. Start View change on all nodes
    8. Wait until Node4 got 3 stashed CatchupReps
    9. Reset delaying of Commits on all Nodes
    10. Reset Ledger Status on Nodes1-3
    11. Check that 3 nodes finished VC while Node4 is syncing and not finished
    12. Reset CatchupRep on Node4
    13. Check that Node4 finished VC, and there was just 1 round of catch-up
    """
    slow_node = txnPoolNodeSet[-1]
    fast_nodes = txnPoolNodeSet[:-1]
    view_no = txnPoolNodeSet[0].viewNo
    old_stashed = slow_node.master_replica.stasher.num_stashed_future_view
    last_ordered = txnPoolNodeSet[0].master_replica.last_ordered_3pc

    with delay_rules(
        [n.nodeIbStasher for n in txnPoolNodeSet],
            msg_rep_delay(types_to_delay=[PREPREPARE, PREPARE, COMMIT])):

        # Delay Commit messages for slow_node.
        slow_node.nodeIbStasher.delay(cDelay(sys.maxsize))
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_steward, 1)

        # Delay Commit messages for fast_nodes.
        for n in fast_nodes:
            n.nodeIbStasher.delay(cDelay(sys.maxsize))

        request2 = sdk_send_random_request(looper, sdk_pool_handle,
                                           sdk_wallet_steward)

        def check_commits(commit_key):
            for n in fast_nodes:
                for r in n.replicas.values():
                    assert commit_key in r.commits
                    assert len(r.commits[commit_key].voters) == 1

        looper.run(eventually(check_commits, (view_no, last_ordered[1] + 2)))

        # Delay CatchupRep messages for the slow_node.
        with delay_rules([slow_node.nodeIbStasher], cr_delay()):
            with delay_rules([n.nodeIbStasher for n in fast_nodes],
                             vcd_delay()):
                with delay_rules(
                    [n.nodeIbStasher for n in fast_nodes],
                        msg_rep_delay(types_to_delay=[LEDGER_STATUS])):

                    for n in txnPoolNodeSet:
                        n.view_changer.on_master_degradation()
                    looper.run(
                        eventually(
                            lambda: assertExp(slow_node.mode == Mode.syncing)))

                    # Reset delay Commit messages for all nodes.
                    for n in txnPoolNodeSet:
                        n.nodeIbStasher.reset_delays_and_process_delayeds(
                            COMMIT)

                    assert slow_node.view_change_in_progress
                    assert slow_node.mode == Mode.syncing
                    looper.run(
                        eventually(_check_nodes_stashed, fast_nodes,
                                   old_stashed,
                                   len(txnPoolNodeSet) - 1))
                    looper.run(
                        eventually(_check_nodes_stashed, [slow_node],
                                   old_stashed, (len(txnPoolNodeSet) - 1) * 2))

            waitForViewChange(looper,
                              fast_nodes,
                              expectedViewNo=view_no + 1,
                              customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)
            ensureElectionsDone(looper=looper,
                                nodes=fast_nodes,
                                instances_list=range(
                                    fast_nodes[0].requiredNumberOfInstances),
                                customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)
        sdk_get_and_check_replies(looper, [request2])
        waitForViewChange(looper, [slow_node],
                          expectedViewNo=view_no + 1,
                          customTimeout=2 * tconf.VIEW_CHANGE_TIMEOUT)
        ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
        _check_nodes_stashed(fast_nodes, old_stashed, 0)
        assert all(n.master_replica.last_ordered_3pc == (last_ordered[0],
                                                         last_ordered[1] + 2)
                   for n in txnPoolNodeSet)
        assert slow_node.catchup_rounds_without_txns == 1
def do_view_change_with_delayed_commits_and_node_restarts(
        fast_nodes,
        slow_nodes,
        nodes_to_restart,
        old_view_no,
        old_last_ordered,
        looper,
        sdk_pool_handle,
        sdk_wallet_client,
        tconf,
        tdir,
        all_plugins_path,
        wait_for_catchup=False):
    """
    Delays commits without processing on `slow_nodes`, restarts
    `nodes_to_restart`, triggers a view change, and confirms that the view
    change completed successfully and that the ledgers are consistent and in
    sync.

    :param fast_nodes: Nodes that will order the requests
    :param slow_nodes: Nodes whose commits will be delayed and that will not
        order the requests
    :param nodes_to_restart: Nodes that will be restarted
    :param old_view_no: View that we started from
    :param old_last_ordered: Last ordered 3PC txn before we sent any requests
    :param wait_for_catchup: Whether to wait for restarted nodes to finish
        catch-up
    """

    nodes = fast_nodes + slow_nodes

    slow_stashers = [slow_node.nodeIbStasher for slow_node in slow_nodes]

    # Delay commits on `slow_nodes`
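    # (delay_rules_without_processing, unlike delay_rules, discards the
    # stashed COMMITs on exit instead of replaying them, so the slow nodes
    # never order this request)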
    with delay_rules_without_processing(slow_stashers, cDelay()):

        request = sdk_send_random_request(looper, sdk_pool_handle,
                                          sdk_wallet_client)

    # Check that all nodes except the slow ones ordered the request
        looper.run(
            eventually(check_last_ordered, fast_nodes,
                       (old_view_no, old_last_ordered[1] + 1)))
        looper.run(eventually(check_last_ordered, slow_nodes,
                              old_last_ordered))

    # Restart nodes
    for node in nodes_to_restart:
        disconnect_node_and_ensure_disconnected(looper,
                                                nodes,
                                                node,
                                                timeout=len(nodes_to_restart),
                                                stopNode=True)
        looper.removeProdable(node)
        nodes.remove(node)

        restarted_node = start_stopped_node(node, looper, tconf, tdir,
                                            all_plugins_path)
        nodes.append(restarted_node)

    looper.runFor(waits.expectedNodeStartUpTimeout())
    looper.run(checkNodesConnected(nodes))

    if wait_for_catchup:
        ensure_all_nodes_have_same_data(looper, nodes)

    # Trigger view change on all nodes
    for node in nodes:
        node.view_changer.on_master_degradation()

    assert len(nodes) == len(slow_nodes) + len(fast_nodes)

    # Assert that view change was successful and that ledger data is consistent
    waitForViewChange(looper,
                      nodes,
                      expectedViewNo=(old_view_no + 1),
                      customTimeout=waits.expectedPoolViewChangeStartedTimeout(
                          len(nodes)))
    ensureElectionsDone(looper=looper, nodes=nodes)
    ensure_all_nodes_have_same_data(looper, nodes)
    sdk_get_reply(looper, request)
    sdk_ensure_pool_functional(looper, nodes, sdk_wallet_client,
                               sdk_pool_handle)
def test_primary_send_incorrect_pp(looper, txnPoolNodeSet, tconf,
                                   allPluginsPath, sdk_pool_handle,
                                   sdk_wallet_steward, monkeypatch):
    """
    Test steps:
    Delay message requests with PrePrepares on `slow_node`
    Patch PrePrepare sending on the `malicious_primary` so it sends an
    invalid PrePrepare to `slow_node`
    Order a new request
    Start a view change
    Make sure it's finished on all nodes
    Make sure the lagging node has the same data as the other nodes
    """
    start_view_no = txnPoolNodeSet[0].viewNo
    slow_node = txnPoolNodeSet[-1]
    malicious_primary = txnPoolNodeSet[0]
    other_nodes = [
        n for n in txnPoolNodeSet if n not in [slow_node, malicious_primary]
    ]
    timeout = waits.expectedPoolCatchupTime(nodeCount=len(txnPoolNodeSet))
    ensure_all_nodes_have_same_data(looper,
                                    txnPoolNodeSet,
                                    custom_timeout=timeout)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 1)
    old_sender = malicious_primary.master_replica._ordering_service._send

    def patched_sender(msg, dst=None, stat=None):
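        # Forward the genuine PRE-PREPARE to the honest nodes, send a copy
        # with a corrupted ppTime to the slow node, then undo the patch so
        # only this one PRE-PREPARE is tampered with.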
        if isinstance(msg, PrePrepare) and msg:
            old_sender(msg, [n.name for n in other_nodes], stat)
            pp_dict = msg._asdict()
            pp_dict["ppTime"] += 1
            pp = PrePrepare(**pp_dict)
            old_sender(pp, [slow_node.name], stat)
            monkeypatch.undo()

    monkeypatch.setattr(malicious_primary.master_replica._ordering_service,
                        '_send', patched_sender)
    monkeypatch.setattr(slow_node.master_replica._ordering_service,
                        '_validate_applied_pre_prepare', lambda a, b, c: None)
    with delay_rules(slow_node.nodeIbStasher,
                     msg_rep_delay(types_to_delay=[PREPREPARE])):
        preprepare_process_num = slow_node.master_replica._ordering_service.spylog.count(
            OrderingService.process_preprepare)
        resp_task = sdk_send_random_request(looper, sdk_pool_handle,
                                            sdk_wallet_steward)

        def chk():
            assert preprepare_process_num + 1 == slow_node.master_replica._ordering_service.spylog.count(
                OrderingService.process_preprepare)

        looper.run(eventually(chk))

        _, j_resp = sdk_get_and_check_replies(looper, [resp_task])[0]
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_steward, 1)

        trigger_view_change(txnPoolNodeSet)
        ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
        waitForViewChange(looper,
                          txnPoolNodeSet,
                          expectedViewNo=start_view_no + 1)

        ensureElectionsDone(looper=looper,
                            nodes=txnPoolNodeSet,
                            instances_list=[0, 1])
        ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
        sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_steward,
                                   sdk_pool_handle)
def test_sdk_new_client_send(looper, sdk_pool_handle, sdk_wallet_new_client):
    resp_task = sdk_send_random_request(looper, sdk_pool_handle,
                                        sdk_wallet_new_client)
    _, j_resp = sdk_get_reply(looper, resp_task)
    assert j_resp['result']
def test_sdk_new_steward_send(looper, sdk_pool_handle, sdk_wallet_new_steward):
    resp_task = sdk_send_random_request(looper, sdk_pool_handle,
                                        sdk_wallet_new_steward)
    _, j_resp = sdk_get_and_check_replies(looper, [resp_task])[0]
    assert j_resp['result']
def test_view_change_gc_in_between_3pc_all_nodes_delays(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    Test that the garbage collector compares the whole 3PC key (viewNo,
    ppSeqNo) and does not remove messages from a node's queues that have a
    higher viewNo than the last ordered one, even if their ppSeqNo is lower
    or equal
    """

    numNodes = len(txnPoolNodeSet)
    viewNo = checkViewNoForNodes(txnPoolNodeSet)

    # 1 send two messages one by one separately to make
    #   the node pool work with two batches
    #    -> last_ordered_3pc = (+0, 2) [+0 means from the initial state]
    #       (last_ordered_3pc here and further is tracked
    #       for master instances only, because non-master ones have
    #       specific management logic that we don't care about in
    #       this test; see Replica::_setup_for_non_master)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    batches_count = get_pp_seq_no(txnPoolNodeSet)
    last_ordered_3pc = (viewNo, batches_count)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 2)

    # 2 do view change
    #    -> GC should remove it from nodes' queues
    #    -> viewNo = +1
    ensure_view_change_complete(looper, txnPoolNodeSet)
    batches_count += 1

    viewNo = checkViewNoForNodes(txnPoolNodeSet, viewNo + 1)
    looper.run(
        eventually(check_nodes_last_ordered_3pc, txnPoolNodeSet,
                   (viewNo, batches_count)))
    check_nodes_requests_size(txnPoolNodeSet, 0)

    # 3 slow down processing of 3PC messages for all nodes (all replica
    #   instances) and send one more message
    #    -> not ordered (last_ordered_3pc still equals (+0, 2)), but primaries
    #       should at least send PRE-PREPAREs
    # TODO: waiting this long might not be enough to guarantee that at least
    # the primary has sent a PRE-PREPARE
    propagationTimeout = waits.expectedClientRequestPropagationTime(numNodes)
    delay_3pc_messages(txnPoolNodeSet, 0, delay=propagationTimeout * 2)
    delay_3pc_messages(txnPoolNodeSet, 1, delay=propagationTimeout * 2)
    requests = sdk_send_random_request(looper, sdk_pool_handle,
                                       sdk_wallet_client)

    def checkPrePrepareSentAtLeastByPrimary():
        for node in txnPoolNodeSet:
            for replica in node.replicas.values():
                if replica.isPrimary:
                    assert len(replica._ordering_service.sent_preprepares)

    looper.run(
        eventually(checkPrePrepareSentAtLeastByPrimary,
                   retryWait=0.1,
                   timeout=propagationTimeout))
    # 4 do view change
    #    -> GC shouldn't remove anything because
    #       last_ordered_3pc (+0, 1) < last message's 3pc key (+1, 1)
    #    -> viewNo = 2
    ensure_view_change_complete(looper, txnPoolNodeSet)
    batches_count += 1

    viewNoNew = checkViewNoForNodes(txnPoolNodeSet)
    # another view change could happen because of slow nodes
    assert viewNoNew - viewNo in (1, 2)
    viewNo = viewNoNew
    check_nodes_last_ordered_3pc(txnPoolNodeSet,
                                 (last_ordered_3pc[0] + 1, batches_count - 1))
    check_nodes_requests_size(txnPoolNodeSet, 1)

    # 5 reset delays and wait for replies
    #    -> new primaries should send new 3pc for last message
    #       with 3pc key (+2, 1)
    #    -> they should be ordered
    #    -> last_ordered_3pc = (+2, 1)
    reset_delays_and_process_delayeds(txnPoolNodeSet)
    sdk_get_replies(looper, [requests])
    batches_count += 1

    checkViewNoForNodes(txnPoolNodeSet, viewNo)
    last_ordered_3pc = (viewNo, batches_count)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 1)

    # 6 do view change
    #    -> GC should remove them
    ensure_view_change_complete(looper, txnPoolNodeSet)
    batches_count += 1

    viewNo = checkViewNoForNodes(txnPoolNodeSet, viewNo + 1)
    check_nodes_last_ordered_3pc(txnPoolNodeSet,
                                 (last_ordered_3pc[0] + 1, batches_count))
    check_nodes_requests_size(txnPoolNodeSet, 0)
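
`check_nodes_last_ordered_3pc` and `check_nodes_requests_size` are helpers
not shown in this listing; plausible sketches, based on the node attributes
the surrounding tests already read (illustrative only):

def check_nodes_last_ordered_3pc(nodes, last_ordered):
    # Hypothetical: every master replica reports the expected 3PC key.
    for node in nodes:
        assert node.master_replica.last_ordered_3pc == last_ordered


def check_nodes_requests_size(nodes, size):
    # Hypothetical: each node's requests store holds `size` entries.
    for node in nodes:
        assert len(node.requests) == size
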
def test_commit_signature_validation_integration(looper,
                                                 txnPoolNodeSet,
                                                 sdk_pool_handle,
                                                 sdk_wallet_steward,
                                                 sdk_wallet_client,
                                                 tconf,
                                                 tdir):
    '''
    All nodes receive PrePrepare1 (txn1 for the pool ledger).
    Nodes 1, 2 ordered txn1 and nodes 3, 4 did not.
    All nodes receive PrePrepare2 (txn2 for the domain ledger).
    Nodes 3, 4 receive commits from nodes 1, 2.
    Nodes 3, 4 then order txn1.
    Check that all nodes ordered txn2.
    '''
    fast_nodes = txnPoolNodeSet[:2]
    slow_nodes = txnPoolNodeSet[2:]

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 1)

    # create new steward
    new_steward_wallet_handle = sdk_add_new_nym(looper,
                                                sdk_pool_handle,
                                                sdk_wallet_steward,
                                                alias="testClientSteward945",
                                                role=STEWARD_STRING)

    sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort, key_proof = \
        prepare_new_node_data(tconf, tdir, "new_node")

    # create a node request to add a new node with no services (demoted)
    _, steward_did = new_steward_wallet_handle
    node_request = looper.loop.run_until_complete(
        prepare_node_request(steward_did,
                             new_node_name="new_node",
                             clientIp=clientIp,
                             clientPort=clientPort,
                             nodeIp=nodeIp,
                             nodePort=nodePort,
                             bls_key=bls_key,
                             sigseed=sigseed,
                             services=[],
                             key_proof=key_proof))

    first_ordered = txnPoolNodeSet[0].master_last_ordered_3PC
    with ord_delay(slow_nodes):
        request1 = sdk_sign_and_send_prepared_request(looper, new_steward_wallet_handle,
                                                      sdk_pool_handle, node_request)

        key1 = get_key_from_req(request1[0])

        def check_nodes_receive_pp(view_no, seq_no):
            for node in txnPoolNodeSet:
                assert node.master_replica.getPrePrepare(view_no, seq_no)

        looper.run(eventually(check_nodes_receive_pp, first_ordered[0], first_ordered[1] + 1))

        def check_fast_nodes_ordered_request():
            for n in fast_nodes:
                assert key1 not in n.requests or n.requests[key1].executed
            for n in slow_nodes:
                assert not n.requests[key1].executed

        looper.run(eventually(check_fast_nodes_ordered_request))

        request2 = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
        looper.run(eventually(check_nodes_receive_pp, first_ordered[0], first_ordered[1] + 2))

        def check_nodes_receive_commits(view_no, seq_no):
            for node in txnPoolNodeSet:
                assert len(node.master_replica.commits[view_no, seq_no].voters) >= node.f + 1
        looper.run(eventually(check_nodes_receive_commits, first_ordered[0], first_ordered[1] + 2))

    sdk_get_and_check_replies(looper, [request1])
    sdk_get_and_check_replies(looper, [request2])
def test_view_change_gc_in_between_3pc_all_nodes_delays(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    Test that the garbage collector compares the whole 3PC key (viewNo,
    ppSeqNo) and does not remove messages from a node's queues that have a
    higher viewNo than the last ordered one, even if their ppSeqNo is lower
    or equal
    """

    numNodes = len(txnPoolNodeSet)
    viewNo = checkViewNoForNodes(txnPoolNodeSet)

    # 1 send two messages one by one separately to make
    #   the node pool work with two batches
    #    -> last_ordered_3pc = (+0, 2) [+0 means from the initial state]
    #       (last_ordered_3pc here and further is tracked
    #       for master instances only, because non-master ones have
    #       specific management logic that we don't care about in
    #       this test; see Replica::_setup_for_non_master)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    last_ordered_3pc = (viewNo, 2)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 2)

    # 2 do view change
    #    -> GC should remove it from nodes' queues
    #    -> viewNo = +1
    ensure_view_change_complete(looper, txnPoolNodeSet)

    viewNo = checkViewNoForNodes(txnPoolNodeSet, viewNo + 1)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 0)

    # 3 slow down processing of 3PC messages for all nodes (all replica
    #   instances) and send one more message
    #    -> not ordered (last_ordered_3pc still equals (+0, 2)), but primaries
    #       should at least send PRE-PREPAREs
    # TODO: waiting this long might not be enough to guarantee that at least
    # the primary has sent a PRE-PREPARE
    propagationTimeout = waits.expectedClientRequestPropagationTime(numNodes)
    delay_3pc_messages(txnPoolNodeSet,
                       0,
                       delay=propagationTimeout * 2)
    delay_3pc_messages(txnPoolNodeSet,
                       1,
                       delay=propagationTimeout * 2)
    requests = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)

    def checkPrePrepareSentAtLeastByPrimary():
        for node in txnPoolNodeSet:
            for replica in node.replicas.values():
                if replica.isPrimary:
                    assert len(replica.sentPrePrepares)

    looper.run(eventually(checkPrePrepareSentAtLeastByPrimary,
                          retryWait=0.1,
                          timeout=propagationTimeout))
    # 4 do view change
    #    -> GC shouldn't remove anything because
    #       last_ordered_3pc (+0, 1) < last message's 3pc key (+1, 1)
    #    -> viewNo = 2
    ensure_view_change_complete(looper, txnPoolNodeSet)

    viewNoNew = checkViewNoForNodes(txnPoolNodeSet)
    # another view change could happen because of slow nodes
    assert viewNoNew - viewNo in (1, 2)
    viewNo = viewNoNew
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 1)

    # 5 reset delays and wait for replies
    #    -> new primaries should send new 3pc for last message
    #       with 3pc key (+2, 1)
    #    -> they should be ordered
    #    -> last_ordered_3pc = (+2, 1)
    reset_delays_and_process_delayeds(txnPoolNodeSet)
    sdk_get_replies(looper, [requests])

    checkViewNoForNodes(txnPoolNodeSet, viewNo)
    last_ordered_3pc = (viewNo, 1)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 1)

    # 6 do view change
    #    -> GC should remove them
    ensure_view_change_complete(looper, txnPoolNodeSet)

    viewNo = checkViewNoForNodes(txnPoolNodeSet, viewNo + 1)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 0)
def test_commit_signature_validation_integration(looper,
                                                 txnPoolNodeSet,
                                                 sdk_pool_handle,
                                                 sdk_wallet_steward,
                                                 sdk_wallet_client,
                                                 tconf,
                                                 tdir):
    '''
    All nodes receive PrePrepare1 (txn1 for the pool ledger).
    Nodes 1, 2 ordered txn1 and nodes 3, 4 did not.
    All nodes receive PrePrepare2 (txn2 for the domain ledger).
    Nodes 3, 4 receive commits from nodes 1, 2.
    Nodes 3, 4 then order txn1.
    Check that all nodes ordered txn2.
    '''
    fast_nodes = txnPoolNodeSet[:2]
    slow_nodes = txnPoolNodeSet[2:]

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 1)

    # create new steward
    new_steward_wallet_handle = sdk_add_new_nym(looper,
                                                sdk_pool_handle,
                                                sdk_wallet_steward,
                                                alias="testClientSteward945",
                                                role=STEWARD_STRING)

    sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort, key_proof = \
        prepare_new_node_data(tconf, tdir, "new_node")

    # create a node request to add a new node with no services (demoted)
    _, steward_did = new_steward_wallet_handle
    node_request = looper.loop.run_until_complete(
        prepare_node_request(steward_did,
                             new_node_name="new_node",
                             clientIp=clientIp,
                             clientPort=clientPort,
                             nodeIp=nodeIp,
                             nodePort=nodePort,
                             bls_key=bls_key,
                             sigseed=sigseed,
                             services=[],
                             key_proof=key_proof))

    first_ordered = txnPoolNodeSet[0].master_last_ordered_3PC
    with ord_delay(slow_nodes):
        request1 = sdk_sign_and_send_prepared_request(looper, new_steward_wallet_handle,
                                                      sdk_pool_handle, node_request)

        key1 = get_key_from_req(request1[0])

        def check_nodes_receive_pp(view_no, seq_no):
            for node in txnPoolNodeSet:
                assert node.master_replica._ordering_service.get_preprepare(view_no, seq_no)

        looper.run(eventually(check_nodes_receive_pp, first_ordered[0], first_ordered[1] + 1))

        def check_fast_nodes_ordered_request():
            for n in fast_nodes:
                assert key1 not in n.requests or n.requests[key1].executed
            for n in slow_nodes:
                assert not n.requests[key1].executed

        looper.run(eventually(check_fast_nodes_ordered_request))

        request2 = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)
        looper.run(eventually(check_nodes_receive_pp, first_ordered[0], first_ordered[1] + 2))

        def check_nodes_receive_commits(view_no, seq_no):
            for node in txnPoolNodeSet:
                assert len(node.master_replica._ordering_service.commits[view_no, seq_no].voters) >= node.f + 1
        looper.run(eventually(check_nodes_receive_commits, first_ordered[0], first_ordered[1] + 2))

    sdk_get_and_check_replies(looper, [request1])
    sdk_get_and_check_replies(looper, [request2])