def test_commits_recvd_first(looper, txnPoolNodeSet, sdk_wallet_client,
                             sdk_pool_handle):
    slow_node = [r.node for r in getNonPrimaryReplicas(txnPoolNodeSet, 0)][-1]
    other_nodes = [n for n in txnPoolNodeSet if n != slow_node]
    delay = 50
    slow_node.nodeIbStasher.delay(ppDelay(delay, 0))
    slow_node.nodeIbStasher.delay(pDelay(delay, 0))

    sdk_send_batches_of_random_and_check(looper,
                                         txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         num_reqs=20,
                                         num_batches=4)

    assert not slow_node.master_replica.prePrepares
    assert not slow_node.master_replica.prepares
    assert not slow_node.master_replica.commits
    assert len(slow_node.master_replica.commitsWaitingForPrepare) > 0

    slow_node.reset_delays_and_process_delayeds()
    waitNodeDataEquality(looper, slow_node, *other_nodes)
    assert check_if_all_equal_in_list(
        [n.master_replica.ordered for n in txnPoolNodeSet])

    assert slow_node.master_replica.prePrepares
    assert slow_node.master_replica.prepares
    assert slow_node.master_replica.commits
    assert not slow_node.master_replica.commitsWaitingForPrepare
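# Note: every example on this page asserts via check_if_all_equal_in_list.
# Its source is not shown here; a minimal sketch consistent with how
# test_list_item_equality (Example #12 below) exercises it could be the
# following (the actual plenum helper may be written differently):
def check_if_all_equal_in_list(lst):
    # Compare each item to the first with ==, so dicts and sets with the
    # same contents count as equal regardless of insertion order.
    return all(item == lst[0] for item in lst)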
Example #2
def chk_if_equal_txn_to_3pc(nodes, count=None):
    txn_to_tpc = []
    for node in nodes:
        txn_to_tpc.append(node.txn_seq_range_to_3phase_key[DOMAIN_LEDGER_ID])
    assert check_if_all_equal_in_list(txn_to_tpc)
    if count is not None:
        assert len(txn_to_tpc[0]) == count
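# A typical call site polls this check until the pool converges, e.g. via
# the eventually helper that later examples on this page already use
# (call shape assumed from those examples):
#
#     looper.run(eventually(chk_if_equal_txn_to_3pc, txnPoolNodeSet,
#                           retryWait=1))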
Example #3
def test_dequeue_and_validate_commits(looper, txnPoolNodeSet,
                                      sdk_wallet_client, sdk_pool_handle):
    slow_node = [r.node for r in getNonPrimaryReplicas(txnPoolNodeSet, 0)][-1]
    other_nodes = [n for n in txnPoolNodeSet if n != slow_node]
    delay = 50
    with delay_rules(slow_node.nodeIbStasher, pDelay(delay),
                     msg_rep_delay(delay, [PREPARE, PREPREPARE])):
        with delay_rules(slow_node.nodeIbStasher, ppDelay(delay)):

            sdk_send_batches_of_random_and_check(looper,
                                                 txnPoolNodeSet,
                                                 sdk_pool_handle,
                                                 sdk_wallet_client,
                                                 num_reqs=1,
                                                 num_batches=1)

            assert not slow_node.master_replica._ordering_service.prePrepares
            assert not slow_node.master_replica._ordering_service.prepares
            assert not slow_node.master_replica._ordering_service.commits
            assert len(slow_node.master_replica._ordering_service.
                       commitsWaitingForPrepare) > 0

        waitNodeDataEquality(looper, slow_node, *other_nodes)
        assert check_if_all_equal_in_list([
            n.master_replica._ordering_service.ordered for n in txnPoolNodeSet
        ])

        assert slow_node.master_replica._ordering_service.prePrepares
        assert slow_node.master_replica._ordering_service.prepares
        assert slow_node.master_replica._ordering_service.commits
        assert not slow_node.master_replica._ordering_service.commitsWaitingForPrepare

        assert all(slow_node.master_replica.last_ordered_3pc ==
                   n.master_replica.last_ordered_3pc for n in other_nodes)
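# Example #3 wraps its delays in the delay_rules context manager, while
# Example #1 pairs manual stasher.delay() calls with
# reset_delays_and_process_delayeds(). A minimal sketch of such a context
# manager, assuming only the stasher API already visible in these tests
# (plenum's real delay_rules may differ):
from contextlib import contextmanager

@contextmanager
def delay_rules_sketch(stasher, *delayers):
    for delayer in delayers:
        stasher.delay(delayer)       # install the delay rules
    try:
        yield
    finally:
        # lift the rules and feed the stashed messages back to the node
        stasher.reset_delays_and_process_delayeds()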
Example #4
def last_prepared(nodes):
    lst = [
        n.master_replica.last_prepared_certificate_in_view() for n in nodes
    ]
    # All nodes have same last prepared
    assert check_if_all_equal_in_list(lst)
    return lst[0]
def test_node_requests_missing_preprepare_malicious(looper, txnPoolNodeSet,
                                                    sdk_wallet_client,
                                                    sdk_pool_handle,
                                                    malicious_setup, teardown):
    """
    A node has a bad network connection with the primary and thus loses a
    PRE-PREPARE; it requests the PRE-PREPARE from non-primaries once it has
    sufficient PREPAREs, but one of the non-primaries does not send it
    """
    # primary_node = get_master_primary_node(txnPoolNodeSet)
    # slow_node = getNonPrimaryReplicas(txnPoolNodeSet, 0)[-1].node
    # other_nodes = [n for n in txnPoolNodeSet if n != slow_node]
    # bad_node = [n for n in other_nodes if n != primary_node][0]
    # good_non_primary_node = [n for n in other_nodes if n != slow_node
    #                          and n != bad_node and n != primary_node][0]
    primary_node, bad_node, good_non_primary_node, slow_node, other_nodes, \
    bad_method, orig_method = malicious_setup

    slow_node.nodeIbStasher.delay(ppDelay(300, 0))

    def get_reply_count_frm(node):
        return sum([
            1
            for entry in slow_node.spylog.getAll(slow_node.process_message_rep)
            if entry.params['msg'].msg_type == PREPREPARE
            and entry.params['frm'] == node.name
        ])

    old_reply_count_from_bad_node = get_reply_count_frm(bad_node)
    old_reply_count_from_good_node = get_reply_count_frm(good_non_primary_node)
    old_discarded = countDiscarded(slow_node.master_replica, 'does not have '
                                   'expected state')

    sdk_send_batches_of_random_and_check(looper,
                                         txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         num_reqs=10,
                                         num_batches=2)

    waitNodeDataEquality(looper, slow_node, *other_nodes)

    assert check_if_all_equal_in_list(
        [n.master_replica.ordered for n in txnPoolNodeSet])

    assert not slow_node.master_replica.requested_pre_prepares

    if bad_method.__name__ == 'do_not_send':
        assert get_reply_count_frm(bad_node) == old_reply_count_from_bad_node
    else:
        assert countDiscarded(slow_node.master_replica,
                              'does not have expected state') > old_discarded

    assert get_reply_count_frm(good_non_primary_node) > \
           old_reply_count_from_good_node

    slow_node.reset_delays_and_process_delayeds()
    bad_node.nodeMsgRouter.routes[MessageReq] = orig_method
def test_node_requests_missing_preprepare(looper, txnPoolNodeSet,
                                          sdk_wallet_client, sdk_pool_handle,
                                          teardown):
    """
    A node has a bad network connection with the primary and thus loses a
    PRE-PREPARE; it requests the PRE-PREPARE from the primary once it has
    sufficient PREPAREs
    """
    slow_node, other_nodes, primary_node, other_non_primary_nodes = split_nodes(
        txnPoolNodeSet)

    # Delay PRE-PREPAREs by large amount simulating loss
    slow_node.nodeIbStasher.delay(ppDelay(300, 0))
    old_count_pp = get_count(
        slow_node.master_replica,
        slow_node.master_replica._ordering_service.process_preprepare)
    old_count_mrq = {
        n.name: get_count(n, n.process_message_req)
        for n in other_nodes
    }
    old_count_mrp = get_count(slow_node, slow_node.process_message_rep)

    sdk_send_batches_of_random_and_check(looper,
                                         txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         num_reqs=15,
                                         num_batches=5)

    waitNodeDataEquality(looper, slow_node, *other_nodes)

    assert not slow_node.master_replica.requested_pre_prepares

    # `slow_node` processed PRE-PREPARE
    # assert get_count(slow_node.master_replica,
    #                  slow_node.master_replica._ordering_service.process_preprepare) > old_count_pp

    # `slow_node` did receive `MessageRep`
    assert get_count(slow_node, slow_node.process_message_rep) > old_count_mrp

    # The primary node should have received `MessageReq` and the other
    # nodes shouldn't have
    recv_reqs = set()
    for n in other_non_primary_nodes:
        if get_count(n, n.process_message_req) > old_count_mrq[n.name]:
            recv_reqs.add(n.name)

    assert get_count(primary_node, primary_node.process_message_req) > \
           old_count_mrq[primary_node.name]
    assert len(recv_reqs) == 0

    # All nodes including the `slow_node` ordered the same requests
    assert check_if_all_equal_in_list(
        [n.master_replica._ordering_service.ordered for n in txnPoolNodeSet])
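# Both variants of test_node_requests_missing_preprepare unpack a four-tuple
# from split_nodes, which is not shown on this page. A hypothetical
# reconstruction, consistent with the commented-out setup in
# test_node_requests_missing_preprepare_malicious above:
def split_nodes(nodes):
    primary_node = get_master_primary_node(nodes)
    slow_node = getNonPrimaryReplicas(nodes, 0)[-1].node
    other_nodes = [n for n in nodes if n != slow_node]
    other_non_primary_nodes = [n for n in other_nodes if n != primary_node]
    return slow_node, other_nodes, primary_node, other_non_primary_nodes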
def test_node_requests_missing_preprepare(looper, txnPoolNodeSet,
                                          sdk_wallet_client, sdk_pool_handle,
                                          teardown):
    """
    A node has a bad network connection with the primary and thus loses a
    PRE-PREPARE; it requests the PRE-PREPARE from the primary once it has
    sufficient PREPAREs
    """
    slow_node, other_nodes, primary_node, other_non_primary_nodes = split_nodes(
        txnPoolNodeSet)

    # Delay PRE-PREPAREs by large amount simulating loss
    slow_node.nodeIbStasher.delay(ppDelay(300, 0))
    old_count_pp = get_count(slow_node.master_replica,
                             slow_node.master_replica.processPrePrepare)
    old_count_mrq = {n.name: get_count(n, n.process_message_req)
                     for n in other_nodes}
    old_count_mrp = get_count(slow_node, slow_node.process_message_rep)

    sdk_send_batches_of_random_and_check(looper,
                                         txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         num_reqs=15,
                                         num_batches=5)

    waitNodeDataEquality(looper, slow_node, *other_nodes)

    assert not slow_node.master_replica.requested_pre_prepares

    # `slow_node` processed PRE-PREPARE
    assert get_count(slow_node.master_replica,
                     slow_node.master_replica.processPrePrepare) > old_count_pp

    # `slow_node` did receive `MessageRep`
    assert get_count(slow_node, slow_node.process_message_rep) > old_count_mrp

    # The primary node should have received `MessageReq` and the other
    # nodes shouldn't have
    recv_reqs = set()
    for n in other_non_primary_nodes:
        if get_count(n, n.process_message_req) > old_count_mrq[n.name]:
            recv_reqs.add(n.name)

    assert get_count(primary_node, primary_node.process_message_req) > \
           old_count_mrq[primary_node.name]
    assert len(recv_reqs) == 0

    # All nodes including the `slow_node` ordered the same requests
    assert check_if_all_equal_in_list([n.master_replica.ordered
                                       for n in txnPoolNodeSet])
Example #10
    def tear():
        # Repair any broken network
        for node in txnPoolNodeSet:
            node.reset_delays_and_process_delayeds()
        # Give a little time to process any delayed messages
        looper.runFor(3)

        # Check each node has same data
        ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

        # Check each node has ordered all requests (no catchup)
        assert check_if_all_equal_in_list([n.master_replica.ordered
                                           for n in txnPoolNodeSet])

        # Check the network is functional since all nodes reply
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5)
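# A tear() helper like this is presumably registered as a pytest finalizer
# by the teardown fixture that the tests above request. A minimal sketch
# using pytest's request.addfinalizer (fixture wiring assumed, not taken
# from this page):
import pytest

@pytest.fixture()
def teardown(request, looper, txnPoolNodeSet, sdk_pool_handle,
             sdk_wallet_client):
    def tear():
        for node in txnPoolNodeSet:
            node.reset_delays_and_process_delayeds()
        looper.runFor(3)
        ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    request.addfinalizer(tear)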
Example #11
    def tear():
        # Repair any broken network
        for node in txnPoolNodeSet:
            node.reset_delays_and_process_delayeds()
        # Give a little time to process any delayed messages
        looper.runFor(3)

        # Check each node has same data
        ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

        # Check each node has ordered all requests (no catchup)
        assert check_if_all_equal_in_list([n.master_replica.ordered
                                           for n in txnPoolNodeSet])

        # Check the network is functional since all nodes reply
        send_reqs_to_nodes_and_verify_all_replies(looper, wallet1, client1, 5)
Example #12
def test_list_item_equality():
    l = [
        {'a': 1, 'b': 2, 'c': 3},
        {'c': 3, 'a': 1, 'b': 2},
        {'c': 3, 'a': 1, 'b': 2},
        {'a': 1, 'b': 2, 'c': 3},
        {'c': 3, 'a': 1, 'b': 2},
        {'b': 2, 'c': 3, 'a': 1},
    ]
    l1 = [{'a', 'b', 'c', 1}, {'c', 'a', 'b', 1}, {1, 'a', 'c', 'b'}]
    assert check_if_all_equal_in_list(l)
    assert check_if_all_equal_in_list(l1)
    assert check_if_all_equal_in_list([1, 1, 1, 1])
    assert check_if_all_equal_in_list(['a', 'a', 'a', 'a'])
    assert not check_if_all_equal_in_list(['b', 'a', 'a', 'a'])
    assert not check_if_all_equal_in_list(l + [{'a': 1, 'b': 2, 'c': 33}])
    assert not check_if_all_equal_in_list(l1 + [{'c', 'a', 'b', 11}])
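# The permuted dict and set literals above compare equal because Python's
# == on dicts and sets is content-based, not order-based:
assert {'a': 1, 'b': 2, 'c': 3} == {'c': 3, 'a': 1, 'b': 2}
assert {'a', 'b', 'c', 1} == {1, 'a', 'c', 'b'}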
Example #13
def test_node_requests_missing_preprepare(looper, txnPoolNodeSet, client1,
                                          wallet1, client1Connected, teardown):
    """
    A node has a bad network connection with the primary and thus loses a
    PRE-PREPARE; it requests the PRE-PREPARE from non-primaries once it has
    sufficient PREPAREs
    """
    slow_node, other_nodes, _, _ = split_nodes(txnPoolNodeSet)

    # Delay PRE-PREPAREs by large amount simulating loss
    slow_node.nodeIbStasher.delay(ppDelay(300, 0))
    old_count_pp = get_count(slow_node.master_replica,
                             slow_node.master_replica.processPrePrepare)
    old_count_mrq = {
        n.name: get_count(n, n.process_message_req)
        for n in other_nodes
    }
    old_count_mrp = get_count(slow_node, slow_node.process_message_rep)

    send_reqs_batches_and_get_suff_replies(looper, wallet1, client1, 15, 5)

    waitNodeDataEquality(looper, slow_node, *other_nodes)

    assert not slow_node.master_replica.requested_pre_prepares

    # `slow_node` processed PRE-PREPARE
    assert get_count(slow_node.master_replica,
                     slow_node.master_replica.processPrePrepare) > old_count_pp

    # `slow_node` did receive `MessageRep`
    assert get_count(slow_node, slow_node.process_message_rep) > old_count_mrp

    # More than `f` nodes received `MessageReq`
    recv_reqs = set()
    for n in other_nodes:
        if get_count(n, n.process_message_req) > old_count_mrq[n.name]:
            recv_reqs.add(n.name)

    assert len(recv_reqs) > slow_node.f

    # All nodes including the `slow_node` ordered the same requests
    assert check_if_all_equal_in_list(
        [n.master_replica.ordered for n in txnPoolNodeSet])
def test_commits_recvd_first(looper, txnPoolNodeSet, client1, wallet1,
                             client1Connected):
    slow_node = [r.node for r in getNonPrimaryReplicas(txnPoolNodeSet, 0)][-1]
    other_nodes = [n for n in txnPoolNodeSet if n != slow_node]
    delay = 50
    slow_node.nodeIbStasher.delay(ppDelay(delay, 0))
    slow_node.nodeIbStasher.delay(pDelay(delay, 0))

    send_reqs_batches_and_get_suff_replies(looper, wallet1, client1, 20, 4)

    assert not slow_node.master_replica.prePrepares
    assert not slow_node.master_replica.prepares
    assert not slow_node.master_replica.commits
    assert len(slow_node.master_replica.commitsWaitingForPrepare) > 0

    slow_node.reset_delays_and_process_delayeds()
    waitNodeDataEquality(looper, slow_node, *other_nodes)
    assert check_if_all_equal_in_list(
        [n.master_replica.ordered for n in txnPoolNodeSet])

    assert slow_node.master_replica.prePrepares
    assert slow_node.master_replica.prepares
    assert slow_node.master_replica.commits
    assert not slow_node.master_replica.commitsWaitingForPrepare
Example #16
def check_uncommitteds_equal(nodes):
    t_roots = [node.domainLedger.uncommittedRootHash for node in nodes]
    s_roots = [node.states[DOMAIN_LEDGER_ID].headHash for node in nodes]
    assert check_if_all_equal_in_list(t_roots)
    assert check_if_all_equal_in_list(s_roots)
    return t_roots[0], s_roots[0]
Example #17
def check_last_3pc_master(node, other_nodes):
    last_3pc = [node.replicas[0].last_ordered_3pc]
    for n in other_nodes:
        last_3pc.append(n.replicas[0].last_ordered_3pc)
    assert check_if_all_equal_in_list(last_3pc)
Example #18
def test_reverted_unordered(txnPoolNodeSet, looper, sdk_pool_handle,
                            sdk_wallet_client):
    """
    Before starting catchup, revert any uncommitted changes to state and
    ledger. This is to avoid any re-application of requests that were
    ordered but stashed.
    Example scenario:
    prepared (1, 4)
    start_view_change
    start_catchup
    ...
    ....
    ...
    committed and send Ordered (1, 2)
    ...
    ....
    preLedgerCatchUp
    force_process_ordered, take out (1,2) and stash (1, 2)
    now process stashed Ordered(1,2), its requests will be applied again

    Simulation: Delay COMMITs to a node so that it cannot order requests
    but has prepared them. Then trigger a view change and make sure the slow
    node has not ordered the same number of requests as the others but has
    prepared them, so it can order once it receives COMMITs while the view
    change is in progress. The slow node should revert unordered batches,
    but it should eventually process the ordered requests, so delay
    LEDGER_STATUS too so that catchup is delayed
    """
    slow_node = getNonPrimaryReplicas(txnPoolNodeSet, 0)[-1].node
    fast_nodes = [n for n in txnPoolNodeSet if n != slow_node]
    slow_node.nodeIbStasher.delay(cDelay(120, 0))
    sent_batches = 5
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_client,
                                         2 * sent_batches, sent_batches)

    # Fast nodes have same last ordered and same data
    last_ordered = [n.master_last_ordered_3PC for n in fast_nodes]
    assert check_if_all_equal_in_list(last_ordered)
    ensure_all_nodes_have_same_data(looper, fast_nodes)

    # Slow nodes have different last ordered than fast nodes
    assert last_ordered[0] != slow_node.master_last_ordered_3PC

    # Delay LEDGER_STATUS so catchup starts late
    slow_node.nodeIbStasher.delay(lsDelay(100))
    slow_node.nodeIbStasher.delay(msg_rep_delay(100))
    slow_node.nodeIbStasher.delay(cr_delay(100))

    # slow_node has not reverted batches
    assert sent_batches not in getAllReturnVals(
        slow_node.master_replica,
        slow_node.master_replica.revert_unordered_batches)

    ensure_view_change(looper, txnPoolNodeSet)

    def chk1():
        # slow_node reverted all batches
        rv = getAllReturnVals(
            slow_node.master_replica,
            slow_node.master_replica.revert_unordered_batches)
        assert sent_batches in rv

    looper.run(eventually(chk1, retryWait=1))

    # After the view change slow_node has prepared same requests as the fast
    # nodes have ordered
    assert last_ordered[0] == \
           slow_node.master_replica.last_prepared_before_view_change

    # Deliver COMMITs
    slow_node.nodeIbStasher.reset_delays_and_process_delayeds(COMMIT)

    def chk2():
        # slow_node stashed commits
        assert slow_node.master_replica.stasher.num_stashed_catchup == \
               sent_batches * (len(txnPoolNodeSet) - 1)

    looper.run(eventually(chk2, retryWait=1))

    # Deliver LEDGER_STATUS so catchup can complete
    slow_node.nodeIbStasher.reset_delays_and_process_delayeds(LEDGER_STATUS)
    slow_node.nodeIbStasher.reset_delays_and_process_delayeds(MESSAGE_RESPONSE)
    slow_node.nodeIbStasher.reset_delays_and_process_delayeds(CATCHUP_REP)

    # Ensure all nodes have same data
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    ensureElectionsDone(looper, txnPoolNodeSet)

    def chk3():
        # slow_node processed stashed messages successfully
        assert slow_node.master_replica.stasher.num_stashed_catchup == 0

    looper.run(eventually(chk3, retryWait=1))

    # Ensure pool is functional
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_client,
                                         10, 2)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
def test_reverted_unordered(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client):
    """
    Before starting catchup, revert any uncommitted changes to state and
    ledger. This is to avoid any re-application of requests that were
    ordered but stashed.
    Example scenario:
    prepared (1, 4)
    startViewChange
    start_catchup
    ...
    ....
    ...
    committed and send Ordered (1, 2)
    ...
    ....
    preLedgerCatchUp
    force_process_ordered, take out (1,2) and stash (1, 2)
    now process stashed Ordered(1,2), its requests will be applied again

    Simulation: Delay COMMITs to a node so that it cannot order requests
    but has prepared them. Then trigger a view change and make sure the slow
    node has not ordered the same number of requests as the others but has
    prepared them, so it can order once it receives COMMITs while the view
    change is in progress. The slow node should revert unordered batches,
    but it should eventually process the ordered requests, so delay
    LEDGER_STATUS too so that catchup is delayed
    """
    slow_node = getNonPrimaryReplicas(txnPoolNodeSet, 0)[-1].node
    fast_nodes = [n for n in txnPoolNodeSet if n != slow_node]
    slow_node.nodeIbStasher.delay(cDelay(120, 0))
    sent_batches = 5
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_client, 2 * sent_batches, sent_batches)

    # Fast nodes have same last ordered and same data
    last_ordered = [n.master_last_ordered_3PC for n in fast_nodes]
    assert check_if_all_equal_in_list(last_ordered)
    ensure_all_nodes_have_same_data(looper, fast_nodes)

    # Slow nodes have different last ordered than fast nodes
    assert last_ordered[0] != slow_node.master_last_ordered_3PC

    # Delay LEDGER_STATUS so catchup starts late
    slow_node.nodeIbStasher.delay(lsDelay(100))
    slow_node.nodeIbStasher.delay(msg_rep_delay(100))

    # slow_node has not reverted batches
    assert sent_batches not in getAllReturnVals(
        slow_node.master_replica,
        slow_node.master_replica.revert_unordered_batches)

    ensure_view_change(looper, txnPoolNodeSet)

    def chk1():
        # slow_node reverted all batches
        rv = getAllReturnVals(slow_node.master_replica,
                              slow_node.master_replica.revert_unordered_batches)
        assert sent_batches in rv

    looper.run(eventually(chk1, retryWait=1))

    # After the view change slow_node has prepared same requests as the fast
    # nodes have ordered
    assert last_ordered[0] == slow_node.master_replica.last_prepared_before_view_change

    # Deliver COMMITs
    slow_node.nodeIbStasher.reset_delays_and_process_delayeds(COMMIT)

    def chk2():
        # slow_node orders all requests as others have
        assert last_ordered[0] == slow_node.master_last_ordered_3PC

    looper.run(eventually(chk2, retryWait=1))

    # Deliver LEDGER_STATUS so catchup can complete
    slow_node.nodeIbStasher.reset_delays_and_process_delayeds(LEDGER_STATUS)
    slow_node.nodeIbStasher.reset_delays_and_process_delayeds(MESSAGE_RESPONSE)

    # Ensure all nodes have same data
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    ensureElectionsDone(looper, txnPoolNodeSet)

    def chk3():
        # slow_node processed stashed Ordered requests successfully
        rv = getAllReturnVals(slow_node,
                              slow_node.processStashedOrderedReqs)
        assert sent_batches in rv

    looper.run(eventually(chk3, retryWait=1))

    # Ensure pool is functional
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_client, 10, 2)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)