def test_node_requests_missing_preprepare_malicious(looper, txnPoolNodeSet,
                                                    sdk_wallet_client,
                                                    sdk_pool_handle,
                                                    malicious_setup, teardown):
    """
    A node has a bad network connection to the primary and thus loses a
    PRE-PREPARE; it requests the PRE-PREPARE from non-primaries once it
    has sufficient PREPAREs, but one of the non-primaries does not send
    the PRE-PREPARE.
    """
    # `malicious_setup` picks out the primary, a bad node (which will not
    # answer the PRE-PREPARE request), a good non-primary, the slow node
    # and the remaining pool nodes, and patches the bad node's MessageReq
    # handling (restored at the end of the test).
    primary_node, bad_node, good_non_primary_node, slow_node, other_nodes, \
    bad_method, orig_method = malicious_setup

    slow_node.nodeIbStasher.delay(ppDelay(300, 0))

    def get_reply_count_frm(node):
        # Number of PRE-PREPARE message replies the slow node has
        # processed from `node`.
        return sum(1
                   for entry in slow_node.spylog.getAll(
                       slow_node.process_message_rep)
                   if entry.params['msg'].msg_type == PREPREPARE
                   and entry.params['frm'] == node.name)

    old_reply_count_from_bad_node = get_reply_count_frm(bad_node)
    old_reply_count_from_good_node = get_reply_count_frm(good_non_primary_node)
    old_discarded = countDiscarded(slow_node.master_replica, 'does not have '
                                   'expected state')

    sdk_send_batches_of_random_and_check(looper,
                                         txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         num_reqs=10,
                                         num_batches=2)

    waitNodeDataEquality(looper, slow_node, *other_nodes)

    assert check_if_all_equal_in_list(
        [n.master_replica.ordered for n in txnPoolNodeSet])

    assert not slow_node.master_replica.requested_pre_prepares

    if bad_method.__name__ == 'do_not_send':
        assert get_reply_count_frm(bad_node) == old_reply_count_from_bad_node
    else:
        assert countDiscarded(slow_node.master_replica,
                              'does not have expected state') > old_discarded

    assert get_reply_count_frm(good_non_primary_node) > \
           old_reply_count_from_good_node

    slow_node.reset_delays_and_process_delayeds()
    bad_node.nodeMsgRouter.routes[MessageReq] = orig_method
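All of these snippets exercise countDiscarded. A minimal sketch of what such a helper could look like, assuming the processor records its discard calls on a spy log with the reason available under entry.params (the spylog.getAll access pattern mirrors get_reply_count_frm above; the real helper may differ):

def count_discarded_sketch(processor, reason_pat: str) -> int:
    # Count 'discard' events whose logged reason contains `reason_pat`.
    # Assumes `processor.spylog` records calls to `processor.discard`
    # and exposes the reason as entry.params['reason'] (illustrative).
    return sum(1
               for entry in processor.spylog.getAll(processor.discard)
               if reason_pat in str(entry.params.get('reason', '')))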
def fill_counters(nodes, log_message):
    # Snapshot the current discard counts for `log_message`, both per
    # message-request service and per node, keyed by name.
    global service_discard_counts, node_discard_counts
    service_discard_counts[log_message] = {
        n.master_replica._message_req_service.name:
        countDiscarded(n.master_replica._message_req_service, log_message)
        for n in nodes
    }
    node_discard_counts[log_message] = {
        n.name: countDiscarded(n, log_message)
        for n in nodes
    }
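A sketch of how this snapshot helper is typically consumed: fill the counters, run the scenario, then assert that the per-node counts grew (the dict layout matches node_discard_counts above; the trigger step is a placeholder):

msg = 'does not have expected state'
fill_counters(txnPoolNodeSet, msg)
# ... provoke the behaviour that should cause discards here ...
for n in txnPoolNodeSet:
    assert countDiscarded(n, msg) > node_discard_counts[msg][n.name]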
def test_catchup_with_ledger_statuses_in_old_format_from_one_node(
        txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_steward,
        tconf, tdir, allPluginsPath):
    """
    A node is restarted and during a catch-up receives ledger statuses
    in an old format (without `protocolVersion`) from one of nodes in the pool.
    The test verifies that the node successfully completes the catch-up and
    participates in ordering of further transactions.
    """
    node_to_restart = txnPoolNodeSet[-1]
    other_nodes = txnPoolNodeSet[:-1]

    old_node = txnPoolNodeSet[0]

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward, 5)

    original_get_ledger_status = old_node.getLedgerStatus

    # Patch the method getLedgerStatus to
    # get_ledger_status_without_protocol_version for sending ledger status
    # in old format (without `protocolVersion`)

    def get_ledger_status_without_protocol_version(ledgerId: int):
        original_ledger_status = original_get_ledger_status(ledgerId)
        return LedgerStatusInOldFormat(original_ledger_status.ledgerId,
                                       original_ledger_status.txnSeqNo,
                                       original_ledger_status.viewNo,
                                       original_ledger_status.ppSeqNo,
                                       original_ledger_status.merkleRoot)

    old_node.getLedgerStatus = get_ledger_status_without_protocol_version

    # restart node
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node_to_restart)
    looper.removeProdable(name=node_to_restart.name)
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward,
                              2)

    # add `node_to_restart` to pool
    node_to_restart = start_stopped_node(node_to_restart, looper, tconf,
                                         tdir, allPluginsPath)
    txnPoolNodeSet[-1] = node_to_restart
    looper.run(checkNodesConnected(txnPoolNodeSet))

    # Verify that `node_to_restart` successfully completes catch-up
    waitNodeDataEquality(looper, node_to_restart, *other_nodes)

    # check discarding ledger statuses from `old_node` for all ledgers
    assert countDiscarded(node_to_restart,
                          'replied message has invalid structure') >= 3

    # Verify that `node_to_restart` participates in ordering
    # of further transactions
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward, 5)
    waitNodeDataEquality(looper, node_to_restart, *other_nodes)
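The test builds LedgerStatusInOldFormat with exactly the fields of the current LedgerStatus minus protocolVersion. A minimal sketch of how such a type could be declared, assuming a plain NamedTuple serializes compatibly (the real definition in the test module may differ):

from typing import NamedTuple

# Old-format ledger status: the current LedgerStatus without the
# `protocolVersion` field. Field names match the attributes the
# test reads off the original status.
LedgerStatusInOldFormat = NamedTuple('LedgerStatus',
                                     [('ledgerId', int),
                                      ('txnSeqNo', int),
                                      ('viewNo', int),
                                      ('ppSeqNo', int),
                                      ('merkleRoot', str)])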
def chk(services, log_message, old_discard_counts=None):
    if old_discard_counts is None:
        global node_discard_counts
        old_discard_counts = node_discard_counts
    for s in services:
        assert countDiscarded(
            s, log_message) > old_discard_counts[log_message][s.name]
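This chk variant pairs with the service-level fill_counters above; an illustrative call that checks each node's message-request service against the service-level snapshot (assuming the services expose a name, as fill_counters already relies on):

msg = 'does not have expected state'
fill_counters(txnPoolNodeSet, msg)
# ... run the scenario that should trigger discards ...
chk([n.master_replica._message_req_service for n in txnPoolNodeSet],
    msg,
    old_discard_counts=service_discard_counts)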
def test_master_primary_different_from_previous_view_for_itself(
        txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client):
    """
    After a view change, primary must be different from previous primary for
    master instance, it does not matter for other instance. Break it into
    2 tests, one where the primary is malign and votes for itself but is still
    not made primary in the next view.
    """
    old_view_no = checkViewNoForNodes(txnPoolNodeSet)
    pr = slow_primary(txnPoolNodeSet, 0, delay=10)
    old_pr_node = pr.node

    def _get_undecided_inst_id(self):
        undecideds = [i for i, r in enumerate(self.replicas)
                      if r.isPrimary is None]
        # Try to nominate for the master instance
        return undecideds, 0

    # Patch the old primary's elector to nominate itself again
    # for the new view
    old_pr_node.elector._get_undecided_inst_id = types.MethodType(
        _get_undecided_inst_id, old_pr_node.elector)

    # View change happens
    provoke_and_wait_for_view_change(looper,
                                     txnPoolNodeSet,
                                     old_view_no + 1,
                                     sdk_pool_handle,
                                     sdk_wallet_client)

    # Elections done
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
    # New primary is not same as old primary
    assert getPrimaryReplica(txnPoolNodeSet, 0).node.name != old_pr_node.name

    # All other nodes discarded the nomination by the old primary
    for node in txnPoolNodeSet:
        if node != old_pr_node:
            assert countDiscarded(node.elector,
                                  'of master in previous view too') == 1

    # The new primary can still process requests
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              5)
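The patch above relies on types.MethodType to rebind a method on a single instance. A self-contained illustration of the pattern (the class and names here are hypothetical):

import types


class Elector:
    def _get_undecided_inst_id(self):
        return [], None


def always_nominate_master(self):
    # Replacement behaviour: always offer instance 0 (the master).
    return [0], 0


elector = Elector()
# Bind the replacement to this one instance; other Elector instances
# keep the original method.
elector._get_undecided_inst_id = types.MethodType(always_nominate_master,
                                                  elector)
assert elector._get_undecided_inst_id() == ([0], 0)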
Example #7
def test_master_primary_different_from_previous_view_for_itself(
        txnPoolNodeSet, looper, client1, wallet1):
    """
    After a view change, primary must be different from previous primary for
    master instance, it does not matter for other instance. Break it into
    2 tests, one where the primary is malign and votes for itself but is still
    not made primary in the next view.
    """
    old_view_no = checkViewNoForNodes(txnPoolNodeSet)
    pr = slow_primary(txnPoolNodeSet, 0, delay=10)
    old_pr_node = pr.node

    def _get_undecided_inst_id(self):
        undecideds = [i for i, r in enumerate(self.replicas)
                      if r.isPrimary is None]
        # Try to nominate for the master instance
        return undecideds, 0

    # Patch the old primary's elector to nominate itself again
    # for the new view
    old_pr_node.elector._get_undecided_inst_id = types.MethodType(
        _get_undecided_inst_id, old_pr_node.elector)

    # View change happens
    provoke_and_wait_for_view_change(looper,
                                     txnPoolNodeSet,
                                     old_view_no + 1,
                                     wallet1,
                                     client1)

    # Elections done
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
    # New primary is not same as old primary
    assert getPrimaryReplica(txnPoolNodeSet, 0).node.name != old_pr_node.name

    # All other nodes discarded the nomination by the old primary
    for node in txnPoolNodeSet:
        if node != old_pr_node:
            assert countDiscarded(node.elector,
                                  'of master in previous view too') == 1

    # The new primary can still process requests
    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, 5)
def chk(nodes, log_message):
    global discard_counts
    for n in nodes:
        assert countDiscarded(
            n, log_message) > discard_counts[log_message][n.name]
def fill_counters(nodes, log_message):
    global discard_counts
    discard_counts[log_message] = {
        n.name: countDiscarded(n, log_message)
        for n in nodes
    }
Example #10
def discardCounts(replicas, pat):
    counts = {}
    for r in replicas:
        counts[r.name] = countDiscarded(r, pat)
    return counts
def count_discarded(inst_id, count):
    for node in other_nodes:
        assert countDiscarded(node.replicas[inst_id],
                              'already ordered 3 phase message') == count
def discardCounts(checkpoint_services, pat):
    # As above, but keyed by each service's string representation.
    counts = {}
    for r in checkpoint_services:
        counts[str(r)] = countDiscarded(r, pat)
    return counts
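An illustrative before/after use of this variant, with the discard pattern taken from the count_discarded example above (the snapshot/compare shape is the same one the chk/fill_counters helpers encode):

pat = 'already ordered 3 phase message'
before = discardCounts(checkpoint_services, pat)
# ... run the scenario expected to cause further discards ...
after = discardCounts(checkpoint_services, pat)
assert all(after[name] >= before[name] for name in before)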