def test_restart_half_to_lower_view(looper, txnPoolNodeSet, tconf, tdir,
                                    allPluginsPath, sdk_pool_handle,
                                    sdk_wallet_client):
    # Add transaction to ledger
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    # Move to higher view
    ensure_view_change_complete(looper, txnPoolNodeSet)

    # Restart half of nodes
    tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(
        len(txnPoolNodeSet))
    nodes_before_restart = txnPoolNodeSet.copy()
    restart_nodes(looper,
                  txnPoolNodeSet,
                  txnPoolNodeSet[2:],
                  tconf,
                  tdir,
                  allPluginsPath,
                  after_restart_timeout=tm,
                  start_one_by_one=False)

    # Check that the pre-restart node objects never thought they might have inconsistent 3PC state
    for node in nodes_before_restart:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that the current nodes don't think they might have inconsistent 3PC state
    for node in txnPoolNodeSet:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that all nodes are still functional
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
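
Every example on this page drives restart_nodes the same way; the call below is a template assembled from the keyword arguments that appear across these tests, with comments recording how the tests appear to use each one (an interpretation drawn from the examples, not the helper's own documentation):

restart_nodes(looper,                    # test looper driving the event loop
              txnPoolNodeSet,            # full pool; later assertions iterate it again,
                                         # so restarted node objects are expected to be swapped in
              txnPoolNodeSet[2:],        # the subset of nodes to restart
              tconf, tdir, allPluginsPath,
              after_restart_timeout=tm,  # extra wait after the group comes back
                                         # (ToleratePrimaryDisconnection + expected election timeout here)
              start_one_by_one=False,    # bring the whole group back at once
              wait_for_elections=False)  # used when the caller runs ensureElectionsDone itself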
Example #2
def test_restart_majority_to_same_view(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath,
                                        sdk_pool_handle, sdk_wallet_client):
    # Add transaction to ledger
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)

    majority = txnPoolNodeSet[:3]
    minority = txnPoolNodeSet[3:]

    # Restart majority group
    tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
    majority_before_restart = majority.copy()
    restart_nodes(looper, txnPoolNodeSet, majority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=tm, start_one_by_one=False, wait_for_elections=False)
    ensureElectionsDone(looper, majority, instances_list=range(2))

    # Check that nodes in minority group are aware that they might have inconsistent 3PC state
    for node in minority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 1

    # Check that the majority group's pre-restart node objects never thought they might have inconsistent 3PC state
    for node in majority_before_restart:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that nodes in majority group don't think they might have inconsistent 3PC state
    for node in majority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Restart minority group
    restart_nodes(looper, txnPoolNodeSet, minority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=tm, start_one_by_one=False)

    # Check that all nodes are still functional
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
def test_restart_groups_6_of_7_np_no_tm(looper, txnPoolNodeSet, tconf, tdir,
                                        sdk_pool_handle, sdk_wallet_client, allPluginsPath):
    tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))

    restart_group = get_group(txnPoolNodeSet, 6, include_primary=False)

    restart_nodes(looper, txnPoolNodeSet, restart_group, tconf, tdir, allPluginsPath,
                  after_restart_timeout=tm, start_one_by_one=False)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
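
get_group is used above but not shown. Below is a minimal sketch of what such a selector could look like; the has_master_primary attribute is an assumption about the node API and may need to be adapted to the actual helper.

import random

def get_group(node_set, size, include_primary=False):
    # Hypothetical selector: pick `size` nodes from the pool, optionally
    # skipping the node that currently hosts the master primary.
    # `has_master_primary` is an assumed node attribute, not verified here.
    candidates = [n for n in node_set
                  if include_primary or not getattr(n, 'has_master_primary', False)]
    assert len(candidates) >= size, "not enough candidate nodes"
    return random.sample(candidates, size)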
def test_restart_to_same_view_with_killed_primary(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath,
                                                  sdk_pool_handle, sdk_wallet_client):
    restart_timeout = tconf.ToleratePrimaryDisconnection + \
                      waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))

    primary = txnPoolNodeSet[0]
    alive_nodes = txnPoolNodeSet[1:]
    minority = alive_nodes[-1:]
    majority = alive_nodes[:-1]

    # Move to higher view by killing primary
    primary.cleanupOnStopping = True
    primary.stop()
    looper.removeProdable(primary)
    ensure_node_disconnected(looper, primary, txnPoolNodeSet)
    waitForViewChange(looper, alive_nodes, 1, customTimeout=VIEW_CHANGE_TIMEOUT)
    ensureElectionsDone(looper, alive_nodes, numInstances=3)

    # Add transaction to ledger
    sdk_send_random_and_check(looper, alive_nodes, sdk_pool_handle, sdk_wallet_client, 1)

    # Restart majority group
    majority_before_restart = majority.copy()
    restart_nodes(looper, alive_nodes, majority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=restart_timeout, start_one_by_one=False, wait_for_elections=False)
    waitForViewChange(looper, majority, 1, customTimeout=2.1 * VIEW_CHANGE_TIMEOUT)
    ensureElectionsDone(looper, majority, numInstances=3)

    # Check that nodes in minority group are aware that they might have inconsistent 3PC state
    for node in minority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 1

    # Check that the majority group's pre-restart node objects never thought they might have inconsistent 3PC state
    for node in majority_before_restart:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that nodes in majority group don't think they might have inconsistent 3PC state
    for node in majority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Restart minority group
    restart_nodes(looper, alive_nodes, minority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=restart_timeout, start_one_by_one=False, wait_for_elections=False)
    ensureElectionsDone(looper, alive_nodes, numInstances=3)

    # Check that all nodes are still functional
    sdk_ensure_pool_functional(looper, alive_nodes, sdk_wallet_client, sdk_pool_handle)
def test_restart_to_same_view_with_killed_primary(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath,
                                                  sdk_pool_handle, sdk_wallet_client):
    restart_timeout = tconf.ToleratePrimaryDisconnection + \
                      waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))

    primary = txnPoolNodeSet[0]
    alive_nodes = txnPoolNodeSet[1:]
    minority = alive_nodes[-1:]
    majority = alive_nodes[:-1]

    # Move to higher view by killing primary
    primary.cleanupOnStopping = True
    primary.stop()
    looper.removeProdable(primary)
    ensure_node_disconnected(looper, primary, txnPoolNodeSet)
    waitForViewChange(looper, alive_nodes, 1, customTimeout=VIEW_CHANGE_TIMEOUT)
    ensureElectionsDone(looper, alive_nodes, instances_list=range(3))

    # Add transaction to ledger
    sdk_send_random_and_check(looper, alive_nodes, sdk_pool_handle, sdk_wallet_client, 1)

    # Restart majority group
    majority_before_restart = majority.copy()
    restart_nodes(looper, alive_nodes, majority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=restart_timeout, start_one_by_one=False, wait_for_elections=False)
    waitForViewChange(looper, majority, 1, customTimeout=2.1 * VIEW_CHANGE_TIMEOUT)
    ensureElectionsDone(looper, majority, instances_list=range(3))

    # Check that nodes in minority group are aware that they might have inconsistent 3PC state
    for node in minority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 1

    # Check that the majority group's pre-restart node objects never thought they might have inconsistent 3PC state
    for node in majority_before_restart:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that nodes in majority group don't think they might have inconsistent 3PC state
    for node in majority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Restart minority group
    restart_nodes(looper, alive_nodes, minority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=restart_timeout, start_one_by_one=False, wait_for_elections=False)
    ensureElectionsDone(looper, alive_nodes, instances_list=range(3))

    # Check that all nodes are still functional
    sdk_ensure_pool_functional(looper, alive_nodes, sdk_wallet_client, sdk_pool_handle)
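
The two test_restart_to_same_view_with_killed_primary variants above are otherwise identical; they differ only in the keyword ensureElectionsDone takes for the replica instances, which appears to track a rename between library versions. Pick whichever your version accepts:

# Older keyword, as used in the first variant:
ensureElectionsDone(looper, majority, numInstances=3)
# Newer keyword, as used in the second variant:
ensureElectionsDone(looper, majority, instances_list=range(3))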
Example #7
def test_view_change_with_instance_change_lost_due_to_restarts(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf,
        tdir, allPluginsPath):
    """
    1. some_nodes (Beta and Gamma) send InstanceChange to all nodes.
    2. Restart other_nodes (Gamma and Delta).
    3. last_node (Delta) sends InstanceChange to all nodes.
    4. Ensure that elections are done and the pool is functional.
    """
    current_view_no = txnPoolNodeSet[0].viewNo
    some_nodes = txnPoolNodeSet[1:3]
    other_nodes = txnPoolNodeSet[2:4]

    for n in some_nodes:
        send_test_instance_change(n)

    def check_ic_delivery():
        for node in txnPoolNodeSet:
            vct_service = node.master_replica._view_change_trigger_service
            assert all(
                vct_service._instance_changes.has_inst_chng_from(
                    current_view_no + 1, sender.name) for sender in some_nodes)

    looper.run(eventually(check_ic_delivery))

    restart_nodes(looper,
                  txnPoolNodeSet,
                  other_nodes,
                  tconf,
                  tdir,
                  allPluginsPath,
                  start_one_by_one=False)

    last_node = txnPoolNodeSet[-1]
    send_test_instance_change(last_node)
    waitForViewChange(looper,
                      txnPoolNodeSet,
                      current_view_no + 1,
                      customTimeout=3 * FRESHNESS_TIMEOUT)

    ensureElectionsDone(looper, txnPoolNodeSet)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
Example #8
def test_first_audit_catchup_during_ordering(monkeypatch,
                                             looper, tconf, tdir, allPluginsPath, txnPoolNodeSet,
                                             sdk_pool_handle, sdk_wallet_client):
    # 1. patch primaries in audit ledger
    for n in txnPoolNodeSet:
        patch_primaries_in_audit(n, monkeypatch)

    # 2. order a txn
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)

    # 3. restart Nodes 3 and 4
    restart_nodes(looper, txnPoolNodeSet, txnPoolNodeSet[2:], tconf, tdir, allPluginsPath, start_one_by_one=False)

    # 4. re-patch primaries in the audit ledger on the restarted nodes
    #    (the restart creates fresh node objects, so the earlier patch no longer applies)
    for n in txnPoolNodeSet[2:]:
        patch_primaries_in_audit(n, monkeypatch)

    # 5. make sure that all nodes have equal Primaries and can order
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=20)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
Example #9
def test_view_change_with_instance_change_lost_due_to_restarts(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf,
        tdir, allPluginsPath):
    """
    1. some_nodes (Beta and Gamma) send InstanceChange to all nodes.
    2. Restart other_nodes (Gamma and Delta).
    3. last_node (Delta) sends InstanceChange to all nodes.
    4. Ensure that elections are done and the pool is functional.
    """
    current_view_no = txnPoolNodeSet[0].viewNo
    some_nodes = txnPoolNodeSet[1:3]
    other_nodes = txnPoolNodeSet[2:4]

    for n in some_nodes:
        n.view_changer.on_master_degradation()

    def check_ic_delivery():
        for node in txnPoolNodeSet:
            assert node.view_changer.instanceChanges._votes_count(
                current_view_no + 1) == 2

    looper.run(eventually(check_ic_delivery))

    restart_nodes(looper,
                  txnPoolNodeSet,
                  other_nodes,
                  tconf,
                  tdir,
                  allPluginsPath,
                  start_one_by_one=False)

    last_node = txnPoolNodeSet[-1]
    last_node.view_changer.on_master_degradation()
    waitForViewChange(looper,
                      txnPoolNodeSet,
                      current_view_no + 1,
                      customTimeout=3 * FRESHNESS_TIMEOUT)

    ensureElectionsDone(looper, txnPoolNodeSet)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
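
Example #7 and Example #9 exercise the same lost-InstanceChange scenario against two generations of the view-change API; they differ only in how the InstanceChange is triggered and how its delivery is asserted. Side by side, as seen above:

# Newer style (Example #7): a test helper sends the InstanceChange, and delivery
# is asserted through the master replica's view-change trigger service.
send_test_instance_change(node)

# Older style (Example #9): the node's ViewChanger is invoked directly, and
# delivery is asserted through its InstanceChanges vote count.
node.view_changer.on_master_degradation()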
def test_restart_half_to_lower_view(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath,
                                    sdk_pool_handle, sdk_wallet_client):
    # Add transaction to ledger
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)

    # Move to higher view
    ensure_view_change_complete(looper, txnPoolNodeSet)

    # Restart half of nodes
    tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
    nodes_before_restart = txnPoolNodeSet.copy()
    restart_nodes(looper, txnPoolNodeSet, txnPoolNodeSet[2:], tconf, tdir, allPluginsPath,
                  after_restart_timeout=tm, start_one_by_one=False)

    # Check that the pre-restart node objects never thought they might have inconsistent 3PC state
    for node in nodes_before_restart:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that the current nodes don't think they might have inconsistent 3PC state
    for node in txnPoolNodeSet:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that all nodes are still functional
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle, num_reqs=2, num_batches=1)