def test_future_primaries_replicas_increase(looper, txnPoolNodeSet, sdk_pool_handle,
                                            sdk_wallet_stewards, tdir, tconf, allPluginsPath):
    # Don't delete NodeStates, so that we can check them.
    global old_commit
    old_commit = txnPoolNodeSet[0].write_manager.future_primary_handler.commit_batch
    for node in txnPoolNodeSet:
        node.write_manager.future_primary_handler.commit_batch = lambda three_pc_batch, prev_handler_result=None: 0

    initial_primaries = copy.copy(txnPoolNodeSet[0].primaries)
    last_ordered = txnPoolNodeSet[0].master_replica.last_ordered_3pc
    starting_view_number = checkViewNoForNodes(txnPoolNodeSet)

    # Increase replicas count
    add_new_node(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_stewards[0], tdir, tconf, allPluginsPath)

    new_view_no = checkViewNoForNodes(txnPoolNodeSet)
    assert new_view_no == starting_view_number + 1
    # "seq_no + 2" because 1 domain and 1 pool txn.

    node = txnPoolNodeSet[0]
    with delay_rules(node.nodeIbStasher, cDelay()):
        req = sdk_send_random_and_check(looper, txnPoolNodeSet,
                                        sdk_pool_handle,
                                        sdk_wallet_stewards[0], 1)[0][0]
        req = Request(**req)
        three_pc_batch = ThreePcBatch(DOMAIN_LEDGER_ID, 0, 0, 1, time.time(),
                                      randomString(),
                                      randomString(),
                                      ['a', 'b', 'c'], [req.digest], pp_digest='')
        primaries = node.write_manager.future_primary_handler.post_batch_applied(three_pc_batch)
        assert len(primaries) == len(initial_primaries) + 1
        assert len(primaries) == len(node.primaries)
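The test above replaces commit_batch with a no-op lambda so the future-primaries handler keeps its node states for inspection. A minimal teardown sketch, assuming the same old_commit global and fixtures as above (the helper name itself is hypothetical and not part of the original suite):

def restore_commit_batch(txnPoolNodeSet):
    # Hypothetical cleanup: re-attach the commit_batch saved in old_commit so
    # that later tests see the normal committing behaviour again.
    for node in txnPoolNodeSet:
        node.write_manager.future_primary_handler.commit_batch = old_commit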
def test_future_primaries_replicas_increase(looper, txnPoolNodeSet,
                                            sdk_pool_handle,
                                            sdk_wallet_stewards, tdir, tconf,
                                            allPluginsPath):
    # Don't delete NodeStates, so that we can check them.
    global old_commit
    old_commit = txnPoolNodeSet[
        0].write_manager.future_primary_handler.commit_batch
    for node in txnPoolNodeSet:
        node.write_manager.future_primary_handler.commit_batch = lambda three_pc_batch, prev_handler_result=None: 0

    initial_primaries = copy.copy(txnPoolNodeSet[0].primaries)
    last_ordered = txnPoolNodeSet[0].master_replica.last_ordered_3pc
    starting_view_number = checkViewNoForNodes(txnPoolNodeSet)

    # Increase replicas count
    add_new_node(looper, txnPoolNodeSet, sdk_pool_handle,
                 sdk_wallet_stewards[0], tdir, tconf, allPluginsPath)

    new_view_no = checkViewNoForNodes(txnPoolNodeSet)
    assert new_view_no == starting_view_number + 1
    # "seq_no + 2" because 1 domain and 1 pool txn.
    state = txnPoolNodeSet[0].write_manager.future_primary_handler.node_states[
        -1]
    assert len(state.primaries) == len(initial_primaries) + 1
    assert len(state.primaries) == len(txnPoolNodeSet[0].primaries)
Example #3
def test_audit_multiple_uncommitted_node_regs(looper, tdir, tconf,
                                              allPluginsPath, txnPoolNodeSet,
                                              sdk_pool_handle,
                                              sdk_wallet_client,
                                              sdk_wallet_steward):
    '''
    - Delay COMMITS on 1 Node
    - Add 2 more nodes (so that the slow node has multiple uncommitted node txns)
    - Make sure that all nodes have equal state eventually
    '''
    slow_node = txnPoolNodeSet[-1]
    fast_nodes = [node for node in txnPoolNodeSet if node != slow_node]
    slow_stashers = [slow_node.nodeIbStasher]

    # let's ignore view changes for simplicity here
    start_delaying([n.nodeIbStasher for n in txnPoolNodeSet], icDelay())

    with delay_rules(slow_stashers, cDelay()):
        # Add Node5
        new_node = add_new_node(looper,
                                fast_nodes,
                                sdk_pool_handle,
                                sdk_wallet_steward,
                                tdir,
                                tconf,
                                allPluginsPath,
                                name='Psi',
                                wait_till_added=False)
        txnPoolNodeSet.append(new_node)
        start_delaying(new_node.nodeIbStasher, icDelay())

        # Add Node6
        new_node = add_new_node(looper,
                                fast_nodes,
                                sdk_pool_handle,
                                sdk_wallet_steward,
                                tdir,
                                tconf,
                                allPluginsPath,
                                name='Eta',
                                wait_till_added=False)
        txnPoolNodeSet.append(new_node)
        start_delaying(new_node.nodeIbStasher, icDelay())

    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=20)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
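Note that the instance-change delay above is applied with start_delaying rather than delay_rules, so it stays active after the with-block exits and view changes remain suppressed for the rest of the test. A purely illustrative, scoped alternative (not what this test needs, since it wants the suppression to outlive the commit delay) could look like this, reusing the same stashers and delayers:

with delay_rules([n.nodeIbStasher for n in txnPoolNodeSet], icDelay()):
    with delay_rules(slow_stashers, cDelay()):
        pass  # add the new nodes here, as in the test above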
Example #4
def test_set_H_greater_then_last_ppseqno(looper,
                                         txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_steward,
                                         tdir,
                                         tconf,
                                         allPluginsPath):
    # Send LOG_SIZE requests and check that the watermarks on all replicas
    # have not changed and are now (0, LOG_SIZE)
    """Send random requests for moving watermarks"""
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, LOG_SIZE)
    # Check that every node has moved its watermarks above the default, so a
    # ppSeqNo of LOG_SIZE + 1 would fall outside the default watermark window
    assert txnPoolNodeSet[0].replicas[1].last_ordered_3pc[1] > 0
    looper.runFor(30)
    assert txnPoolNodeSet[0].replicas[1].last_ordered_3pc[1] == LOG_SIZE
    for n in txnPoolNodeSet:
        for r in n.replicas._replicas.values():
            assert r.h >= LOG_SIZE
            assert r.H >= LOG_SIZE + LOG_SIZE
    """Adding new node, for scheduling propagate primary procedure"""
    new_node = add_new_node(looper, txnPoolNodeSet, sdk_pool_handle,
                            sdk_wallet_steward, tdir, tconf, allPluginsPath)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet,
                                    exclude_from_check=['check_last_ordered_3pc_backup'])
    """Check, that backup replicas set watermark as (0, maxInt)"""
    # Check, replica.h is set from last_ordered_3PC and replica.H is set to maxsize
    for r in new_node.replicas.values():
        assert r.h == r.last_ordered_3pc[1]
        if r.isMaster:
            assert r.H == r.last_ordered_3pc[1] + LOG_SIZE
        else:
            assert r.H == sys.maxsize
    """Send requests and check. that backup replicas does not stashing it by outside watermarks reason"""
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 1)
    # Check that there are no stashed 'outside watermarks' messages.
    for r in new_node.replicas.values():
        assert r.stasher.stash_size(STASH_WATERMARKS) == 0

    """Force view change and check, that all backup replicas setup H as a default<
    (not propagate primary logic)"""
    """This need to ensure, that next view_change does not break watermark setting logic"""

    ensure_view_change(looper, txnPoolNodeSet)
    ensureElectionsDone(looper, txnPoolNodeSet)
    for r in new_node.replicas.values():
        if not r.isMaster:
            assert r.h == 0
            assert r.H == LOG_SIZE
def test_set_H_greater_then_last_ppseqno(looper,
                                         txnPoolNodeSet,
                                         sdk_pool_handle,
                                         sdk_wallet_steward,
                                         tdir,
                                         tconf,
                                         allPluginsPath):
    # Send LOG_SIZE requests and check that the watermarks on all replicas
    # have not changed and are now (0, LOG_SIZE)
    """Send random requests for moving watermarks"""
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, LOG_SIZE)
    # Check that every node has moved its watermarks above the default, so a
    # ppSeqNo of LOG_SIZE + 1 would fall outside the default watermark window
    for n in txnPoolNodeSet:
        for r in n.replicas._replicas.values():
            assert r.h >= LOG_SIZE
            assert r.H >= LOG_SIZE + LOG_SIZE
    """Adding new node, for scheduling propagate primary procedure"""
    new_node = add_new_node(looper, txnPoolNodeSet, sdk_pool_handle,
                            sdk_wallet_steward, tdir, tconf, allPluginsPath)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    """Check, that backup replicas set watermark as (0, maxInt)"""
    # Check, replica.h is set from last_ordered_3PC and replica.H is set to maxsize
    for r in new_node.replicas.values():
        assert r.h == r.last_ordered_3pc[1]
        if r.isMaster:
            assert r.H == r.last_ordered_3pc[1] + LOG_SIZE
        else:
            assert r.H == sys.maxsize
    """Send requests and check. that backup replicas does not stashing it by outside watermarks reason"""
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 1)
    # Check that there are no stashed 'outside watermarks' messages.
    for r in new_node.replicas.values():
        assert len(r.stashingWhileOutsideWaterMarks) == 0

    """Force view change and check, that all backup replicas setup H as a default
    (not propagate primary logic)"""
    """This need to ensure, that next view_change does not break watermark setting logic"""

    ensure_view_change(looper, txnPoolNodeSet)
    ensureElectionsDone(looper, txnPoolNodeSet)
    for r in new_node.replicas.values():
        if not r.isMaster:
            assert r.h == 0
            assert r.H == LOG_SIZE
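Both versions of the test above rely on the same watermark invariant: a replica only processes 3PC batches whose ppSeqNo falls in the window (h, H]. A small sketch of the values the assertions expect, assuming LOG_SIZE is the configured window size (the exact default is configuration-dependent):

import sys

def expected_watermarks_after_catchup(last_ordered_pp_seq_no, log_size, is_master):
    # Sketch of the checks above: after the new node finishes catch-up,
    # h starts at the last ordered ppSeqNo; the master keeps a window of
    # log_size above it, while backups leave H wide open (sys.maxsize) until
    # a view change resets them to the default (0, log_size).
    h = last_ordered_pp_seq_no
    H = h + log_size if is_master else sys.maxsize
    return h, H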
Example #6
def check_view_change_adding_new_node(looper,
                                      tdir,
                                      tconf,
                                      allPluginsPath,
                                      txnPoolNodeSet,
                                      sdk_pool_handle,
                                      sdk_wallet_client,
                                      sdk_wallet_steward,
                                      slow_nodes=[],
                                      delay_commit=False,
                                      delay_pre_prepare=False):
    # Pre-requisites: viewNo=3, Primary is Node4
    for viewNo in range(1, 4):
        trigger_view_change(txnPoolNodeSet)
        waitForViewChange(looper, txnPoolNodeSet, viewNo)
        ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30)

    # Delay 3PC messages on slow nodes
    fast_nodes = [node for node in txnPoolNodeSet if node not in slow_nodes]
    slow_stashers = [slow_node.nodeIbStasher for slow_node in slow_nodes]
    delayers = []
    if delay_pre_prepare:
        delayers.append(ppDelay())
        delayers.append(msg_rep_delay(types_to_delay=[PREPREPARE]))
    if delay_commit:
        delayers.append(cDelay())

    with delay_rules_without_processing(slow_stashers, *delayers):
        # Add Node5
        new_node = add_new_node(looper, fast_nodes, sdk_pool_handle,
                                sdk_wallet_steward, tdir, tconf,
                                allPluginsPath)
        old_set = list(txnPoolNodeSet)
        txnPoolNodeSet.append(new_node)

        # Trigger view change
        trigger_view_change(txnPoolNodeSet)

        # make sure view change is finished eventually
        waitForViewChange(looper, old_set, 4)
        ensureElectionsDone(looper, old_set)

    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
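check_view_change_adding_new_node is a parametrized helper rather than a test itself; concrete tests call it with different delay combinations. A purely hypothetical invocation (the test name and parameter choices below are illustrative, not taken from the original suite):

def test_view_change_while_commits_delayed(looper, tdir, tconf, allPluginsPath,
                                           txnPoolNodeSet, sdk_pool_handle,
                                           sdk_wallet_client, sdk_wallet_steward):
    # Delay COMMITs on the last node while a new node is added and a view
    # change is triggered, then expect the pool to stay functional.
    check_view_change_adding_new_node(looper, tdir, tconf, allPluginsPath,
                                      txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, sdk_wallet_steward,
                                      slow_nodes=[txnPoolNodeSet[-1]],
                                      delay_commit=True,
                                      delay_pre_prepare=False)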
def test_6th_node_join_after_view_change_by_master_restart(
        looper, txnPoolNodeSet, tdir, tconf, allPluginsPath, sdk_pool_handle,
        sdk_wallet_steward, limitTestRunningTime):
    """
    Test steps:
    1. start pool of 4 nodes
    2. force 4 view changes by restarting the primary node
    3. now the primary node must be Alpha; then add a new node, named Epsilon
    4. ensure that Epsilon was added and catch-up is done
    5. send some txns
    6. force 4 view changes. Now the primary node is the newly added Epsilon
    7. add a 6th node and ensure that the new node has caught up
    """
    for __ in range(4):
        ensure_view_change(looper,
                           txnPoolNodeSet,
                           custom_timeout=tconf.VIEW_CHANGE_TIMEOUT)

    ensureElectionsDone(looper,
                        txnPoolNodeSet,
                        customTimeout=tconf.VIEW_CHANGE_TIMEOUT)
    timeout = waits.expectedPoolCatchupTime(nodeCount=len(txnPoolNodeSet))
    for node in txnPoolNodeSet:
        looper.run(eventually(catchuped, node, timeout=2 * timeout))
    ensure_all_nodes_have_same_data(looper,
                                    txnPoolNodeSet,
                                    custom_timeout=timeout)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)

    new_epsilon_node = add_new_node(looper,
                                    txnPoolNodeSet,
                                    sdk_pool_handle,
                                    sdk_wallet_steward,
                                    tdir,
                                    tconf,
                                    allPluginsPath,
                                    name='Epsilon')
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)
    """
    check that pool and domain ledgers for new node are in synced state
    """
    timeout = waits.expectedPoolCatchupTime(nodeCount=len(txnPoolNodeSet))
    for node in txnPoolNodeSet:
        looper.run(
            eventually(check_ledger_state,
                       node,
                       DOMAIN_LEDGER_ID,
                       LedgerState.synced,
                       retryWait=.5,
                       timeout=timeout))
        looper.run(
            eventually(check_ledger_state,
                       node,
                       POOL_LEDGER_ID,
                       LedgerState.synced,
                       retryWait=.5,
                       timeout=timeout))
    for __ in range(4):
        ensure_view_change(looper,
                           txnPoolNodeSet,
                           custom_timeout=tconf.VIEW_CHANGE_TIMEOUT)

    ensureElectionsDone(looper,
                        txnPoolNodeSet,
                        customTimeout=tconf.VIEW_CHANGE_TIMEOUT)
    timeout = waits.expectedPoolCatchupTime(nodeCount=len(txnPoolNodeSet))
    for node in txnPoolNodeSet:
        looper.run(eventually(catchuped, node, timeout=3 * timeout))
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 2)
    new_psi_node = add_new_node(looper,
                                txnPoolNodeSet,
                                sdk_pool_handle,
                                sdk_wallet_steward,
                                tdir,
                                tconf,
                                allPluginsPath,
                                name='Psi')
    looper.run(
        eventually(check_ledger_state,
                   new_psi_node,
                   DOMAIN_LEDGER_ID,
                   LedgerState.synced,
                   retryWait=.5,
                   timeout=5))
    looper.run(
        eventually(check_ledger_state,
                   new_psi_node,
                   POOL_LEDGER_ID,
                   LedgerState.synced,
                   retryWait=.5,
                   timeout=5))