Example no. 1
def txnPoolNodeSet(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_steward,
                   tconf, tdirWithPoolTxns, allPluginsPath):
    logger.debug("Do several view changes to round the list of primaries")

    assert txnPoolNodeSet[0].viewNo == len(txnPoolNodeSet) - 1

    logger.debug(
        "Do view change to reach viewNo {}".format(txnPoolNodeSet[0].viewNo +
                                                   1))
    ensure_view_change_complete(looper, txnPoolNodeSet)
    logger.debug("Send requests to ensure that pool is working properly, "
                 "viewNo: {}".format(txnPoolNodeSet[0].viewNo))
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)

    logger.debug("Pool is ready, current viewNo: {}".format(
        txnPoolNodeSet[0].viewNo))

    # TODO find out and fix why an additional view change could happen
    # because of a degraded master. It's critical for the current test that
    # the view change is completed by the time the new node is joining.
    # Thus, disable the master degradation check as it won't impact the case
    # and guarantees the necessary state.
    for node in txnPoolNodeSet:
        node.monitor.isMasterDegraded = lambda: False

    return txnPoolNodeSet
def test_restart_half_to_lower_view(looper, txnPoolNodeSet, tconf, tdir,
                                    allPluginsPath, sdk_pool_handle,
                                    sdk_wallet_client):
    # Add transaction to ledger
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    # Move to higher view
    ensure_view_change_complete(looper, txnPoolNodeSet)

    # Restart half of nodes
    tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(
        len(txnPoolNodeSet))
    nodes_before_restart = txnPoolNodeSet.copy()
    restart_nodes(looper,
                  txnPoolNodeSet,
                  txnPoolNodeSet[2:],
                  tconf,
                  tdir,
                  allPluginsPath,
                  after_restart_timeout=tm,
                  start_one_by_one=False)

    # Check that nodes didn't think they may have inconsistent 3PC state
    for node in nodes_before_restart:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that nodes don't think they may have inconsistent 3PC state
    for node in txnPoolNodeSet:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that all nodes are still functional
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
Example no. 3
def test_last_ordered_3pc_reset_if_more_than_new_view(txnPoolNodeSet, looper,
                                                      sdk_pool_handle,
                                                      sdk_wallet_client):
    """
    Check that if last_ordered_3pc's viewNo on a Replica is greater than the new viewNo after view change,
    then last_ordered_3pc is reset to (view_no, 0).
    It can be that last_ordered_3pc was set for the previous view, since it's set during catch-up

    Example: a Node has last_ordered = (1, 300), and then the whole pool except this node restarted.
    The new viewNo is 0, but last_ordered is (1, 300), so all new requests will be discarded by this Node
    if we don't reset last_ordered_3pc
    """
    old_view_no = checkViewNoForNodes(txnPoolNodeSet)
    for node in txnPoolNodeSet:
        node.master_replica.last_ordered_3pc = (old_view_no + 2, 100)

    ensure_view_change_complete(looper, txnPoolNodeSet, customTimeout=60)
    view_no = checkViewNoForNodes(txnPoolNodeSet)

    for node in txnPoolNodeSet:
        assert (view_no, 0) == node.master_replica.last_ordered_3pc

    # Make sure the pool is working
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 5)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
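The reset rule exercised above (and relied on by the "not reset" tests below) can be summarised in a minimal, self-contained sketch; this is not Plenum's actual code, and the helper name is made up for illustration:

def reset_last_ordered_if_ahead(last_ordered_3pc, new_view_no):
    """Reset last_ordered_3pc only if its viewNo is ahead of the new view."""
    last_view_no, _ = last_ordered_3pc
    if last_view_no > new_view_no:
        # The stored key belongs to a view that is ahead of the pool's new view,
        # so ordering starts from scratch in the new view.
        return (new_view_no, 0)
    # Otherwise keep it (see the "not reset" tests below).
    return last_ordered_3pc

assert reset_last_ordered_if_ahead((1, 300), 0) == (0, 0)      # reset
assert reset_last_ordered_if_ahead((0, 100), 1) == (0, 100)    # kept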
def test_ledger_status_after_catchup(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle):
    # we expect last_ordered_3pc to be not None only for the Domain ledger, as a txn was added to the Domain ledger
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)

    ensure_view_change_complete(looper, txnPoolNodeSet)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    check_ledger_statuses(txnPoolNodeSet)
Example no. 5
def test_revert_auth_rule_changing(looper, txnPoolNodeSet, sdk_wallet_trustee,
                                   sdk_wallet_steward, sdk_pool_handle):
    node_stashers = [n.nodeIbStasher for n in txnPoolNodeSet]
    wh, _ = sdk_wallet_trustee
    new_steward_did, new_steward_verkey = create_verkey_did(looper, wh)
    new_steward_did2, new_steward_verkey2 = create_verkey_did(looper, wh)
    """We try to change rule for adding new steward. For this case we """
    changed_constraint = AuthConstraint(role=STEWARD, sig_count=1)
    action = AuthActionAdd(txn_type=NYM, field=ROLE, value=STEWARD)
    with delay_rules_without_processing(
            node_stashers, pDelay(), cDelay(),
            msg_rep_delay(types_to_delay=[PREPARE, COMMIT])):
        sdk_send_and_check_auth_rule_request(
            looper,
            sdk_pool_handle,
            sdk_wallet_trustee,
            auth_action=ADD_PREFIX,
            auth_type=action.txn_type,
            field=action.field,
            new_value=action.value,
            old_value=None,
            constraint=changed_constraint.as_dict,
            no_wait=True)
        looper.runFor(waits.expectedPrePrepareTime(len(txnPoolNodeSet)))
        """
        Try to add new steward by already existed trustee.
        Validation should raise exception because we change uncommitted state
        by adding new rule, that "Only steward can add new steward"
        """
        with pytest.raises(RequestRejectedException,
                           match="Not enough STEWARD signatures"):
            sdk_add_new_nym(looper,
                            sdk_pool_handle,
                            sdk_wallet_trustee,
                            'newSteward1',
                            STEWARD_STRING,
                            dest=new_steward_did,
                            verkey=new_steward_verkey)
        looper.runFor(waits.expectedPrePrepareTime(len(txnPoolNodeSet)))
        """
        Catchup should revert config_state and discard rule changing
        """
        for n in txnPoolNodeSet:
            n.requests.clear()
        ensure_view_change_complete(looper, txnPoolNodeSet)
    """
    Try to create new steward by steward
    We can not do this, because AUTH_RULE txn was reverted
    """
    with pytest.raises(RequestRejectedException,
                       match="Not enough TRUSTEE signatures"):
        sdk_add_new_nym(looper,
                        sdk_pool_handle,
                        sdk_wallet_steward,
                        'newSteward2',
                        STEWARD_STRING,
                        dest=new_steward_did2,
                        verkey=new_steward_verkey2)
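A toy model (purely illustrative, not Plenum's state classes; all names below are made up) of the committed/uncommitted split the comments above rely on: transactions are applied to an uncommitted head first, so validation sees the new rule immediately, and the revert simply drops everything not yet committed:

class ToyState:
    def __init__(self):
        self.committed = []
        self.uncommitted = []

    def apply(self, txn):
        self.uncommitted.append(txn)      # visible to validation immediately

    def commit(self):
        self.committed.extend(self.uncommitted)
        self.uncommitted.clear()

    def revert_uncommitted(self):
        self.uncommitted.clear()          # what the catch-up effectively does here

state = ToyState()
state.apply("AUTH_RULE: steward may add steward")
state.revert_uncommitted()                # rule change is discarded, never committed
assert state.committed == [] and state.uncommitted == []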
Example no. 6
def test_revert_for_all_after_view_change(looper, helpers,
                                          nodeSetWithIntegratedTokenPlugin,
                                          sdk_pool_handle, fees_set,
                                          mint_tokens, addresses, fees):
    node_set = nodeSetWithIntegratedTokenPlugin
    current_amount = get_amount_from_token_txn(mint_tokens)
    seq_no = get_seq_no(mint_tokens)
    reverted_node = nodeSetWithIntegratedTokenPlugin[-1]

    current_amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, addresses, current_amount)
    current_amount, seq_no, _ = send_and_check_transfer(
        helpers, addresses, fees, looper, current_amount, seq_no)

    ensure_all_nodes_have_same_data(looper, node_set)

    with delay_rules([n.nodeIbStasher for n in node_set], cDelay(), pDelay()):
        len_batches_before = len(reverted_node.master_replica.batches)
        current_amount, seq_no, resp1 = send_and_check_transfer(
            helpers,
            addresses,
            fees,
            looper,
            current_amount,
            seq_no,
            check_reply=False)
        current_amount, seq_no, resp2 = send_and_check_nym_with_fees(
            helpers,
            fees_set,
            seq_no,
            looper,
            addresses,
            current_amount,
            check_reply=False)
        looper.runFor(
            waits.expectedPrePrepareTime(
                len(nodeSetWithIntegratedTokenPlugin)))
        len_batches_after = len(reverted_node.master_replica.batches)
        """
        Checks, that we have a 2 new batches
        """
        assert len_batches_after - len_batches_before == 2
        for n in node_set:
            n.view_changer.on_master_degradation()

        ensure_view_change_complete(looper, nodeSetWithIntegratedTokenPlugin)

        looper.run(
            eventually(
                lambda: assertExp(reverted_node.mode == Mode.participating)))
    ensure_all_nodes_have_same_data(looper, node_set)
    sdk_get_and_check_replies(looper, resp1)
    sdk_get_and_check_replies(looper, resp2)
    send_and_check_nym_with_fees(helpers, fees_set, seq_no, looper, addresses,
                                 current_amount)
    ensure_all_nodes_have_same_data(looper, node_set)
def test_ledger_status_after_catchup(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle):
    # we expect last_ordered_3pc to be not None only for the Domain ledger, as a txn was added to the Domain ledger
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)

    ensure_view_change_complete(looper, txnPoolNodeSet)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    check_ledger_statuses(txnPoolNodeSet,
                          pool_last_ordered_3pc=(None, None),
                          domain_last_ordered_3pc=txnPoolNodeSet[0].master_last_ordered_3PC,
                          config_last_ordered_3pc=(None, None))
def test_view_change_can_finish_despite_perpetual_catchup(txnPoolNodeSet, looper, tconf):
    # Make nodes think that there is perpetual catchup
    old_methods = [n.num_txns_caught_up_in_last_catchup for n in txnPoolNodeSet]
    for n in txnPoolNodeSet:
        n.num_txns_caught_up_in_last_catchup = types.MethodType(lambda _n: 10, n)

    ensure_view_change_complete(looper, txnPoolNodeSet)

    # Restore node behaviour
    for n, old_method in zip(txnPoolNodeSet, old_methods):
        n.num_txns_caught_up_in_last_catchup = old_method
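A standalone sketch of the monkey-patching technique used above: types.MethodType binds a plain function to a single instance, so only that object's behaviour changes and the original bound method can be restored afterwards (FakeNode below is a stand-in, not a Plenum class):

import types

class FakeNode:
    def num_txns_caught_up_in_last_catchup(self):
        return 0

node = FakeNode()
original = node.num_txns_caught_up_in_last_catchup          # bound method, kept for restore
node.num_txns_caught_up_in_last_catchup = types.MethodType(lambda _self: 10, node)
assert node.num_txns_caught_up_in_last_catchup() == 10      # patched behaviour

node.num_txns_caught_up_in_last_catchup = original          # restore, as the test does
assert node.num_txns_caught_up_in_last_catchup() == 0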
def test_restart_majority_to_lower_view(looper, txnPoolNodeSet, tconf, tdir,
                                        allPluginsPath, sdk_pool_handle,
                                        sdk_wallet_client):
    # Add transaction to ledger
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    # Move to higher view
    ensure_view_change_complete(looper, txnPoolNodeSet)

    majority = txnPoolNodeSet[:3]
    minority = txnPoolNodeSet[3:]

    # Restart majority group
    tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(
        len(txnPoolNodeSet))
    majority_before_restart = majority.copy()
    restart_nodes(looper,
                  txnPoolNodeSet,
                  majority,
                  tconf,
                  tdir,
                  allPluginsPath,
                  after_restart_timeout=tm,
                  start_one_by_one=False,
                  wait_for_elections=False)
    ensureElectionsDone(looper, majority, numInstances=2)

    # Check that nodes in minority group are aware that they might have inconsistent 3PC state
    for node in minority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 1

    # Check that nodes in majority group didn't think they might have inconsistent 3PC state
    for node in majority_before_restart:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that nodes in majority group don't think they might have inconsistent 3PC state
    for node in majority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Restart minority group
    restart_nodes(looper,
                  txnPoolNodeSet,
                  minority,
                  tconf,
                  tdir,
                  allPluginsPath,
                  after_restart_timeout=tm,
                  start_one_by_one=False)

    # Check that all nodes are still functional
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
Example no. 10
def txnPoolNodeSet(txnPoolNodeSet, looper, client1, wallet1, client1Connected,
                   tconf, tdirWithPoolTxns, allPluginsPath):
    logger.debug("Do several view changes to round the list of primaries")

    assert txnPoolNodeSet[0].viewNo == len(txnPoolNodeSet) - 1

    logger.debug(
        "Do view change to reach viewNo {}".format(txnPoolNodeSet[0].viewNo +
                                                   1))
    ensure_view_change_complete(looper, txnPoolNodeSet)
    logger.debug("Send requests to ensure that pool is working properly, "
                 "viewNo: {}".format(txnPoolNodeSet[0].viewNo))
    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, numReqs=3)

    return txnPoolNodeSet
Example no. 11
def test_view_change_can_finish_despite_perpetual_catchup(
        txnPoolNodeSet, looper, tconf):
    # Make nodes think that there is perpetual catchup
    old_methods = [
        n.num_txns_caught_up_in_last_catchup for n in txnPoolNodeSet
    ]
    for n in txnPoolNodeSet:
        n.num_txns_caught_up_in_last_catchup = types.MethodType(
            lambda _n: 10, n)

    ensure_view_change_complete(looper, txnPoolNodeSet)

    # Restore node behaviour
    for n, old_method in zip(txnPoolNodeSet, old_methods):
        n.num_txns_caught_up_in_last_catchup = old_method
def test_last_ordered_3pc_not_reset_if_more_than_new_view(
        txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client):
    """
    Check that if last_ordered_3pc's viewNo on a Replica is greater than the new viewNo after view change,
    then last_ordered_3pc is not reset.
    It can be that last_ordered_3pc was set for the next view, since it's set during catch-up

    """
    old_view_no = checkViewNoForNodes(txnPoolNodeSet)
    for node in txnPoolNodeSet:
        node.master_replica.last_ordered_3pc = (old_view_no + 2, 100)

    ensure_view_change_complete(looper, txnPoolNodeSet, customTimeout=60)

    for node in txnPoolNodeSet:
        assert (old_view_no + 2, 100) == node.master_replica.last_ordered_3pc
def test_last_ordered_3pc_not_reset_if_less_than_new_view(
        txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client):
    """
    Check that if last_ordered_3pc's viewNo on a Replica is less than the new viewNo after view change,
    then last_ordered_3pc is not reset.
    """
    old_view_no = checkViewNoForNodes(txnPoolNodeSet)
    for node in txnPoolNodeSet:
        node.master_replica.last_ordered_3pc = (old_view_no, 100)

    ensure_view_change_complete(looper, txnPoolNodeSet, customTimeout=60)

    for node in txnPoolNodeSet:
        assert (old_view_no + 1, 1) == node.master_replica.last_ordered_3pc

    # Make sure the pool is working
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 5)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
Example no. 14
def test_last_ordered_3pc_not_reset_if_equal_to_new_view(
        txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client):
    """
    Check that if last_ordered_3pc's viewNo on a Replica is equal to the new viewNo after view change,
    then last_ordered_3pc is not reset.
    It can be that last_ordered_3pc was set for the next view, since it's set during catch-up

    """
    global batches_count
    old_view_no = checkViewNoForNodes(txnPoolNodeSet)
    for node in txnPoolNodeSet:
        node.master_replica.last_ordered_3pc = (old_view_no + 1, batches_count)
        node.master_replica._ordering_service.lastPrePrepareSeqNo = batches_count

    ensure_view_change_complete(looper, txnPoolNodeSet, customTimeout=60)
    batches_count += 1

    for node in txnPoolNodeSet:
        assert (old_view_no + 1,
                batches_count) == node.master_replica.last_ordered_3pc
def test_last_ordered_3pc_not_reset_if_less_than_new_view(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_client):
    """
    Check that if last_ordered_3pc's viewNo on a Replica is equal to the new viewNo after view change,
    then last_ordered_3pc is reset to (0,0).
    It can be that last_ordered_3pc was set for the previous view, since it's set during catch-up

    Example: a Node has last_ordered = (1, 300), and then the whole pool except this node restarted.
    The new viewNo is 0, but last_ordered is (1, 300), so all new requests will be discarded by this Node
    if we don't reset last_ordered_3pc
    """
    old_view_no = checkViewNoForNodes(txnPoolNodeSet)
    for node in txnPoolNodeSet:
        node.master_replica.last_ordered_3pc = (old_view_no, 100)

    ensure_view_change_complete(looper, txnPoolNodeSet, customTimeout=60)

    for node in txnPoolNodeSet:
        assert (old_view_no, 100) == node.master_replica.last_ordered_3pc

    # Make sure the pool is working
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
def test_restart_majority_to_lower_view(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath,
                                        sdk_pool_handle, sdk_wallet_client):
    # Add transaction to ledger
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)

    # Move to higher view
    ensure_view_change_complete(looper, txnPoolNodeSet)

    majority = txnPoolNodeSet[:3]
    minority = txnPoolNodeSet[3:]

    # Restart majority group
    tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
    majority_before_restart = majority.copy()
    restart_nodes(looper, txnPoolNodeSet, majority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=tm, start_one_by_one=False, wait_for_elections=False)
    ensureElectionsDone(looper, majority, instances_list=range(2))

    # Check that nodes in minority group are aware that they might have inconsistent 3PC state
    for node in minority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 1

    # Check that nodes in majority group didn't think they might have inconsistent 3PC state
    for node in majority_before_restart:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that nodes in majority group don't think they might have inconsistent 3PC state
    for node in majority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Restart minority group
    restart_nodes(looper, txnPoolNodeSet, minority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=tm, start_one_by_one=False)

    # Check that all nodes are still functional
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
def test_restart_half_to_lower_view(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath,
                                    sdk_pool_handle, sdk_wallet_client):
    # Add transaction to ledger
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)

    # Move to higher view
    ensure_view_change_complete(looper, txnPoolNodeSet)

    # Restart half of nodes
    tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
    nodes_before_restart = txnPoolNodeSet.copy()
    restart_nodes(looper, txnPoolNodeSet, txnPoolNodeSet[2:], tconf, tdir, allPluginsPath,
                  after_restart_timeout=tm, start_one_by_one=False)

    # Check that nodes didn't think they may have inconsistent 3PC state
    for node in nodes_before_restart:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that nodes don't think they may have inconsistent 3PC state
    for node in txnPoolNodeSet:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that all nodes are still functional
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle, num_reqs=2, num_batches=1)
Example no. 18
def test_primary_selection_after_primary_demotion_and_view_changes(
        looper, txnPoolNodeSet, stewardAndWalletForMasterNode,
        txnPoolMasterNodes):
    """
    Demote the primary and do multiple view changes forcing rotation of primaries.
    Demoted primary should be skipped without additional view changes.
    """

    viewNo0 = checkViewNoForNodes(txnPoolNodeSet)

    logger.info(
        "1. turn off the node which has the primary replica for the master "
        "instance, this should trigger a view change")
    master_node = txnPoolMasterNodes[0]
    client, wallet = stewardAndWalletForMasterNode
    node_data = {ALIAS: master_node.name, SERVICES: []}
    updateNodeData(looper, client, wallet, master_node, node_data)

    restNodes = [node for node in txnPoolNodeSet \
                    if node.name != master_node.name]
    ensureElectionsDone(looper, restNodes)

    viewNo1 = checkViewNoForNodes(restNodes)

    assert viewNo1 == viewNo0 + 1
    assert master_node.viewNo == viewNo0
    assert len(restNodes[0].replicas) == 1  # only one instance left
    assert restNodes[0].replicas[0].primaryName != master_node.name

    # ensure pool is working properly
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, numReqs=3)

    logger.info("2. force view change 2 and check final viewNo")
    ensure_view_change_complete(looper, restNodes)

    viewNo2 = checkViewNoForNodes(restNodes)
    assert restNodes[0].replicas[0].primaryName != master_node.name
    assert viewNo2 == viewNo1 + 1

    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, numReqs=3)

    logger.info("3. force view change 3 and check final viewNo")
    ensure_view_change_complete(looper, restNodes)
    viewNo3 = checkViewNoForNodes(restNodes)
    assert restNodes[0].replicas[0].primaryName != master_node.name
    assert viewNo3 == viewNo2 + 1

    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, numReqs=3)

    logger.info("4. force view change 4 and check final viewNo")
    ensure_view_change_complete(looper, restNodes)
    viewNo4 = checkViewNoForNodes(restNodes)
    assert restNodes[0].replicas[0].primaryName != master_node.name
    assert viewNo4 == viewNo3 + 1

    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, numReqs=3)
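A minimal illustration (not Plenum's actual primary-selection algorithm) of why a demoted primary can be skipped without extra view changes: if primaries are picked round-robin over the current validator list, removing a node from that list removes it from the rotation automatically:

def primary_for_view(validators, view_no):
    # validators: ordered list of currently active node names (demoted nodes excluded)
    return validators[view_no % len(validators)]

validators = ["Alpha", "Beta", "Gamma", "Delta"]
active = [v for v in validators if v != "Alpha"]   # "Alpha" demoted

# Over many view changes the demoted node never becomes primary again.
assert all(primary_for_view(active, v) != "Alpha" for v in range(10))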
def txnPoolNodeSet(txnPoolNodeSet, looper, sdk_pool_handle, sdk_wallet_steward,
                   tconf, tdirWithPoolTxns, allPluginsPath):
    logger.debug("Do several view changes to round the list of primaries")

    assert txnPoolNodeSet[0].viewNo == len(txnPoolNodeSet) - 1

    logger.debug("Do view change to reach viewNo {}".format(txnPoolNodeSet[0].viewNo + 1))
    ensure_view_change_complete(looper, txnPoolNodeSet)
    logger.debug("Send requests to ensure that pool is working properly, "
                 "viewNo: {}".format(txnPoolNodeSet[0].viewNo))
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)

    logger.debug("Pool is ready, current viewNo: {}".format(txnPoolNodeSet[0].viewNo))

    # TODO find out and fix why an additional view change could happen
    # because of a degraded master. It's critical for the current test that
    # the view change is completed by the time the new node is joining.
    # Thus, disable the master degradation check as it won't impact the case
    # and guarantees the necessary state.
    for node in txnPoolNodeSet:
        node.monitor.isMasterDegraded = lambda: False

    return txnPoolNodeSet
def test_view_change_gc_in_between_3pc_all_nodes_delays(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    Test that garbage collector compares the whole 3PC key (viewNo, ppSeqNo)
    and does not remove messages from node's queues that have higher
    viewNo than last ordered one even if their ppSeqNo are less or equal
    """

    numNodes = len(txnPoolNodeSet)
    viewNo = checkViewNoForNodes(txnPoolNodeSet)

    # 1 send two messages one by one separately to make
    #  node pool working with two batches
    #    -> last_ordered_3pc = (+0, 2) [+0 means from the initial state]
    #       (last_ordered_3pc here and further is tracked
    #       for master instances only because non-master ones have
    #       specific logic of its management which we don't care about in
    #       this test, see Replica::_setup_for_non_master)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    last_ordered_3pc = (viewNo, 2)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 2)

    # 2 do view change
    #    -> GC should remove it from nodes' queues
    #    -> viewNo = +1
    ensure_view_change_complete(looper, txnPoolNodeSet)

    viewNo = checkViewNoForNodes(txnPoolNodeSet, viewNo + 1)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 0)

    # 3 slow processing 3PC messages for all nodes (all replica instances)
    #   randomly and send one more message
    #    -> not ordered (last_ordered_3pc still equal (+0, 2)) but primaries
    #       should at least send PRE-PREPAREs
    # TODO: this might not be enough to wait for at least the primary
    # to have sent a PRE-PREPARE
    propagationTimeout = waits.expectedClientRequestPropagationTime(numNodes)
    delay_3pc_messages(txnPoolNodeSet,
                       0,
                       delay=propagationTimeout * 2)
    delay_3pc_messages(txnPoolNodeSet,
                       1,
                       delay=propagationTimeout * 2)
    requests = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)

    def checkPrePrepareSentAtLeastByPrimary():
        for node in txnPoolNodeSet:
            for replica in node.replicas.values():
                if replica.isPrimary:
                    assert len(replica.sentPrePrepares)

    looper.run(eventually(checkPrePrepareSentAtLeastByPrimary,
                          retryWait=0.1,
                          timeout=propagationTimeout))
    # 4 do view change
    #    -> GC shouldn't remove anything because
    #       last_ordered_3pc (+0, 1) < last message's 3pc key (+1, 1)
    #    -> viewNo = 2
    ensure_view_change_complete(looper, txnPoolNodeSet)

    viewNoNew = checkViewNoForNodes(txnPoolNodeSet)
    # another view change could happen because of slow nodes
    assert viewNoNew - viewNo in (1, 2)
    viewNo = viewNoNew
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 1)

    # 5 reset delays and wait for replies
    #    -> new primaries should send new 3pc for last message
    #       with 3pc key (+2, 1)
    #    -> they should be ordered
    #    -> last_ordered_3pc = (+2, 1)
    reset_delays_and_process_delayeds(txnPoolNodeSet)
    sdk_get_replies(looper, [requests])

    checkViewNoForNodes(txnPoolNodeSet, viewNo)
    last_ordered_3pc = (viewNo, 1)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 1)

    # 6 do view change
    #    -> GC should remove them
    ensure_view_change_complete(looper, txnPoolNodeSet)

    viewNo = checkViewNoForNodes(txnPoolNodeSet, viewNo + 1)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 0)
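A minimal sketch (not the real garbage collector) of the property this test checks: 3PC keys are compared as whole (viewNo, ppSeqNo) tuples, so a message stamped with a later view survives GC even when its ppSeqNo is smaller than or equal to the last ordered one:

def can_be_garbage_collected(msg_3pc_key, last_ordered_3pc):
    # Python compares tuples lexicographically: viewNo first, then ppSeqNo.
    return msg_3pc_key <= last_ordered_3pc

last_ordered = (0, 2)
assert can_be_garbage_collected((0, 1), last_ordered)        # same view, already ordered
assert not can_be_garbage_collected((1, 1), last_ordered)    # later view, must be kept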
def test_view_change_gc_in_between_3pc_all_nodes_delays(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    Test that garbage collector compares the whole 3PC key (viewNo, ppSeqNo)
    and does not remove messages from node's queues that have higher
    viewNo than last ordered one even if their ppSeqNo are less or equal
    """

    numNodes = len(txnPoolNodeSet)
    viewNo = checkViewNoForNodes(txnPoolNodeSet)

    # 1 send two messages one by one separately to make
    #  node pool working with two batches
    #    -> last_ordered_3pc = (+0, 2) [+0 means from the initial state]
    #       (last_ordered_3pc here and further is tracked
    #       for master instances only because non-master ones have
    #       specific logic of its management which we don't care about in
    #       this test, see Replica::_setup_for_non_master)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    batches_count = get_pp_seq_no(txnPoolNodeSet)
    last_ordered_3pc = (viewNo, batches_count)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 2)

    # 2 do view change
    #    -> GC should remove it from nodes' queues
    #    -> viewNo = +1
    ensure_view_change_complete(looper, txnPoolNodeSet)
    batches_count += 1

    viewNo = checkViewNoForNodes(txnPoolNodeSet, viewNo + 1)
    looper.run(
        eventually(check_nodes_last_ordered_3pc, txnPoolNodeSet,
                   (viewNo, batches_count)))
    check_nodes_requests_size(txnPoolNodeSet, 0)

    # 3 slow processing 3PC messages for all nodes (all replica instances)
    #   randomly and send one more message
    #    -> not ordered (last_ordered_3pc still equal (+0, 2)) but primaries
    #       should at least send PRE-PREPAREs
    # TODO: this might not be enough to wait for at least the primary
    # to have sent a PRE-PREPARE
    propagationTimeout = waits.expectedClientRequestPropagationTime(numNodes)
    delay_3pc_messages(txnPoolNodeSet, 0, delay=propagationTimeout * 2)
    delay_3pc_messages(txnPoolNodeSet, 1, delay=propagationTimeout * 2)
    requests = sdk_send_random_request(looper, sdk_pool_handle,
                                       sdk_wallet_client)

    def checkPrePrepareSentAtLeastByPrimary():
        for node in txnPoolNodeSet:
            for replica in node.replicas.values():
                if replica.isPrimary:
                    assert len(replica._ordering_service.sent_preprepares)

    looper.run(
        eventually(checkPrePrepareSentAtLeastByPrimary,
                   retryWait=0.1,
                   timeout=propagationTimeout))
    # 4 do view change
    #    -> GC shouldn't remove anything because
    #       last_ordered_3pc (+0, 1) < last message's 3pc key (+1, 1)
    #    -> viewNo = 2
    ensure_view_change_complete(looper, txnPoolNodeSet)
    batches_count += 1

    viewNoNew = checkViewNoForNodes(txnPoolNodeSet)
    # another view change could happen because of slow nodes
    assert viewNoNew - viewNo in (1, 2)
    viewNo = viewNoNew
    check_nodes_last_ordered_3pc(txnPoolNodeSet,
                                 (last_ordered_3pc[0] + 1, batches_count - 1))
    check_nodes_requests_size(txnPoolNodeSet, 1)

    # 5 reset delays and wait for replies
    #    -> new primaries should send new 3pc for last message
    #       with 3pc key (+2, 1)
    #    -> they should be ordered
    #    -> last_ordered_3pc = (+2, 1)
    reset_delays_and_process_delayeds(txnPoolNodeSet)
    sdk_get_replies(looper, [requests])
    batches_count += 1

    checkViewNoForNodes(txnPoolNodeSet, viewNo)
    last_ordered_3pc = (viewNo, batches_count)
    check_nodes_last_ordered_3pc(txnPoolNodeSet, last_ordered_3pc)
    check_nodes_requests_size(txnPoolNodeSet, 1)

    # 6 do view change
    #    -> GC should remove them
    ensure_view_change_complete(looper, txnPoolNodeSet)
    batches_count += 1

    viewNo = checkViewNoForNodes(txnPoolNodeSet, viewNo + 1)
    check_nodes_last_ordered_3pc(txnPoolNodeSet,
                                 (last_ordered_3pc[0] + 1, batches_count))
    check_nodes_requests_size(txnPoolNodeSet, 0)
def view_setup(looper, txnPoolNodeSet):
    for i in range(2):
        ensure_view_change_complete(looper, txnPoolNodeSet)
    for node in txnPoolNodeSet:
        assert node.viewNo == 2
def test_primary_selection_after_demoted_node_promotion(
        looper, txnPoolNodeSet, sdk_node_theta_added,
        sdk_pool_handle,
        tconf, tdir, allPluginsPath):
    """
    Demote non-primary node
    Promote it again
    Restart one node to get the following difference with others:
        - not restarted - node registry and related pool parameters are kept
          in memory in some state which is expected as the same as
          in the pool ledger
        - restarted one - loaded node registry and pool parameters from
          the pool ledger at startup
    Do several view changes and check that all nodes will choose previously
        demoted / promoted node as a primary for some instanse
    """

    new_steward_wallet, new_node = sdk_node_theta_added

    # viewNo0 = checkViewNoForNodes(txnPoolNodeSet)
    check_all_nodes_the_same_pool_list(txnPoolNodeSet)

    logger.info("1. Demote node Theta")

    node_dest = hexToFriendly(new_node.nodestack.verhex)
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_dest, new_node.name, None, None, None, None,
                         [])
    remainingNodes = list(set(txnPoolNodeSet) - {new_node})

    check_all_nodes_the_same_pool_list(remainingNodes)
    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet, 3)
    # TODO view change might happen unexpectedly by unknown reason
    # checkViewNoForNodes(remainingNodes, expectedViewNo=viewNo0)

    logger.info("2. Promote node Theta back")

    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_dest, new_node.name, None, None, None, None,
                         [VALIDATOR])

    check_all_nodes_the_same_pool_list(txnPoolNodeSet)
    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet, 3)
    # checkViewNoForNodes(txnPoolNodeSet, expectedViewNo=viewNo0)

    logger.info("3. Restart one node")
    stopped_node = txnPoolNodeSet[0]

    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet,
                                            stopped_node, stopNode=True)
    looper.removeProdable(stopped_node)
    remainingNodes = list(set(txnPoolNodeSet) - {stopped_node})
    ensureElectionsDone(looper, remainingNodes)
    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet, 3)
    # checkViewNoForNodes(remainingNodes, expectedViewNo=viewNo0)

    # start node
    restartedNode = start_stopped_node(stopped_node, looper, tconf,
                                       tdir, allPluginsPath)
    txnPoolNodeSet = remainingNodes + [restartedNode]
    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet, 3)
    # checkViewNoForNodes(txnPoolNodeSet, expectedViewNo=viewNo0)

    logger.info("4. Do view changes to check that nodeTheta will be chosen "
                "as a primary for some instance by all nodes after some rounds")
    while txnPoolNodeSet[0].viewNo < 4:
        ensure_view_change_complete(looper, txnPoolNodeSet)
        # ensure pool is working properly
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  new_steward_wallet, 3)
def test_primary_selection_after_primary_demotion_and_view_changes(looper, txnPoolNodeSet,
                                                                   sdk_pool_handle,
                                                                   sdk_wallet_steward,
                                                                   txnPoolMasterNodes):
    """
    Demote the primary and do multiple view changes forcing rotation of primaries.
    Demoted primary should be skipped without additional view changes.
    """

    viewNo0 = checkViewNoForNodes(txnPoolNodeSet)

    logger.info("1. turn off the node which has primary replica for master instanse, "
                " this should trigger view change")
    master_node = txnPoolMasterNodes[0]
    node_dest = hexToFriendly(master_node.nodestack.verhex)
    sdk_send_update_node(looper, sdk_wallet_steward,
                         sdk_pool_handle,
                         node_dest, master_node.name,
                         None, None,
                         None, None,
                         services=[])

    restNodes = [node for node in txnPoolNodeSet \
                 if node.name != master_node.name]
    ensureElectionsDone(looper, restNodes)

    viewNo1 = checkViewNoForNodes(restNodes)

    assert viewNo1 == viewNo0 + 1
    assert master_node.viewNo == viewNo0
    assert len(restNodes[0].replicas) == 1  # only one instance left
    assert restNodes[0].replicas[0].primaryName != master_node.name

    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)

    logger.info("2. force view change 2 and check final viewNo")
    ensure_view_change_complete(looper, restNodes)

    viewNo2 = checkViewNoForNodes(restNodes)
    assert restNodes[0].replicas[0].primaryName != master_node.name
    assert viewNo2 == viewNo1 + 1

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)

    logger.info("3. force view change 3 and check final viewNo")
    ensure_view_change_complete(looper, restNodes)
    viewNo3 = checkViewNoForNodes(restNodes)
    assert restNodes[0].replicas[0].primaryName != master_node.name
    assert viewNo3 == viewNo2 + 1

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)

    logger.info("4. force view change 4 and check final viewNo")
    ensure_view_change_complete(looper, restNodes)
    viewNo4 = checkViewNoForNodes(restNodes)
    assert restNodes[0].replicas[0].primaryName != master_node.name
    assert viewNo4 == viewNo3 + 1

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)
def view_setup(looper, txnPoolNodeSet):
    for i in range(2):
        ensure_view_change_complete(looper, txnPoolNodeSet)
    for node in txnPoolNodeSet:
        assert node.viewNo == 2
Example no. 26
def test_primary_selection_after_demoted_node_promotion(
        looper, txnPoolNodeSet, sdk_node_theta_added, sdk_pool_handle, tconf,
        tdir, allPluginsPath):
    """
    Demote non-primary node
    Promote it again
    Restart one node to get the following difference with others:
        - not restarted - node registry and related pool parameters are kept
          in memory in some state which is expected as the same as
          in the pool ledger
        - restarted one - loaded node registry and pool parameters from
          the pool ledger at startup
    Do several view changes and check that all nodes will choose previously
        demoted / promoted node as a primary for some instanse
    """

    new_steward_wallet, new_node = sdk_node_theta_added

    # viewNo0 = checkViewNoForNodes(txnPoolNodeSet)
    check_all_nodes_the_same_pool_list(txnPoolNodeSet)

    logger.info("1. Demote node Theta")

    node_dest = hexToFriendly(new_node.nodestack.verhex)
    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_dest, new_node.name, None, None, None, None, [])
    remainingNodes = list(set(txnPoolNodeSet) - {new_node})

    check_all_nodes_the_same_pool_list(remainingNodes)
    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet, 3)
    # TODO view change might happen unexpectedly by unknown reason
    # checkViewNoForNodes(remainingNodes, expectedViewNo=viewNo0)

    logger.info("2. Promote node Theta back")

    sdk_send_update_node(looper, new_steward_wallet, sdk_pool_handle,
                         node_dest, new_node.name, None, None, None, None,
                         [VALIDATOR])

    check_all_nodes_the_same_pool_list(txnPoolNodeSet)
    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet, 3)
    # checkViewNoForNodes(txnPoolNodeSet, expectedViewNo=viewNo0)

    logger.info("3. Restart one node")
    stopped_node = txnPoolNodeSet[0]

    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            stopped_node,
                                            stopNode=True)
    looper.removeProdable(stopped_node)
    remainingNodes = list(set(txnPoolNodeSet) - {stopped_node})
    ensureElectionsDone(looper, remainingNodes)
    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet, 3)
    # checkViewNoForNodes(remainingNodes, expectedViewNo=viewNo0)

    # start node
    restartedNode = start_stopped_node(stopped_node, looper, tconf, tdir,
                                       allPluginsPath)
    txnPoolNodeSet = remainingNodes + [restartedNode]
    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet, 3)
    # checkViewNoForNodes(txnPoolNodeSet, expectedViewNo=viewNo0)

    logger.info(
        "4. Do view changes to check that nodeTheta will be chosen "
        "as a primary for some instance by all nodes after some rounds")
    while txnPoolNodeSet[0].viewNo < 4:
        ensure_view_change_complete(looper, txnPoolNodeSet)
        # ensure pool is working properly
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  new_steward_wallet, 3)
Example no. 27
def test_revert_auth_rule_changing(looper, txnPoolNodeSet, sdk_wallet_trustee,
                                   sdk_wallet_steward, sdk_pool_handle):
    node_stashers = [n.nodeIbStasher for n in txnPoolNodeSet]
    wh, _ = sdk_wallet_trustee
    new_steward_did, new_steward_verkey = create_verkey_did(looper, wh)
    new_steward_did2, new_steward_verkey2 = create_verkey_did(looper, wh)
    """We try to change rule for adding new steward. For this case we """
    changed_constraint = AuthConstraint(role=STEWARD, sig_count=1)
    action = AuthActionAdd(txn_type=NYM, field=ROLE, value=STEWARD)
    with delay_rules_without_processing(
            node_stashers, pDelay(), cDelay(),
            msg_rep_delay(types_to_delay=[PREPARE, COMMIT])):
        sdk_send_and_check_auth_rule_request(
            looper,
            sdk_pool_handle,
            sdk_wallet_trustee,
            auth_action=ADD_PREFIX,
            auth_type=action.txn_type,
            field=action.field,
            new_value=action.value,
            old_value=None,
            constraint=changed_constraint.as_dict,
            no_wait=True)
        looper.runFor(waits.expectedPrePrepareTime(len(txnPoolNodeSet)))
        """
        Try to add new steward by already existed trustee.
        Validation should raise exception because we change uncommitted state
        by adding new rule, that "Only steward can add new steward"
        """
        with pytest.raises(RequestRejectedException,
                           match="Not enough STEWARD signatures"):
            sdk_add_new_nym(looper,
                            sdk_pool_handle,
                            sdk_wallet_trustee,
                            'newSteward1',
                            STEWARD_STRING,
                            dest=new_steward_did,
                            verkey=new_steward_verkey)
        looper.runFor(waits.expectedPrePrepareTime(len(txnPoolNodeSet)))
        """
        Catchup should revert config_state and discard rule changing
        """
        for n in txnPoolNodeSet:
            n.start_catchup()
        # clear all request queues so that the AuthRule txns are not re-sent
        for n in txnPoolNodeSet:
            n.requests.clear()
            for r in n.replicas.values():
                r._ordering_service.requestQueues[DOMAIN_LEDGER_ID].clear()
                r._ordering_service.requestQueues[CONFIG_LEDGER_ID].clear()
        looper.run(
            eventually(
                lambda nodes: assertExp(
                    all([n.mode == Mode.participating for n in nodes])),
                txnPoolNodeSet))

    # do view change to not send PrePrepares with the same ppSeqNo and viewNo
    ensure_view_change_complete(looper, txnPoolNodeSet)
    """
    Try to create new steward by steward
    We can not do this, because AUTH_RULE txn was reverted
    """
    with pytest.raises(RequestRejectedException,
                       match="Not enough TRUSTEE signatures"):
        sdk_add_new_nym(looper,
                        sdk_pool_handle,
                        sdk_wallet_steward,
                        'newSteward2',
                        STEWARD_STRING,
                        dest=new_steward_did2,
                        verkey=new_steward_verkey2)
def test_primary_selection_after_demoted_node_promotion(
        looper, txnPoolNodeSet, nodeThetaAdded,
        tconf, tdirWithPoolTxns, allPluginsPath):
    """
    Demote non-primary node
    Promote it again
    Restart one node to get the following difference with others:
        - not restarted - node registry and related pool parameters are kept
          in memory in some state which is expected as the same as
          in the pool ledger
        - restarted one - loaded node registry and pool parameters from
          the pool ledger at startup
    Do several view changes and check that all nodes will choose previously
        demoted / promoted node as a primary for some instanse
    """

    nodeThetaSteward, nodeThetaStewardWallet, nodeTheta = nodeThetaAdded

    # viewNo0 = checkViewNoForNodes(txnPoolNodeSet)
    check_all_nodes_the_same_pool_list(txnPoolNodeSet)

    logger.info("1. Demote node Theta")

    node_data = {
        ALIAS: nodeTheta.name,
        SERVICES: []
    }
    updateNodeData(looper, nodeThetaSteward,
                   nodeThetaStewardWallet, nodeTheta, node_data)
    remainingNodes = list(set(txnPoolNodeSet) - {nodeTheta})

    check_all_nodes_the_same_pool_list(remainingNodes)
    # ensure pool is working properly
    sendReqsToNodesAndVerifySuffReplies(looper, nodeThetaStewardWallet,
                                        nodeThetaSteward, numReqs=3)
    # TODO view change might happen unexpectedly by unknown reason
    # checkViewNoForNodes(remainingNodes, expectedViewNo=viewNo0)

    logger.info("2. Promote node Theta back")

    node_data = {
        ALIAS: nodeTheta.name,
        SERVICES: [VALIDATOR]
    }
    updateNodeData(looper, nodeThetaSteward,
                   nodeThetaStewardWallet, nodeTheta, node_data)

    check_all_nodes_the_same_pool_list(txnPoolNodeSet)
    # ensure pool is working properly
    sendReqsToNodesAndVerifySuffReplies(looper, nodeThetaStewardWallet,
                                        nodeThetaSteward, numReqs=3)
    # checkViewNoForNodes(txnPoolNodeSet, expectedViewNo=viewNo0)

    logger.info("3. Restart one node")
    stopped_node = txnPoolNodeSet[0]

    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet,
                                            stopped_node, stopNode=True)
    looper.removeProdable(stopped_node)
    remainingNodes = list(set(txnPoolNodeSet) - {stopped_node})
    # ensure pool is working properly
    sendReqsToNodesAndVerifySuffReplies(looper, nodeThetaStewardWallet,
                                        nodeThetaSteward, numReqs=3)
    # checkViewNoForNodes(remainingNodes, expectedViewNo=viewNo0)

    # start node
    restartedNode = start_stopped_node(stopped_node, looper, tconf,
                                       tdirWithPoolTxns, allPluginsPath)
    txnPoolNodeSet = remainingNodes + [restartedNode]
    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
    # ensure pool is working properly
    sendReqsToNodesAndVerifySuffReplies(looper, nodeThetaStewardWallet,
                                        nodeThetaSteward, numReqs=3)
    # checkViewNoForNodes(txnPoolNodeSet, expectedViewNo=viewNo0)

    logger.info("4. Do view changes to check that nodeTheta will be chosen "
                "as a primary for some instance by all nodes after some rounds")
    while txnPoolNodeSet[0].viewNo < 4:
        ensure_view_change_complete(looper, txnPoolNodeSet)
        # ensure pool is working properly
        sendReqsToNodesAndVerifySuffReplies(looper, nodeThetaStewardWallet,
                                            nodeThetaSteward, numReqs=3)
Example no. 29
def test_primary_selection_after_primary_demotion_and_view_changes(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward,
        txnPoolMasterNodes):
    """
    Demote the primary and do multiple view changes forcing rotation of primaries.
    Demoted primary should be skipped without additional view changes.
    """

    viewNo0 = checkViewNoForNodes(txnPoolNodeSet)

    logger.info(
        "1. turn off the node which has the primary replica for the master "
        "instance, this should trigger a view change")
    master_node = txnPoolMasterNodes[0]
    node_dest = hexToFriendly(master_node.nodestack.verhex)
    sdk_send_update_node(looper,
                         sdk_wallet_steward,
                         sdk_pool_handle,
                         node_dest,
                         master_node.name,
                         None,
                         None,
                         None,
                         None,
                         services=[])

    restNodes = [node for node in txnPoolNodeSet \
                 if node.name != master_node.name]
    ensureElectionsDone(looper, restNodes)

    viewNo1 = checkViewNoForNodes(restNodes)

    assert viewNo1 == viewNo0 + 1
    assert master_node.viewNo == viewNo0
    assert len(restNodes[0].replicas) == 1  # only one instance left
    assert restNodes[0].replicas[0].primaryName != master_node.name

    # ensure pool is working properly
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)

    logger.info("2. force view change 2 and check final viewNo")
    ensure_view_change_complete(looper, restNodes)

    viewNo2 = checkViewNoForNodes(restNodes)
    assert restNodes[0].replicas[0].primaryName != master_node.name
    assert viewNo2 == viewNo1 + 1

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)

    logger.info("3. force view change 3 and check final viewNo")
    ensure_view_change_complete(looper, restNodes)
    viewNo3 = checkViewNoForNodes(restNodes)
    assert restNodes[0].replicas[0].primaryName != master_node.name
    assert viewNo3 == viewNo2 + 1

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)

    logger.info("4. force view change 4 and check final viewNo")
    ensure_view_change_complete(looper, restNodes)
    viewNo4 = checkViewNoForNodes(restNodes)
    assert restNodes[0].replicas[0].primaryName != master_node.name
    assert viewNo4 == viewNo3 + 1

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 3)