Code example #1
def test_catchup_with_reask_cp(txnPoolNodeSet, looper, sdk_pool_handle,
                               sdk_wallet_steward, tconf, tdir,
                               allPluginsPath):
    '''
    Start a catchup
    Delay ConsistencyProofs twice
    Check that the catchup finished
    '''
    lagged_node = txnPoolNodeSet[-1]
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)
    with delay_rules_without_processing(
            lagged_node.nodeIbStasher, delay_3pc(),
            msg_rep_delay(types_to_delay=[COMMIT])):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_steward, 2)
        lagged_node.nodeIbStasher.drop_delayeds()

    with delay_rules_without_processing(lagged_node.nodeIbStasher, cpDelay()):
        lagged_node.start_catchup()

        def chk():
            cp_count = 0
            for msg in lagged_node.nodeIbStasher.delayeds:
                if isinstance(msg.item[0], ConsistencyProof):
                    cp_count += 1
            assert cp_count >= (len(txnPoolNodeSet) - 1) * 2
            lagged_node.nodeIbStasher.drop_delayeds()

        looper.run(eventually(chk))
    waitNodeDataEquality(looper,
                         lagged_node,
                         *txnPoolNodeSet,
                         exclude_from_check=['check_last_ordered_3pc_backup'])
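
Most of the examples on this page follow the same pattern: delay_rules_without_processing attaches delay rules to a node's nodeIbStasher for the duration of a with-block, messages matched by a rule are stashed in the stasher's delayeds instead of being processed, and drop_delayeds() discards whatever has been stashed. Below is a minimal, self-contained sketch of that pattern in plain Python; ToyStasher and toy_delay_rules_without_processing are illustrative stand-ins, not the plenum implementation.

from contextlib import contextmanager

class ToyStasher:
    # Toy stand-in for nodeIbStasher: messages matched by an active rule are stashed.
    def __init__(self):
        self.rules = []
        self.delayeds = []

    def process(self, msg):
        if any(rule(msg) for rule in self.rules):
            self.delayeds.append(msg)   # stashed, not handled while the rule is active
            return None
        return msg                      # handled normally

    def drop_delayeds(self):
        self.delayeds.clear()           # discard everything stashed so far

@contextmanager
def toy_delay_rules_without_processing(stasher, *rules):
    # Install the rules for the duration of the block; stashed messages are not replayed on exit.
    stasher.rules.extend(rules)
    try:
        yield
    finally:
        for rule in rules:
            stasher.rules.remove(rule)

stasher = ToyStasher()
with toy_delay_rules_without_processing(stasher, lambda m: m["type"] == "COMMIT"):
    assert stasher.process({"type": "COMMIT"}) is None        # delayed
    assert stasher.process({"type": "PREPARE"}) is not None   # passes through
    assert len(stasher.delayeds) == 1
    stasher.drop_delayeds()                                    # like nodeIbStasher.drop_delayeds() above
assert stasher.process({"type": "COMMIT"}) is not None         # rule no longer active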
Code example #2
def test_backup_can_order_after_catchup(txnPoolNodeSet, looper,
                                        sdk_pool_handle, sdk_wallet_client):
    # We expect that after the view change Gamma will be the primary on the backup instance
    delayed_node = txnPoolNodeSet[-2]
    with delay_rules_without_processing(delayed_node.nodeIbStasher,
                                        pDelay(instId=MASTER_REPLICA_INDEX),
                                        cDelay(instId=MASTER_REPLICA_INDEX),
                                        ppDelay(instId=MASTER_REPLICA_INDEX)):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, REQUEST_COUNT)
        with delay_rules_without_processing(
            [n.nodeIbStasher for n in txnPoolNodeSet],
                old_view_pp_request_delay()):
            ensure_view_change(looper, txnPoolNodeSet)
            ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
            assert delayed_node.replicas._replicas[BACKUP_INST_ID].isPrimary
            # Check that the backup cannot order
            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, REQUEST_COUNT)
            for n in txnPoolNodeSet:
                assert n.replicas._replicas[BACKUP_INST_ID].last_ordered_3pc[
                    1] == 0
            # Forcing catchup
            delayed_node.start_catchup()
            ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

            # Check that the backup can order after catchup
            b_pp_seq_no_before = delayed_node.replicas._replicas[
                BACKUP_INST_ID].last_ordered_3pc[1]
            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, REQUEST_COUNT)
            assert delayed_node.replicas._replicas[BACKUP_INST_ID].last_ordered_3pc[1] == \
                   b_pp_seq_no_before + REQUEST_COUNT
Code example #3
def test_re_order_pre_prepares_no_pre_prepares(looper, txnPoolNodeSet,
                                               sdk_wallet_client, sdk_pool_handle):
    # 1. Drop PrePrepares, Prepares and Commits on the 4th node
    # Order a couple of requests on Nodes 1-3
    lagging_node = txnPoolNodeSet[-1]
    other_nodes = txnPoolNodeSet[:-1]
    with delay_rules_without_processing(lagging_node.nodeIbStasher, delay_3pc()):
        sdk_send_random_and_check(looper, txnPoolNodeSet,
                                  sdk_pool_handle, sdk_wallet_client, 3)
        assert all(n.master_last_ordered_3PC == (0, 3) for n in other_nodes)

    with delay_rules_without_processing(lagging_node.nodeIbStasher,
                                        msg_rep_delay(types_to_delay=[PREPREPARE, PREPARE, COMMIT])):
        # 2. simulate view change start so that
        # all PrePrepares/Prepares/Commits are cleared
        # and uncommitted txns are reverted
        for n in txnPoolNodeSet:
            n.replicas.send_to_internal_bus(ViewChangeStarted(view_no=1))
            master_ordering_service = n.master_replica._ordering_service
            assert not master_ordering_service.prePrepares
            assert not master_ordering_service.prepares
            assert not master_ordering_service.commits
            ledger = n.db_manager.ledgers[DOMAIN_LEDGER_ID]
            state = n.db_manager.states[DOMAIN_LEDGER_ID]
            assert len(ledger.uncommittedTxns) == 0
            assert ledger.uncommitted_root_hash == ledger.tree.root_hash
            assert state.committedHead == state.head

        # check that all nodes but the lagging one have old_view_pps stored
        for n in other_nodes:
            assert n.master_replica._ordering_service.old_view_preprepares
        assert not lagging_node.master_replica._ordering_service.old_view_preprepares

        # 3. Simulate View Change finish to re-order the same PrePrepare
        assert lagging_node.master_last_ordered_3PC == (0, 0)
        new_master = txnPoolNodeSet[1]
        batches = sorted([preprepare_to_batch_id(pp) for _, pp in
                         new_master.master_replica._ordering_service.old_view_preprepares.items()])
        new_view_msg = NewView(viewNo=0,
                               viewChanges=[],
                               checkpoint=None,
                               batches=batches)
        new_view_chk_applied_msg = NewViewCheckpointsApplied(view_no=0,
                                                             view_changes=[],
                                                             checkpoint=None,
                                                             batches=batches)
        for n in txnPoolNodeSet:
            n.master_replica._consensus_data.new_view = new_view_msg
            n.master_replica._consensus_data.prev_view_prepare_cert = batches[-1].pp_seq_no
            n.master_replica._ordering_service._bus.send(new_view_chk_applied_msg)

        # 4. Make sure that nodes 1-3 (which already ordered the requests) sent Prepares and Commits so that
        #    the requests were eventually ordered on Node4 as well
        waitNodeDataEquality(looper, lagging_node, *other_nodes, customTimeout=60)
        assert lagging_node.master_last_ordered_3PC == (0, 4)

    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
Code example #4
def test_revert_xfer_with_fees_before_catchup(looper, helpers,
                                              nodeSetWithIntegratedTokenPlugin,
                                              sdk_pool_handle, fees,
                                              xfer_mint_tokens,
                                              xfer_addresses):
    nodes = nodeSetWithIntegratedTokenPlugin
    node_stashers = [n.nodeIbStasher for n in nodes]
    helpers.general.do_set_fees(fees)
    [address_giver, address_receiver] = xfer_addresses
    inputs = helpers.general.get_utxo_addresses([address_giver])[0]
    outputs = [{
        ADDRESS: address_receiver,
        AMOUNT: 1000 - fees[XFER_PUBLIC_FEES_ALIAS]
    }]
    request = helpers.request.transfer(inputs, outputs)
    with delay_rules_without_processing(node_stashers, cDelay(), pDelay()):
        helpers.sdk.send_request_objects([request])
        looper.runFor(waits.expectedPrePrepareTime(len(nodes)))
        for n in nodes:
            n.start_catchup()
        for n in nodes:
            looper.run(
                eventually(lambda: assertExp(n.mode == Mode.participating)))
        for n in nodes:
            looper.run(
                eventually(check_state, n, True, retryWait=0.2, timeout=15))
    ensure_all_nodes_have_same_data(looper, nodes)
Code example #5
def test_lag_less_then_catchup(looper,
                               txnPoolNodeSet,
                               sdk_pool_handle,
                               sdk_wallet_client):
    delayed_node = txnPoolNodeSet[-1]
    other_nodes = list(set(txnPoolNodeSet) - {delayed_node})
    current_view_no = checkViewNoForNodes(txnPoolNodeSet)
    last_ordered_before = delayed_node.master_replica.last_ordered_3pc
    with delay_rules_without_processing(delayed_node.nodeIbStasher, cDelay()):
        # Send txns for stable checkpoint
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, CHK_FREQ)
        # Check that all of the non-delayed nodes have a stable checkpoint
        for n in other_nodes:
            assert n.master_replica._consensus_data.stable_checkpoint == CHK_FREQ

        # Send another txn. This txn will be reordered after view_change
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)
        trigger_view_change(txnPoolNodeSet)
        ensureElectionsDone(looper, txnPoolNodeSet)

        assert delayed_node.master_replica.last_ordered_3pc == last_ordered_before

    # Send txns to stabilize the checkpoint on the other nodes
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, CHK_FREQ - 1)

    pool_pp_seq_no = get_pp_seq_no(other_nodes)
    looper.run(eventually(lambda: assertExp(delayed_node.master_replica.last_ordered_3pc[1] == pool_pp_seq_no)))
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
Code example #6
def test_delay_commits_for_one_node(looper,
                                    txnPoolNodeSet,
                                    sdk_pool_handle,
                                    sdk_wallet_client,
                                    slow_node_is_next_primary,
                                    vc_counts):
    current_view_no = checkViewNoForNodes(txnPoolNodeSet)
    expected_view_no = current_view_no + 1 if vc_counts == 'once' else current_view_no + 2
    next_primary = get_next_primary_name(txnPoolNodeSet, expected_view_no)
    pretenders = [r.node for r in getNonPrimaryReplicas(txnPoolNodeSet) if not r.isPrimary]
    if slow_node_is_next_primary:
        delayed_node = [n for n in pretenders if n.name == next_primary][0]
    else:
        delayed_node = [n for n in pretenders if n.name != next_primary][0]

    with delay_rules_without_processing(delayed_node.nodeIbStasher, cDelay()):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 2)

        trigger_view_change(txnPoolNodeSet)
        if vc_counts == 'twice':
            for node in txnPoolNodeSet:
                node.view_changer.start_view_change(current_view_no + 2)

    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
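
The expected view number arithmetic above (and in the check_view_change_one_slow_node helper later on) is simply the current view number plus the number of view changes triggered. A tiny worked illustration with a hypothetical helper, not part of the test suite:

def expected_view_no_after(current_view_no, vc_counts):
    # 'once' triggers a single view change, 'twice' triggers a follow-up one as well
    return current_view_no + (1 if vc_counts == 'once' else 2)

assert expected_view_no_after(0, 'once') == 1
assert expected_view_no_after(3, 'twice') == 5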
Code example #7
def test_revert_nym_with_fees_before_catchup(looper, helpers,
                                             nodeSetWithIntegratedTokenPlugin,
                                             fees_set, fees, xfer_mint_tokens,
                                             xfer_addresses):
    nodes = nodeSetWithIntegratedTokenPlugin
    current_amount = get_amount_from_token_txn(xfer_mint_tokens)
    seq_no = get_seq_no(xfer_mint_tokens)
    lagging_node = nodes[-1]
    current_amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, xfer_addresses, current_amount)
    with delay_rules_without_processing(lagging_node.nodeIbStasher, cDelay(),
                                        pDelay()):
        current_amount, seq_no, _ = send_and_check_nym_with_fees(
            helpers, fees_set, seq_no, looper, xfer_addresses, current_amount)
        looper.runFor(waits.expectedPrePrepareTime(len(nodes)))
        lagging_node.start_catchup()
        for n in nodes:
            looper.run(
                eventually(lambda: assertExp(n.mode == Mode.participating)))
        for n in nodes:
            looper.run(
                eventually(check_state, n, True, retryWait=0.2, timeout=15))
    ensure_all_nodes_have_same_data(looper, nodes)
    current_amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, xfer_addresses, current_amount)
    ensure_all_nodes_have_same_data(looper, nodes)
Code example #8
def test_node_reject_invalid_txn_during_catchup(looper, sdk_pool_handle,
                                                sdk_wallet_client, tconf, tdir,
                                                txnPoolNodeSet, bad_node,
                                                lagging_node):
    """
    Make sure that a catching-up node will blacklist nodes which send incorrect catchup replies
    """
    normal_nodes = [
        node for node in txnPoolNodeSet
        if node not in [bad_node, lagging_node]
    ]
    normal_stashers = [node.nodeIbStasher for node in normal_nodes]

    with delay_rules_without_processing(lagging_node.nodeIbStasher,
                                        delay_3pc()):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, 5)

        # Perform catchup, while making sure that cons proof from bad node is received
        # before cons proofs from normal nodes, so bad node can participate in catchup
        with delay_rules(normal_stashers, lsDelay()):
            lagging_node.start_catchup()

            node_leecher = lagging_node.ledgerManager._node_leecher
            audit_cons_proof_service = node_leecher._leechers[
                AUDIT_LEDGER_ID]._cons_proof_service
            looper.run(
                eventually(lambda: assert_in(
                    bad_node.name, audit_cons_proof_service._cons_proofs)))

        waitNodeDataEquality(looper, lagging_node, *normal_nodes)
        assert lagging_node.isNodeBlacklisted(bad_node.name)
Code example #9
def test_backup_stabilized_checkpoint_on_view_change(looper, txnPoolNodeSet,
                                                     sdk_wallet_client,
                                                     sdk_pool_handle):
    # Delta:1
    backup = txnPoolNodeSet[-1].replicas[1]
    count_of_replicas = len(txnPoolNodeSet[0].replicas)
    with delay_rules_without_processing(
        [n.nodeIbStasher for n in txnPoolNodeSet], ppDelay(instId=0)):
        sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                                 REQ_COUNT)
        looper.run(
            eventually(
                lambda r: assertExp(r.last_ordered_3pc == (0, REQ_COUNT)),
                backup))
        # assert that all of the requests are propagated
        for n in txnPoolNodeSet:
            for req in n.requests.values():
                assert req.forwardedTo == count_of_replicas

        ensure_view_change(looper, txnPoolNodeSet)
        ensureElectionsDone(looper, txnPoolNodeSet)

        # check that all requests were freed on backups
        for n in txnPoolNodeSet:
            for req in n.requests.values():
                assert req.forwardedTo == count_of_replicas - 1
Code example #10
def test_revert_works_for_fees_after_view_change(looper, helpers,
                                                 nodeSetWithIntegratedTokenPlugin,
                                                 sdk_pool_handle,
                                                 fees_set,
                                                 mint_tokens, addresses, fees):
    node_set = nodeSetWithIntegratedTokenPlugin
    current_amount = get_amount_from_token_txn(mint_tokens)
    seq_no = get_seq_no(mint_tokens)
    reverted_node = nodeSetWithIntegratedTokenPlugin[-1]

    current_amount, seq_no, _ = send_and_check_nym_with_fees(helpers, fees_set, seq_no, looper, addresses,
                                                             current_amount)
    current_amount, seq_no, _ = send_and_check_transfer(helpers, addresses, fees, looper, current_amount, seq_no)

    with delay_rules_without_processing(reverted_node.nodeIbStasher, delay_3pc(view_no=0, msgs=Commit)):
        len_batches_before = len(reverted_node.master_replica._ordering_service.batches)
        current_amount, seq_no, _ = send_and_check_transfer(helpers, addresses, fees, looper, current_amount, seq_no)
        current_amount, seq_no, _ = send_and_check_nym_with_fees(helpers, fees_set, seq_no, looper, addresses,
                                                                 current_amount)
        looper.runFor(waits.expectedPrePrepareTime(len(nodeSetWithIntegratedTokenPlugin)))
        len_batches_after = len(reverted_node.master_replica._ordering_service.batches)

        """
        Check that we have 2 new batches
        """
        assert len_batches_after - len_batches_before == 2
        for n in node_set:
            n.view_changer.on_master_degradation()
        ensure_view_change(looper, nodeSetWithIntegratedTokenPlugin)

        looper.run(eventually(lambda: assertExp(reverted_node.mode == Mode.participating)))
    ensure_all_nodes_have_same_data(looper, node_set)

    send_and_check_nym_with_fees(helpers, fees_set, seq_no, looper, addresses, current_amount)
    ensure_all_nodes_have_same_data(looper, node_set)
Code example #11
def test_resend_inst_ch_in_progress_v_ch(txnPoolNodeSet, looper,
                                         sdk_pool_handle, sdk_wallet_client,
                                         tdir, tconf, allPluginsPath):
    old_view = viewNoForNodes(txnPoolNodeSet)

    # Disconnect two nodes. One of them would be the next master primary in case of a view change.
    for node in [txnPoolNodeSet[1], txnPoolNodeSet[-1]]:
        disconnect_node_and_ensure_disconnected(looper,
                                                txnPoolNodeSet,
                                                node,
                                                stopNode=True)
        looper.removeProdable(node)
        txnPoolNodeSet.remove(node)

    # delay I_CH on every node except the last one and initiate a view change
    stashers = [n.nodeIbStasher for n in txnPoolNodeSet[:-1]]
    with delay_rules_without_processing(stashers, icDelay(viewNo=2)):
        ensure_view_change(looper, txnPoolNodeSet)
        looper.runFor(tconf.NEW_VIEW_TIMEOUT + 1)

    # checks
    def checks():
        assert all(not node.view_change_in_progress for node in txnPoolNodeSet)
        assert all(node.viewNo == old_view + 2 for node in txnPoolNodeSet)

    looper.run(
        eventually(checks, timeout=tconf.NEW_VIEW_TIMEOUT * 2.5, retryWait=1))

    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
Code example #12
def test_get_txn_after_bls_key_rotation(looper, txnPoolNodeSet,
                                        sdk_wallet_stewards,
                                        sdk_wallet_trustee,
                                        sdk_wallet_client,
                                        sdk_pool_handle):
    check_update_bls_key(node_num=0, saved_multi_sigs_count=4,
                         looper=looper, txnPoolNodeSet=txnPoolNodeSet,
                         sdk_wallet_stewards=sdk_wallet_stewards,
                         sdk_wallet_client=sdk_wallet_client,
                         sdk_pool_handle=sdk_pool_handle,
                         pool_refresh=False)
    check_update_bls_key(node_num=1, saved_multi_sigs_count=4,
                         looper=looper, txnPoolNodeSet=txnPoolNodeSet,
                         sdk_wallet_stewards=sdk_wallet_stewards,
                         sdk_wallet_client=sdk_wallet_client,
                         sdk_pool_handle=sdk_pool_handle,
                         pool_refresh=False)
    check_update_bls_key(node_num=2, saved_multi_sigs_count=4,
                         looper=looper, txnPoolNodeSet=txnPoolNodeSet,
                         sdk_wallet_stewards=sdk_wallet_stewards,
                         sdk_wallet_client=sdk_wallet_client,
                         sdk_pool_handle=sdk_pool_handle,
                         pool_refresh=False)
    check_update_bls_key(node_num=3, saved_multi_sigs_count=4,
                         looper=looper, txnPoolNodeSet=txnPoolNodeSet,
                         sdk_wallet_stewards=sdk_wallet_stewards,
                         sdk_wallet_client=sdk_wallet_client,
                         sdk_pool_handle=sdk_pool_handle,
                         pool_refresh=False)

    # Stop receiving commits in a circle so that all nodes will have different sets of multi-signatures
    with delay_rules_without_processing(txnPoolNodeSet[0].nodeIbStasher, cDelay(delay=1200, sender_filter=txnPoolNodeSet[3].name)):
        with delay_rules_without_processing(txnPoolNodeSet[1].nodeIbStasher, cDelay(delay=1200, sender_filter=txnPoolNodeSet[0].name)):
            with delay_rules_without_processing(txnPoolNodeSet[2].nodeIbStasher, cDelay(delay=1200, sender_filter=txnPoolNodeSet[1].name)):
                with delay_rules_without_processing(txnPoolNodeSet[3].nodeIbStasher, cDelay(delay=1200, sender_filter=txnPoolNodeSet[2].name)):
                    did_future = create_and_store_my_did(sdk_wallet_client[0], "{}")
                    did, verkey = looper.loop.run_until_complete(did_future)
                    nym_request_future = ledger.build_nym_request(sdk_wallet_trustee[1], did, verkey, None, None)
                    nym_request = looper.loop.run_until_complete(nym_request_future)
                    nym_response_future = ledger.sign_and_submit_request(sdk_pool_handle, sdk_wallet_trustee[0], sdk_wallet_trustee[1], nym_request)
                    looper.loop.run_until_complete(nym_response_future)

                    get_txn_request_future = ledger.build_get_txn_request(sdk_wallet_client[1], "DOMAIN", 1)
                    get_txn_request = looper.loop.run_until_complete(get_txn_request_future)
                    get_txn_response_future = ledger.submit_request(sdk_pool_handle, get_txn_request)
                    looper.loop.run_until_complete(get_txn_response_future)
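
The four nested delay_rules_without_processing blocks above form a ring of commit delays: node 0 stashes COMMITs coming from node 3, node 1 from node 0, node 2 from node 1, and node 3 from node 2, so every node finalizes with a different set of multi-signatures. A minimal sketch of that ring (indices only, not plenum code):

# delaying node -> sender whose COMMITs it stashes
commit_delay_ring = {0: 3, 1: 0, 2: 1, 3: 2}
assert all(commit_delay_ring[i] == (i - 1) % 4 for i in range(4))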
Code example #13
def test_revert_auth_rule_changing(looper, txnPoolNodeSet, sdk_wallet_trustee,
                                   sdk_wallet_steward, sdk_pool_handle):
    node_stashers = [n.nodeIbStasher for n in txnPoolNodeSet]
    wh, _ = sdk_wallet_trustee
    new_steward_did, new_steward_verkey = create_verkey_did(looper, wh)
    new_steward_did2, new_steward_verkey2 = create_verkey_did(looper, wh)
    """We try to change rule for adding new steward. For this case we """
    changed_constraint = AuthConstraint(role=STEWARD, sig_count=1)
    action = AuthActionAdd(txn_type=NYM, field=ROLE, value=STEWARD)
    with delay_rules_without_processing(node_stashers, pDelay(), cDelay()):
        sdk_send_and_check_auth_rule_request(
            looper,
            sdk_wallet_trustee,
            sdk_pool_handle,
            auth_action=ADD_PREFIX,
            auth_type=action.txn_type,
            field=action.field,
            new_value=action.value,
            old_value=None,
            constraint=changed_constraint.as_dict,
            no_wait=True)
        looper.runFor(waits.expectedPrePrepareTime(len(txnPoolNodeSet)))
        """
        Try to add a new steward by an already existing trustee.
        Validation should raise an exception because we changed the uncommitted state
        by adding the new rule that only a steward can add a new steward.
        """
        with pytest.raises(RequestRejectedException,
                           match="TRUSTEE can not do this action"):
            sdk_add_new_nym(looper,
                            sdk_pool_handle,
                            sdk_wallet_trustee,
                            'newSteward1',
                            STEWARD_STRING,
                            dest=new_steward_did,
                            verkey=new_steward_verkey)
        looper.runFor(waits.expectedPrePrepareTime(len(txnPoolNodeSet)))
        """
        Catchup should revert config_state and discard the rule change
        """
        for n in txnPoolNodeSet:
            n.start_catchup()
        for n in txnPoolNodeSet:
            looper.run(
                eventually(lambda: assertExp(n.mode == Mode.participating)))
    """
    Try to create a new steward by a steward.
    We cannot do this because the AUTH_RULE txn was reverted.
    """
    with pytest.raises(RequestRejectedException,
                       match="STEWARD can not do this action"):
        sdk_add_new_nym(looper,
                        sdk_pool_handle,
                        sdk_wallet_steward,
                        'newSteward2',
                        STEWARD_STRING,
                        dest=new_steward_did2,
                        verkey=new_steward_verkey2)
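
A toy model of the rule being toggled above may help: adding a NYM with role=STEWARD is normally constrained to a TRUSTEE signature, and the AUTH_RULE txn replaces that constraint with "1 signature from a STEWARD", which is why the trustee is rejected while the change sits in the uncommitted state and the steward is rejected again once catchup reverts it. The dictionaries and the may_add_steward helper below are illustrative only, not indy-node code, and assume single-signature constraints.

default_rule = {"auth_action": "ADD", "txn_type": "NYM", "field": "ROLE", "value": "STEWARD",
                "constraint": {"role": "TRUSTEE", "sig_count": 1}}
changed_rule = dict(default_rule, constraint={"role": "STEWARD", "sig_count": 1})

def may_add_steward(rule, signer_role):
    # Single-signature constraints only, which is all this example needs.
    return signer_role == rule["constraint"]["role"]

# While the AUTH_RULE change is applied (uncommitted), the trustee is rejected ...
assert not may_add_steward(changed_rule, "TRUSTEE") and may_add_steward(changed_rule, "STEWARD")
# ... and after catchup reverts it, the steward is rejected again under the default rule.
assert may_add_steward(default_rule, "TRUSTEE") and not may_add_steward(default_rule, "STEWARD")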
Code example #14
def test_second_checkpoint_after_catchup_can_be_stabilized(
        chkFreqPatched, looper, txnPoolNodeSet, sdk_wallet_steward,
        sdk_wallet_client, sdk_pool_handle, tdir, tconf, allPluginsPath):
    lagging_node = txnPoolNodeSet[-1]
    with delay_rules_without_processing(lagging_node.nodeIbStasher, cDelay()):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client,
                                  tconf.Max3PCBatchSize * CHK_FREQ * 2)
    waitNodeDataEquality(looper, lagging_node, *txnPoolNodeSet[:-1])
    # Epsilon got the lost transactions via catch-up.
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 2)

    master_replica = lagging_node.master_replica

    check_stable_checkpoint(master_replica, 10)
    check_num_received_checkpoints(master_replica, 0)

    assert master_replica.h == 10
    assert master_replica.H == 25

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    for replica in lagging_node.replicas.values():
        assert replica.h == 10
        assert replica.H == 25

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 6)
    stabilization_timeout = \
        waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))
    looper.runFor(stabilization_timeout)

    for replica in lagging_node.replicas.values():
        check_stable_checkpoint(replica, 15)
        check_num_unstable_checkpoints(replica, 0)

        # nothing is stashed since it's ordered during catch-up
        check_num_received_checkpoints(replica, 0)

        assert replica.h == 15
        assert replica.H == 30

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    looper.runFor(stabilization_timeout)

    for replica in lagging_node.replicas.values():
        check_stable_checkpoint(replica, 20)
        check_num_unstable_checkpoints(replica, 0)

        # nothing is stashed since it's ordered during catch-up
        check_num_received_checkpoints(replica, 0)

        assert replica.h == 20
        assert replica.H == 35
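
The h/H assertions above follow the usual watermark arithmetic: the low watermark h sits at the last stable checkpoint and the high watermark is h plus the log size. A worked check, assuming CHK_FREQ = 5 and LOG_SIZE = 15 (values implied by the assertions, not stated in this snippet):

CHK_FREQ = 5     # assumed checkpoint frequency from chkFreqPatched
LOG_SIZE = 15    # assumed log size, implied by H - h in the assertions above

def watermarks(stable_checkpoint):
    h = stable_checkpoint
    return h, h + LOG_SIZE

assert watermarks(10) == (10, 25)
assert watermarks(15) == (15, 30)
assert watermarks(20) == (20, 35)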
Code example #15
def test_catchup_from_unequal_nodes_without_reasking(looper, txnPoolNodeSet,
                                                     sdk_pool_handle,
                                                     sdk_wallet_client):
    lagged_node_1 = txnPoolNodeSet[-1]
    lagged_node_2 = txnPoolNodeSet[-2]
    normal_nodes = [
        node for node in txnPoolNodeSet
        if node not in [lagged_node_1, lagged_node_2]
    ]
    normal_stashers = [node.nodeIbStasher for node in normal_nodes]

    with delay_rules_without_processing(lagged_node_1.nodeIbStasher,
                                        delay_3pc()):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, 2)

        with delay_rules_without_processing(lagged_node_2.nodeIbStasher,
                                            delay_3pc()):
            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, 7)
            ensure_all_nodes_have_same_data(looper,
                                            normal_nodes,
                                            custom_timeout=30)

            # Perform catchup, while making sure that cons proof from lagging node is received
            # before cons proofs from normal nodes, so lagging node can participate in catchup
            with delay_rules(normal_stashers, lsDelay()):
                lagged_node_1.start_catchup()

                node_leecher = lagged_node_1.ledgerManager._node_leecher
                audit_cons_proof_service = node_leecher._leechers[
                    AUDIT_LEDGER_ID]._cons_proof_service
                looper.run(
                    eventually(lambda: assertExp(lagged_node_2.name in
                                                 audit_cons_proof_service.
                                                 _cons_proofs)))

            # Make sure catchup finishes successfully
            ensure_all_nodes_have_same_data(looper,
                                            set(txnPoolNodeSet) -
                                            {lagged_node_2},
                                            custom_timeout=30)
            assert lagged_node_1.ledgerManager._node_leecher._state == NodeLeecherService.State.Idle
Code example #16
def test_re_order_pre_prepares(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle):
    # 0. use new 3PC validator
    for n in txnPoolNodeSet:
        ordering_service = n.master_replica._ordering_service
        ordering_service._validator = OrderingServiceMsgValidator(
            ordering_service._data)

    # 1. Drop Prepares and Commits on the 4th node
    # Order a couple of requests on Nodes 1-3
    lagging_node = txnPoolNodeSet[-1]
    other_nodes = txnPoolNodeSet[:-1]
    with delay_rules_without_processing(lagging_node.nodeIbStasher, cDelay(),
                                        pDelay()):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, 3)
        assert all(n.master_last_ordered_3PC == (0, 3) for n in other_nodes)

    # 2. simulate view change start so that
    # all PrePrepares/Prepares/Commits are cleared
    # and uncommitted txns are reverted
    for n in txnPoolNodeSet:
        n.replicas.send_to_internal_bus(ViewChangeStarted(view_no=1))
        master_ordering_service = n.master_replica._ordering_service
        assert not master_ordering_service.prePrepares
        assert not master_ordering_service.prepares
        assert not master_ordering_service.commits
        assert master_ordering_service.old_view_preprepares
        ledger = n.db_manager.ledgers[DOMAIN_LEDGER_ID]
        state = n.db_manager.states[DOMAIN_LEDGER_ID]
        assert len(ledger.uncommittedTxns) == 0
        assert ledger.uncommitted_root_hash == ledger.tree.root_hash
        assert state.committedHead == state.head

    # 3. Simulate View Change finish to re-order the same PrePrepare
    assert lagging_node.master_last_ordered_3PC == (0, 0)
    new_master = txnPoolNodeSet[1]
    batches = [
        preprepare_to_batch_id(pp) for _, pp in new_master.master_replica.
        _ordering_service.old_view_preprepares.items()
    ]
    new_view_msg = NewViewCheckpointsApplied(view_no=0,
                                             view_changes=[],
                                             checkpoint=None,
                                             batches=batches)
    for n in txnPoolNodeSet:
        n.master_replica._ordering_service._bus.send(new_view_msg)

    # 4. Make sure that nodes 1-3 (which already ordered the requests) sent Prepares and Commits so that
    #    the requests were eventually ordered on Node4 as well
    waitNodeDataEquality(looper, lagging_node, *other_nodes)
    assert lagging_node.master_last_ordered_3PC == (0, 3)

    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
Code example #17
def test_backups_dont_order_while_reordering(txnPoolNodeSet,
                                             sdk_pool_handle,
                                             sdk_wallet_client,
                                             looper):
    """
    This test shows that for now we stop ordering on backups
    while the master is in the reordering state after a view change.
    Steps:
    1. Delay ordering on the master replica to collect requests to reorder after the VC
    2. Make sure that the master didn't order
    3. Delay old_view_pp_request and force a VC
    4. Ensure that the backup replicas on all nodes cannot order
       because the primary is waiting for reordering on the master
    """

    def check_pp_count(node, expected_count, inst_id=0):
        assert node.replicas._replicas[inst_id].last_ordered_3pc[1] == expected_count, \
            "master last ordered: {}, backup_last_ordered: {}".format(node.master_replica._ordering_service.batches,
                                                                      node.replicas._replicas[
                                                                          1]._ordering_service.batches)

    # We expect that after the view change Gamma will be the primary on the backup instance
    delayed_node = txnPoolNodeSet[-2]
    fast_nodes = [n for n in txnPoolNodeSet if n != delayed_node]
    master_pp_seq_no_before = delayed_node.master_replica.last_ordered_3pc[1]
    with delay_rules_without_processing(delayed_node.nodeIbStasher,
                                        pDelay(instId=MASTER_REPLICA_INDEX),
                                        cDelay(instId=MASTER_REPLICA_INDEX),
                                        msg_req_delay(),
                                        msg_rep_delay(),
                                        ppDelay(instId=MASTER_REPLICA_INDEX)):
        sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, REQS_FOR_REORDERING)
        looper.run(eventually(check_pp_count, delayed_node, REQS_FOR_REORDERING, BACKUP_INST_ID))
        assert delayed_node.master_replica.last_ordered_3pc[1] == master_pp_seq_no_before
        with delay_rules([n.nodeIbStasher for n in txnPoolNodeSet], old_view_pp_request_delay()):
            ensure_view_change(looper, txnPoolNodeSet)

            # check that view change is finished on all nodes
            looper.run(eventually(check_not_in_view_change, txnPoolNodeSet))

            # check that the delayed node is selected as backup primary on all fast nodes but not on the delayed node itself
            def check_backup_primaries():
                assert delayed_node.replicas[BACKUP_INST_ID]._consensus_data.primary_name is None
                assert delayed_node.master_replica.last_ordered_3pc[1] == master_pp_seq_no_before
                assert all(
                    n.replicas[BACKUP_INST_ID]._consensus_data.primary_name == generateName(delayed_node.name,
                                                                                            instId=BACKUP_INST_ID)
                    for n in fast_nodes
                )

            looper.run(eventually(check_backup_primaries))

            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, REQS_FOR_REORDERING)
            for node in txnPoolNodeSet:
                assert node.replicas._replicas[BACKUP_INST_ID].last_ordered_3pc[1] == 0
Code example #18
def test_revert_set_fees_and_view_change_all_nodes(
        nodeSetWithIntegratedTokenPlugin, xfer_mint_tokens, helpers, looper,
        xfer_addresses):
    """
        Send SET_FEES and initiate a view change. Check that it is reverted and the transaction passes with the old fees
    """
    def _get_len_preprepares(n):
        replica = n.master_replica
        return len(replica._ordering_service.sent_preprepares if replica.
                   isPrimary else replica._ordering_service.prePrepares)

    def _check_len_pprs(old_pprs_len):
        _len_pprs = set([
            _get_len_preprepares(n) for n in nodeSetWithIntegratedTokenPlugin
        ])
        _len_ppr = _len_pprs.pop()
        assert old_pprs_len + 1 == _len_ppr

    helpers.general.do_set_fees({NYM_FEES_ALIAS: 3})
    nodes = nodeSetWithIntegratedTokenPlugin
    node_stashers = [n.nodeIbStasher for n in nodeSetWithIntegratedTokenPlugin]
    seq_no = get_seq_no(xfer_mint_tokens)
    _old_len_pprs = set(
        [_get_len_preprepares(n) for n in nodeSetWithIntegratedTokenPlugin])
    assert len(_old_len_pprs)
    _old_len_ppr = _old_len_pprs.pop()

    with delay_rules_without_processing(node_stashers, cDelay()):
        helpers.general.set_fees_without_waiting({NYM_FEES_ALIAS: 5})
        looper.run(eventually(functools.partial(_check_len_pprs,
                                                _old_len_ppr)))
        send_and_check_nym_with_fees(helpers, {FEES: {
            NYM_FEES_ALIAS: 5
        }},
                                     seq_no,
                                     looper,
                                     xfer_addresses,
                                     1000,
                                     check_reply=False)
        for n in nodeSetWithIntegratedTokenPlugin:
            n.start_catchup()
        for n in nodes:
            looper.run(
                eventually(lambda: assertExp(n.mode == Mode.participating)))
    ensure_all_nodes_have_same_data(looper, nodes)
    send_and_check_nym_with_fees(helpers, {FEES: {
        NYM_FEES_ALIAS: 3
    }},
                                 seq_no,
                                 looper,
                                 xfer_addresses,
                                 1000,
                                 check_reply=False)
    ensure_all_nodes_have_same_data(looper, nodes)
Code example #19
def test_catchup_with_old_txn_metadata_digest_format(tdir, tconf, looper,
                                                     txnPoolNodeSet,
                                                     sdk_pool_handle,
                                                     sdk_wallet_client,
                                                     monkeypatch):
    lagging_node = txnPoolNodeSet[-1]
    lagging_stasher = lagging_node.nodeIbStasher
    other_nodes = txnPoolNodeSet[:-1]

    # Utility
    def check_nodes_domain_ledger(nodes: Iterable, txn_count: int):
        for node in nodes:
            assert node.domainLedger.size >= txn_count

    # Patch payload metadata; note that this will prevent the pool from sending adequate replies to clients
    def append_old_payload_metadata(txn,
                                    frm=None,
                                    req_id=None,
                                    digest=None,
                                    payload_digest=None,
                                    taa_acceptance=None,
                                    endorser=None):
        txn = append_payload_metadata(txn, frm, req_id, digest, payload_digest,
                                      taa_acceptance, endorser)
        metadata = txn[TXN_PAYLOAD][TXN_PAYLOAD_METADATA]
        del metadata[TXN_PAYLOAD_METADATA_PAYLOAD_DIGEST]
        metadata[TXN_PAYLOAD_METADATA_DIGEST] = payload_digest
        return txn

    monkeypatch.setattr(txn_util, 'append_payload_metadata',
                        append_old_payload_metadata)

    # Check pool initial state
    initial_size = txnPoolNodeSet[0].domainLedger.size
    for node in txnPoolNodeSet:
        assert node.domainLedger.size == initial_size

    # Order some transactions, with one node discarding messages
    with delay_rules_without_processing(lagging_stasher, delay_3pc()):
        reps = sdk_send_random_requests(looper, sdk_pool_handle,
                                        sdk_wallet_client, 10)
        looper.run(
            eventually(check_nodes_domain_ledger, other_nodes,
                       initial_size + 10))
        assert lagging_node.domainLedger.size == initial_size

    # Catchup transactions and ensure that all nodes will eventually have same data
    lagging_node.start_catchup()
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    # Catch replies
    sdk_get_replies(looper, reps)
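
The monkeypatched append_old_payload_metadata above emulates the legacy transaction format: the separate payload-digest field is deleted and its value is written into the plain digest field. A toy illustration with hypothetical key names (the real ones are the TXN_PAYLOAD_METADATA_* constants):

def to_old_metadata_format(metadata):
    # Move the payload digest into the legacy digest slot and drop the new-format key.
    old = dict(metadata)
    old["digest"] = old.pop("payloadDigest")
    return old

new_style = {"digest": "full_request_digest", "payloadDigest": "payload_only_digest"}
assert to_old_metadata_format(new_style) == {"digest": "payload_only_digest"}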
Code example #20
def test_start_view_change_by_vc_msgs(looper,
                                      txnPoolNodeSet,
                                      sdk_wallet_client,
                                      sdk_pool_handle):

    delayed_node = txnPoolNodeSet[-1]
    rest_nodes = txnPoolNodeSet[:-1]
    with delay_rules_without_processing(delayed_node.nodeIbStasher, icDelay()):
        current_view_no = checkViewNoForNodes(txnPoolNodeSet)
        trigger_view_change(txnPoolNodeSet)
        looper.run(eventually(checkViewNoForNodes, rest_nodes, current_view_no + 1))
        ensureElectionsDone(looper, txnPoolNodeSet)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
Code example #21
def test_catchup_with_reask_ls(txnPoolNodeSet, looper, sdk_pool_handle,
                               sdk_wallet_steward, tconf, tdir,
                               allPluginsPath):
    '''
    Start a catchup
    Delay MessageRep with LedgerStatuses twice
    Check that the catchup finished
    '''
    lagged_node = txnPoolNodeSet[-1]
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)
    lagged_node.nodeIbStasher.delay(msg_rep_delay(types_to_delay=[COMMIT]))

    with delay_rules_without_processing(
            lagged_node.nodeIbStasher, delay_3pc(),
            msg_rep_delay(types_to_delay=[COMMIT])):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_steward, 2)
        lagged_node.nodeIbStasher.drop_delayeds()
    with delay_rules_without_processing(
            lagged_node.nodeIbStasher, lsDelay(),
            msg_rep_delay(types_to_delay=[LEDGER_STATUS])):
        lagged_node.start_catchup()

        def chk():
            resp_ls_count = 0
            for msg in lagged_node.nodeIbStasher.delayeds:
                if isinstance(
                        msg.item[0],
                        MessageRep) and msg.item[0].msg_type == LEDGER_STATUS:
                    resp_ls_count += 1
            assert resp_ls_count >= (len(txnPoolNodeSet) - 1) * 2
            lagged_node.nodeIbStasher.drop_delayeds()

        looper.run(eventually(chk))
    waitNodeDataEquality(looper,
                         lagged_node,
                         *txnPoolNodeSet,
                         exclude_from_check=['check_last_ordered_3pc_backup'])
Code example #22
File: helper.py Project: jandayanan/indy-plenum
def check_view_change_one_slow_node(looper,
                                    txnPoolNodeSet,
                                    sdk_pool_handle,
                                    sdk_wallet_client,
                                    vc_counts,
                                    slow_node_is_next_primary,
                                    delay_commit=True,
                                    delay_pre_prepare=True):
    current_view_no = checkViewNoForNodes(txnPoolNodeSet)
    expected_view_no = current_view_no + vc_counts
    next_primary = get_next_primary_name(txnPoolNodeSet, expected_view_no)
    pretenders = [
        r.node for r in getNonPrimaryReplicas(txnPoolNodeSet)
        if not r.isPrimary
    ]
    if slow_node_is_next_primary:
        delayed_node = [n for n in pretenders if n.name == next_primary][0]
    else:
        delayed_node = [n for n in pretenders if n.name != next_primary][0]
    fast_nodes = [node for node in txnPoolNodeSet if node != delayed_node]

    delayers = []
    if delay_pre_prepare:
        delayers.append(ppDelay())
        delayers.append(msg_rep_delay(types_to_delay=[PREPREPARE]))
    if delay_commit:
        delayers.append(cDelay())

    # delay OldViewPrePrepareReply so that slow node doesn't receive PrePrepares before ReOrdering phase finishes
    with delay_rules(delayed_node.nodeIbStasher, old_view_pp_reply_delay()):
        with delay_rules_without_processing(delayed_node.nodeIbStasher,
                                            *delayers):
            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, 1)
            trigger_view_change(txnPoolNodeSet)
            if vc_counts == 2:
                for node in txnPoolNodeSet:
                    node.master_replica.internal_bus.send(
                        NodeNeedViewChange(current_view_no + 2))

        waitForViewChange(looper=looper,
                          txnPoolNodeSet=txnPoolNodeSet,
                          expectedViewNo=expected_view_no)
        ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30)

        # wait till fast nodes finish re-ordering
        looper.run(eventually(check_has_commits, fast_nodes))

    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
Code example #23
def test_unstash_waiting_for_first_batch_ordered_after_catchup(
        looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle, tconf):
    lagged_node = txnPoolNodeSet[-1]
    other_nodes = list(set(txnPoolNodeSet) - {lagged_node})
    other_stashers = [n.nodeIbStasher for n in other_nodes]

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    last_ordered_lagged_before = lagged_node.master_last_ordered_3PC
    # do not process any message reqs for PrePrepares
    with delay_rules_without_processing(
            lagged_node.nodeIbStasher,
            msg_rep_delay(types_to_delay=[PREPARE, PREPREPARE])):
        with delay_rules(lagged_node.nodeIbStasher, cDelay()):
            ensure_view_change(looper, txnPoolNodeSet)
            looper.run(eventually(check_not_in_view_change, txnPoolNodeSet))
            ensureElectionsDone(looper,
                                other_nodes,
                                instances_list=range(
                                    getRequiredInstances(len(txnPoolNodeSet))))

            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, 1)

            # delay Commits on all nodes so that there are some PrePrepares still stashed after catchup
            with delay_rules(other_stashers, cDelay()):
                pre_prep_before = len(recvdPrePrepareForInstId(lagged_node, 0))
                sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 2)
                # wait till the lagged node receives the new PrePrepares
                # they will be stashed as WAITING_FIRST_BATCH_IN_VIEW
                looper.run(
                    eventually(lambda: assertExp(
                        len(recvdPrePrepareForInstId(lagged_node, 0)) ==
                        pre_prep_before + 2)))

                # catchup the lagged node
                # the latest 2 PrePrepares are still stashed
                lagged_node.start_catchup()
                looper.run(
                    eventually(
                        lambda: assertExp(lagged_node.master_last_ordered_3PC >
                                          last_ordered_lagged_before)))

            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, 2)

    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=30)
Code example #24
def test_delay_IC_for_next_primary(looper,
                                   txnPoolNodeSet,
                                   sdk_pool_handle,
                                   sdk_wallet_client):
    current_view_no = checkViewNoForNodes(txnPoolNodeSet)
    next_primary_name = get_next_primary_name(txnPoolNodeSet, current_view_no + 1)
    next_primary = [n for n in txnPoolNodeSet if n.name == next_primary_name][0]
    rest_nodes = list(set(txnPoolNodeSet) - {next_primary})
    with delay_rules_without_processing(next_primary.nodeIbStasher, icDelay()):
        trigger_view_change(txnPoolNodeSet)
        looper.run(eventually(checkViewNoForNodes, rest_nodes, current_view_no + 1))
        ensureElectionsDone(looper, txnPoolNodeSet)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
    assert next_primary.master_replica.isPrimary
Code example #25
def test_catchup_from_unequal_nodes_without_waiting(looper,
                                                    txnPoolNodeSet,
                                                    sdk_pool_handle,
                                                    sdk_wallet_client):
    normal_node = txnPoolNodeSet[0]
    lagging_node_1 = txnPoolNodeSet[1]
    lagging_node_2 = txnPoolNodeSet[2]
    stopped_node = txnPoolNodeSet[3]

    # Make sure everyone has one batch
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)

    # Wait until all nodes have the same data and store the last 3PC number of the node that's going to be "stopped"
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=30)
    last_3pc = stopped_node.master_last_ordered_3PC

    with delay_rules_without_processing(stopped_node.nodeIbStasher, delay_3pc()):
        # Create one more batch on all nodes except "stopped" node
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)

        with delay_rules(lagging_node_1.nodeIbStasher, delay_3pc(msgs=Commit)):
            # Create one more batch on all nodes except "stopped" and first lagging node
            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)

            with delay_rules(lagging_node_2.nodeIbStasher, delay_3pc(msgs=Commit)):
                # Create one more batch on all nodes except "stopped" and both lagging nodes
                # This time we can't wait for replies because there will be only one
                reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)

                # Wait until normal node orders txn
                looper.run(eventually(lambda: assert_eq(normal_node.master_last_ordered_3PC[1],
                                                        last_3pc[1] + 3)))

                # Now all nodes have a different number of txns, so if we try to start a catchup
                # it is guaranteed that we'll need to ask for equal consistency proofs, and the
                # disabled timeout ensures that the node can do so without relying on a timeout
                stopped_node.start_catchup()

                # Wait until catchup ends
                looper.run(eventually(lambda: assert_eq(stopped_node.ledgerManager._node_leecher._state,
                                                        NodeLeecherService.State.Idle)))

                # Ensure stopped node caught up at least one batch
                assert stopped_node.master_last_ordered_3PC[1] > last_3pc[1]

                # And there was no view change
                assert stopped_node.master_last_ordered_3PC[0] == last_3pc[0]

            # Make sure replies from last request are eventually received
            sdk_get_and_check_replies(looper, reqs)
Code example #26
def test_catching_up_auth_rule_txn(looper, txnPoolNodeSet, sdk_wallet_trustee,
                                   sdk_wallet_steward, sdk_pool_handle):
    delayed_node = txnPoolNodeSet[-1]
    wh, _ = sdk_wallet_trustee
    new_steward_did, new_steward_verkey = create_verkey_did(looper, wh)
    changed_constraint = AuthConstraint(role=STEWARD, sig_count=1)
    action = AuthActionAdd(txn_type=NYM, field=ROLE, value=STEWARD)
    with pytest.raises(RequestRejectedException,
                       match="Not enough TRUSTEE signatures"):
        sdk_add_new_nym(looper,
                        sdk_pool_handle,
                        sdk_wallet_steward,
                        'newSteward2',
                        STEWARD_STRING,
                        dest=new_steward_did,
                        verkey=new_steward_verkey)
    with delay_rules_without_processing(delayed_node.nodeIbStasher, cDelay(),
                                        pDelay(), ppDelay()):
        sdk_send_and_check_auth_rule_request(
            looper,
            sdk_pool_handle,
            sdk_wallet_trustee,
            auth_action=ADD_PREFIX,
            auth_type=action.txn_type,
            field=action.field,
            new_value=action.value,
            old_value=None,
            constraint=changed_constraint.as_dict)
        sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee,
                        'newSteward2')
        delayed_node.start_catchup()
        looper.run(
            eventually(
                lambda: assertExp(delayed_node.mode == Mode.participating)))
    sdk_add_new_nym(looper,
                    sdk_pool_handle,
                    sdk_wallet_steward,
                    'newSteward3',
                    STEWARD_STRING,
                    dest=new_steward_did,
                    verkey=new_steward_verkey)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    config_state = delayed_node.states[CONFIG_LEDGER_ID]
    from_state = config_state.get(config.make_state_path_for_auth_rule(
        action.get_action_id()),
                                  isCommitted=True)
    assert changed_constraint == ConstraintsSerializer(
        config_state_serializer).deserialize(from_state)
Code example #27
File: helper.py Project: jandayanan/indy-plenum
def check_view_change_adding_new_node(looper,
                                      tdir,
                                      tconf,
                                      allPluginsPath,
                                      txnPoolNodeSet,
                                      sdk_pool_handle,
                                      sdk_wallet_client,
                                      sdk_wallet_steward,
                                      slow_nodes=[],
                                      delay_commit=False,
                                      delay_pre_prepare=False):
    # Pre-requisites: viewNo=3, Primary is Node4
    for viewNo in range(1, 4):
        trigger_view_change(txnPoolNodeSet)
        waitForViewChange(looper, txnPoolNodeSet, viewNo)
        ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30)

    # Delay 3PC messages on slow nodes
    fast_nodes = [node for node in txnPoolNodeSet if node not in slow_nodes]
    slow_stashers = [slow_node.nodeIbStasher for slow_node in slow_nodes]
    delayers = []
    if delay_pre_prepare:
        delayers.append(ppDelay())
        delayers.append(msg_rep_delay(types_to_delay=[PREPREPARE]))
    if delay_commit:
        delayers.append(cDelay())

    with delay_rules_without_processing(slow_stashers, *delayers):
        # Add Node5
        new_node = add_new_node(looper, fast_nodes, sdk_pool_handle,
                                sdk_wallet_steward, tdir, tconf,
                                allPluginsPath)
        old_set = list(txnPoolNodeSet)
        txnPoolNodeSet.append(new_node)

        # Trigger view change
        trigger_view_change(txnPoolNodeSet)

        # make sure view change is finished eventually
        waitForViewChange(looper, old_set, 4)
        ensureElectionsDone(looper, old_set)

    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
Code example #28
def test_new_primary_lagging_behind(looper,
                                    txnPoolNodeSet,
                                    sdk_wallet_client,
                                    sdk_pool_handle,
                                    tconf):
    initial_view_no = checkViewNoForNodes(txnPoolNodeSet)
    next_primary_name = get_next_primary_name(txnPoolNodeSet, initial_view_no + 1)
    next_primary = [n for n in txnPoolNodeSet if n.name == next_primary_name][0]
    expected_primary_name = get_next_primary_name(txnPoolNodeSet, initial_view_no + 2)
    # The next primary cannot stabilize the first checkpoint
    with delay_rules_without_processing(next_primary.nodeIbStasher, cDelay(), pDelay()):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, CHK_FREQ)
        ensure_view_change(looper, txnPoolNodeSet)
        ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet,
                            customTimeout=2 * tconf.NEW_VIEW_TIMEOUT)

    assert next_primary_name != expected_primary_name
    assert checkViewNoForNodes(txnPoolNodeSet) == initial_view_no + 2
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
Code example #29
def test_view_change_with_next_primary_stopped_and_one_node_lost_commit(looper, txnPoolNodeSet,
                                                                        sdk_pool_handle, sdk_wallet_client,
                                                                        limitTestRunningTime):
    current_view_no = checkViewNoForNodes(txnPoolNodeSet)
    next_primary = get_next_primary_name(txnPoolNodeSet, current_view_no + 1)
    delayed_node = [r.node for r in getNonPrimaryReplicas(txnPoolNodeSet) if r.node.name != next_primary][0]
    other_nodes = [n for n in txnPoolNodeSet if n.name != next_primary]

    with delay_rules_without_processing(delayed_node.nodeIbStasher, cDelay()):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 2)

        disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, next_primary)
        trigger_view_change(other_nodes)

    ensureElectionsDone(looper, other_nodes,
                        instances_list=range(2), customTimeout=15)
    ensure_all_nodes_have_same_data(looper, other_nodes)
    sdk_ensure_pool_functional(looper, other_nodes, sdk_wallet_client, sdk_pool_handle)
    ensure_all_nodes_have_same_data(looper, other_nodes)
Code example #30
def test_upper_bound_of_checkpoint_after_catchup_is_divisible_by_chk_freq(
        chkFreqPatched, looper, txnPoolNodeSet, sdk_pool_handle,
        sdk_wallet_steward, sdk_wallet_client, tdir, tconf, allPluginsPath):
    lagging_node = txnPoolNodeSet[-1]
    with delay_rules_without_processing(lagging_node.nodeIbStasher, cDelay()):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client,
                                  tconf.Max3PCBatchSize * CHK_FREQ * 2 + 1)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    waitNodeDataEquality(looper,
                         lagging_node,
                         *txnPoolNodeSet[:-1],
                         exclude_from_check=['check_last_ordered_3pc_backup'])
    # Epsilon did not participate in ordering of the batch with EpsilonSteward
    # NYM transaction and the batch with Epsilon NODE transaction.
    # Epsilon got these transactions via catch-up.

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client,
                              (CHK_FREQ - 1) * tconf.Max3PCBatchSize)

    for replica in txnPoolNodeSet[0].replicas.values():
        check_stable_checkpoint(replica, CHK_FREQ * 3)
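
The final assertion expects the stable checkpoint to land exactly on CHK_FREQ * 3, which follows from the batch counts: Max3PCBatchSize * CHK_FREQ * 2 + 1 requests make 2 * CHK_FREQ full batches plus one batch for the single extra request before the catchup, and (CHK_FREQ - 1) * Max3PCBatchSize requests make CHK_FREQ - 1 batches afterwards. A worked check under those assumptions (the concrete CHK_FREQ value below is illustrative, not taken from tconf):

CHK_FREQ = 5   # assumed checkpoint frequency from chkFreqPatched
# Before the catchup: Max3PCBatchSize * CHK_FREQ * 2 + 1 requests
# -> 2 * CHK_FREQ full batches plus one batch holding the single extra request
batches_before_catchup = 2 * CHK_FREQ + 1
# After the catchup: (CHK_FREQ - 1) * Max3PCBatchSize requests -> CHK_FREQ - 1 full batches
batches_after_catchup = CHK_FREQ - 1
assert batches_before_catchup + batches_after_catchup == CHK_FREQ * 3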