Example #1
def test_freeing_forwarded_not_preprepared_request(
        looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_steward, tconf, tdir, allPluginsPath):
    behind_node = txnPoolNodeSet[-1]
    behind_node.requests.clear()

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_steward,
                                         CHK_FREQ, CHK_FREQ)
    with delay_rules(
            behind_node.nodeIbStasher,
            chk_delay(delay=sys.maxsize,
                      instId=behind_node.replicas.values()[-1])):
        with delay_rules(behind_node.nodeIbStasher, ppDelay(delay=sys.maxsize),
                         pDelay(delay=sys.maxsize), cDelay(delay=sys.maxsize)):
            count = behind_node.spylog.count(behind_node.allLedgersCaughtUp)
            sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool_handle,
                                       sdk_wallet_steward, req_num, req_num)
            looper.run(
                eventually(node_caughtup, behind_node, count, retryWait=1))
            looper.run(
                eventually(
                    lambda: assertExp(len(behind_node.requests) == req_num)))

    # We execute the caught-up requests
    looper.run(
        eventually(lambda: assertExp(len(behind_node.requests) == req_num)))
    assert all(r.executed for r in behind_node.requests.values()
               if behind_node.seqNoDB.get(r.request.key)[1])
def test_resending_for_old_stash_msgs(tdir, tconf, looper, stacks,
                                      alpha_handler, monkeypatch):
    alpha, beta = stacks
    msg1 = {'msg': 'msg1'}
    pending_client_messages = beta._client_message_provider._pending_client_messages

    alpha.connect(name=beta.name,
                  ha=beta.ha,
                  verKeyRaw=beta.verKeyRaw,
                  publicKeyRaw=beta.publicKeyRaw)
    looper.runFor(0.25)

    def fake_send_multipart(msg_parts,
                            flags=0,
                            copy=True,
                            track=False,
                            **kwargs):
        raise zmq.Again

    monkeypatch.setattr(beta.listener, 'send_multipart', fake_send_multipart)
    alpha.send(msg1, beta.name)
    looper.run(
        eventually(
            lambda messages: assertExp(messages[alpha.listener.IDENTITY] == [(
                0, msg1)]), pending_client_messages))
    monkeypatch.undo()

    beta._client_message_provider._timer.set_time(
        tconf.RESEND_CLIENT_MSG_TIMEOUT + 2)
    looper.run(
        eventually(
            lambda msg_handler: assertExp(msg_handler.received_messages ==
                                          [msg1]), alpha_handler))
    assert not pending_client_messages
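
test_resending_for_old_stash_msgs above advances a test timer past tconf.RESEND_CLIENT_MSG_TIMEOUT instead of sleeping. A rough sketch of such a settable mock timer is shown below; the class name and the schedule() API are assumptions made for illustration, not the actual timer used by the ZMQ stack.

class MockTimerSketch:
    """Hypothetical settable timer: callbacks scheduled with a delay fire as
    soon as set_time() moves the clock past their deadline."""

    def __init__(self):
        self._now = 0
        self._events = []  # list of (deadline, callback) pairs

    def schedule(self, delay, callback):
        self._events.append((self._now + delay, callback))

    def set_time(self, value):
        # Jump the clock forward and fire everything that became due,
        # e.g. the re-send of pending client messages.
        self._now = value
        due = [cb for deadline, cb in self._events if deadline <= value]
        self._events = [(d, cb) for d, cb in self._events if d > value]
        for cb in due:
            cb()
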
def test_stabilize_checkpoint_while_unstashing_when_missing_pre_prepare(
        looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_client):
    # Prepare nodes
    lagging_node = txnPoolNodeSet[-1]
    lagging_master_replica = lagging_node.master_replica
    rest_nodes = txnPoolNodeSet[:-1]

    # 1. send enough requests so that just 1 is left for checkpoint stabilization
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, reqs_for_checkpoint - 1)

    # 2. delay PrePrepare on 1 node so that prepares and commits will be stashed
    with delay_rules(lagging_node.nodeIbStasher, ppDelay()):
        with delay_rules(lagging_node.nodeIbStasher, msg_rep_delay()):
            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, 1)

            # all good nodes stabilized checkpoint
            looper.run(
                eventually(check_for_nodes, rest_nodes,
                           check_stable_checkpoint, 5))

            # bad node received checkpoints from all nodes but didn't stabilize it
            looper.run(
                eventually(check_for_nodes, [lagging_node],
                           check_stable_checkpoint, 0))
            looper.run(
                eventually(check_for_nodes,
                           [lagging_node], check_received_checkpoint_votes, 5,
                           len(rest_nodes)))

            # bad node has all commits and prepares for the last request stashed
            looper.run(
                eventually(lambda: assertExp(
                    (0, CHK_FREQ) in lagging_master_replica._ordering_service.preparesWaitingForPrePrepare
                    and len(lagging_master_replica._ordering_service.preparesWaitingForPrePrepare[(0, CHK_FREQ)])
                    == len(rest_nodes) - 1)))
            looper.run(
                eventually(lambda: assertExp(
                    (0, CHK_FREQ) in lagging_master_replica._ordering_service.commitsWaitingForPrepare
                    and len(lagging_master_replica._ordering_service.commitsWaitingForPrepare[(0, CHK_FREQ)])
                    == len(rest_nodes))))

    # 3. the delayed PrePrepare is processed, and stashed prepares and commits are unstashed
    # checkpoint will be stabilized during unstashing, and the request will be ordered
    looper.run(
        eventually(check_for_nodes, [lagging_node], check_stable_checkpoint,
                   5))
    waitNodeDataEquality(looper, *txnPoolNodeSet, customTimeout=5)
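
Most of these tests wrap message delays in the delay_rules / delay_rules_without_processing context managers. The sketch below is an assumed re-creation of that pattern built only from the stasher methods that already appear in this listing (delay(), drop_delayeds(), resetDelays()); it is not the real plenum implementation.

from contextlib import contextmanager

@contextmanager
def delay_rules_sketch(stashers, *delayers, drop_on_exit=False):
    # Register the given delayers on one or more stashers, run the body,
    # then remove the rules again.
    stashers = stashers if isinstance(stashers, (list, tuple)) else [stashers]
    for stasher in stashers:
        for delayer in delayers:
            stasher.delay(delayer)
    try:
        yield
    finally:
        for stasher in stashers:
            if drop_on_exit:
                # the *_without_processing flavour: delayed messages are dropped
                stasher.drop_delayeds()
            stasher.resetDelays()
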
Example #4
def test_unstash_waiting_for_first_batch_ordered_after_catchup(
        looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle, tconf):
    lagged_node = txnPoolNodeSet[-1]
    other_nodes = list(set(txnPoolNodeSet) - {lagged_node})
    other_stashers = [n.nodeIbStasher for n in other_nodes]

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    last_ordered_lagged_before = lagged_node.master_last_ordered_3PC
    # do not process any message reqs for PrePrepares
    with delay_rules_without_processing(
            lagged_node.nodeIbStasher,
            msg_rep_delay(types_to_delay=[PREPARE, PREPREPARE])):
        with delay_rules(lagged_node.nodeIbStasher, cDelay()):
            ensure_view_change(looper, txnPoolNodeSet)
            looper.run(eventually(check_not_in_view_change, txnPoolNodeSet))
            ensureElectionsDone(looper,
                                other_nodes,
                                instances_list=range(
                                    getRequiredInstances(len(txnPoolNodeSet))))

            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, 1)

            # delay Commits on all nodes so that there are some PrePrepares still stashed after catchup
            with delay_rules(other_stashers, cDelay()):
                pre_prep_before = len(recvdPrePrepareForInstId(lagged_node, 0))
                sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 2)
                # wait till the lagged node receives the new PrePrepares;
                # they will be stashed as WAITING_FIRST_BATCH_IN_VIEW
                looper.run(
                    eventually(lambda: assertExp(
                        len(recvdPrePrepareForInstId(lagged_node, 0)) ==
                        pre_prep_before + 2)))

                # catch up the lagged node;
                # the latest 2 PrePrepares are still stashed
                lagged_node.start_catchup()
                looper.run(
                    eventually(
                        lambda: assertExp(lagged_node.master_last_ordered_3PC >
                                          last_ordered_lagged_before)))

            sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                      sdk_wallet_client, 2)

    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=30)
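
Helpers such as ppDelay, pDelay, cDelay, chk_delay and msg_rep_delay build message-specific delayers for the stashers. The function below is a hypothetical stand-in for cDelay() that only illustrates the shape of such a delayer; the real plenum delayers may differ in detail.

import sys

def commit_delayer(delay=sys.maxsize, instId=None, sender_filter=None):
    # Return the number of seconds to hold a matching Commit message,
    # or None to let the message pass through untouched.
    def delayer(msg_and_sender):
        msg, frm = msg_and_sender
        if type(msg).__name__ != 'Commit':
            return None
        if instId is not None and getattr(msg, 'instId', None) != instId:
            return None
        if sender_filter is not None and not frm.startswith(sender_filter):
            return None
        return delay

    return delayer
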
def test_node_erases_last_sent_pp_key_on_view_change(looper, txnPoolNodeSet,
                                                     sdk_pool_handle,
                                                     sdk_wallet_client, tconf):
    # Get a node with a backup primary replica
    replica = getPrimaryReplica(txnPoolNodeSet, instId=backup_inst_id)
    node = replica.node

    # Send some 3PC-batches and wait until the replica orders the 3PC-batches
    sdk_send_batches_of_random(looper,
                               txnPoolNodeSet,
                               sdk_pool_handle,
                               sdk_wallet_client,
                               num_reqs=3,
                               num_batches=num_batches_before,
                               timeout=tconf.Max3PCBatchWait)

    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc == (0, 3)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Ensure that there is a stored last sent PrePrepare key on the node
    assert LAST_SENT_PRE_PREPARE in node.nodeStatusDB

    # Make the pool perform view change
    ensure_view_change(looper, txnPoolNodeSet)
    ensureElectionsDone(looper, txnPoolNodeSet)

    # Verify that the node has erased the stored last sent PrePrepare key
    for value in node.last_sent_pp_store_helper._load_last_sent_pp_key(
    ).values():
        # +1 because this is after the view change
        assert value == [node.viewNo, num_batches_before + 1]

    # Send a 3PC-batch and ensure that the replica orders it
    sdk_send_batches_of_random(looper,
                               txnPoolNodeSet,
                               sdk_pool_handle,
                               sdk_wallet_client,
                               num_reqs=1,
                               num_batches=num_batches_after,
                               timeout=tconf.Max3PCBatchWait)

    looper.run(
        eventually(
            lambda: assertExp(replica.last_ordered_3pc ==
                              (1, num_batches_before + num_batches_after + 1)),
            retryWait=1,
            timeout=waits.expectedTransactionExecutionTime(nodeCount)))
Example #6
def test_twice_demoted_node_dont_write_txns(txnPoolNodeSet, looper,
                                            sdk_wallet_stewards,
                                            sdk_pool_handle):
    request_count = 5
    demoted_node = txnPoolNodeSet[2]
    alive_pool = list(txnPoolNodeSet)
    alive_pool.remove(demoted_node)

    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, demoted_node)
    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, demoted_node)

    demoted_nym = None
    for _, txn in txnPoolNodeSet[0].poolManager.ledger.getAllTxn():
        txn_data = get_payload_data(txn)
        if txn_data[DATA][ALIAS] == demoted_node.name:
            demoted_nym = txn_data[TARGET_NYM]
            break
    assert demoted_nym
    # Every node demotes `demoted_node`
    assert all(
        node.poolManager.reqHandler.getNodeData(demoted_nym)[SERVICES] == []
        for node in alive_pool)

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_stewards[0], request_count)

    looper.run(
        eventually(
            lambda: assertExp(txnPoolNodeSet[0].domainLedger.size - request_count == \
                              demoted_node.domainLedger.size)))
def test_multiple_batches_for_pool(looper, helpers,
                                   nodeSetWithIntegratedTokenPlugin,
                                   sdk_pool_handle, fees_set, address_main,
                                   mint_tokens):
    node_set = nodeSetWithIntegratedTokenPlugin
    node_stashers = [n.nodeIbStasher for n in node_set]

    amount = get_amount_from_token_txn(mint_tokens)
    init_seq_no = 1
    request1, request2 = nyms_with_fees(2,
                                        helpers,
                                        fees_set,
                                        address_main,
                                        amount,
                                        init_seq_no=init_seq_no)

    txns_count_before = get_committed_txns_count_for_pool(
        node_set, TOKEN_LEDGER_ID)
    with delay_rules(node_stashers, cDelay()):
        r1 = sdk_send_signed_requests(sdk_pool_handle,
                                      [json.dumps(request1.as_dict)])
        looper.runFor(waits.expectedPrePrepareTime(len(node_set)))
        r2 = sdk_send_signed_requests(sdk_pool_handle,
                                      [json.dumps(request2.as_dict)])
        looper.runFor(waits.expectedPrePrepareTime(len(node_set)))
        for n in node_set:
            n.start_catchup()
        for n in node_set:
            looper.run(
                eventually(lambda: assertExp(n.mode == Mode.participating)))
    txns_count_after = get_committed_txns_count_for_pool(
        node_set, TOKEN_LEDGER_ID)
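    # Commits were delayed for the whole pool, so catch-up reverted the
    # uncommitted fee batches and nothing new was committed to the token ledger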
    assert txns_count_after == txns_count_before
    ensure_all_nodes_have_same_data(looper, node_set)
def test_revert_xfer_with_fees_before_catchup(looper, helpers,
                                              nodeSetWithIntegratedTokenPlugin,
                                              sdk_pool_handle, fees,
                                              xfer_mint_tokens,
                                              xfer_addresses):
    nodes = nodeSetWithIntegratedTokenPlugin
    node_stashers = [n.nodeIbStasher for n in nodes]
    helpers.general.do_set_fees(fees)
    [address_giver, address_receiver] = xfer_addresses
    inputs = helpers.general.get_utxo_addresses([address_giver])[0]
    outputs = [{
        ADDRESS: address_receiver,
        AMOUNT: 1000 - fees[XFER_PUBLIC_FEES_ALIAS]
    }]
    request = helpers.request.transfer(inputs, outputs)
    with delay_rules_without_processing(node_stashers, cDelay(), pDelay()):
        helpers.sdk.send_request_objects([request])
        looper.runFor(waits.expectedPrePrepareTime(len(nodes)))
        for n in nodes:
            n.start_catchup()
        for n in nodes:
            looper.run(
                eventually(lambda: assertExp(n.mode == Mode.participating)))
        for n in nodes:
            looper.run(
                eventually(check_state, n, True, retryWait=0.2, timeout=15))
    ensure_all_nodes_have_same_data(looper, nodes)
Example #9
def test_last_committed_after_catchup(looper, helpers,
                                      nodeSetWithIntegratedTokenPlugin,
                                      sdk_pool_handle,
                                      fees_set, address_main, mint_tokens):
    node_set = nodeSetWithIntegratedTokenPlugin
    reverted_node = node_set[-1]

    amount = get_amount_from_token_txn(mint_tokens)
    init_seq_no = 1
    request_1, request_2 = nyms_with_fees(2,
                                          helpers,
                                          fees_set,
                                          address_main,
                                          amount,
                                          init_seq_no=init_seq_no)
    reverted_last_committed = get_last_committed_from_tracker(reverted_node)
    not_reverted_last_committed = get_last_committed_from_tracker(node_set[-1])
    assert reverted_last_committed == not_reverted_last_committed
    with delay_rules(reverted_node.nodeIbStasher, cDelay()):
        """
        Send NYM with FEES and wait for reply. 
        """
        r = sdk_sign_and_submit_req_obj(looper, sdk_pool_handle, helpers.request._steward_wallet, request_1)
        sdk_get_and_check_replies(looper, [r])
        """
        Start catchup. Uncommitted batch for reverted_node should be rejected and it will get 
        NYM with FEES during catchup procedure. 
        """
        reverted_node.start_catchup()
        looper.run(eventually(lambda: assertExp(reverted_node.mode == Mode.participating)))
        assert get_last_committed_from_tracker(reverted_node) ==\
               get_last_committed_from_tracker(node_set[0])
Example #10
def test_freeing_forwarded_preprepared_request(looper, chkFreqPatched,
                                               reqs_for_checkpoint,
                                               txnPoolNodeSet, sdk_pool_handle,
                                               sdk_wallet_steward):
    # Case when both backup and primary replicas had problems
    behind_node = txnPoolNodeSet[-1]

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_steward,
                                         CHK_FREQ, CHK_FREQ)
    with delay_rules(
            behind_node.nodeIbStasher,
            pDelay(delay=sys.maxsize),
            cDelay(delay=sys.maxsize),
    ):
        count = behind_node.spylog.count(behind_node.allLedgersCaughtUp)

        sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool_handle,
                                   sdk_wallet_steward, req_num, req_num)

        looper.run(eventually(node_caughtup, behind_node, count, retryWait=1))

    looper.run(
        eventually(lambda: assertExp(len(behind_node.requests) == req_num)))
    assert all(r.executed for r in behind_node.requests.values()
               if behind_node.seqNoDB.get(r.request.key)[1])

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_steward,
                                         CHK_FREQ, CHK_FREQ)

    # Master and backup replicas do not stash new requests and successfully order them
    assert len(behind_node.requests) == req_num
def test_deletion_non_forwarded_request(
        looper, chkFreqPatched, reqs_for_checkpoint, txnPoolNodeSet,
        sdk_pool_handle, sdk_wallet_steward, tconf, tdir, allPluginsPath):
    behind_node = txnPoolNodeSet[-1]
    for key in behind_node.requests:
        behind_node.replicas.values()[1].discard_req_key(1, key)
    behind_node.requests.clear()

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                         sdk_wallet_steward, CHK_FREQ, CHK_FREQ)
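    # Raise the PROPAGATE quorum above the pool size so it can never be reached
    # and the node never forwards these requests to its replicas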
    behind_node.quorums.propagate = Quorum(len(txnPoolNodeSet) + 1)

    with delay_rules(behind_node.nodeIbStasher,
                     ppDelay(delay=sys.maxsize),
                     pDelay(delay=sys.maxsize),
                     cDelay(delay=sys.maxsize)):
        count = behind_node.spylog.count(behind_node.allLedgersCaughtUp)
        sdk_send_batches_of_random(looper, txnPoolNodeSet, sdk_pool_handle,
                                   sdk_wallet_steward, req_num, req_num)
        looper.run(eventually(node_caughtup, behind_node, count, retryWait=1))

    # We clear the caught-up requests
    looper.run(eventually(lambda: assertExp(len(behind_node.requests) == 0)))
    assert all([len(q) == 0 for r in behind_node.replicas.values() for q in r._ordering_service.requestQueues.values()])
    assert len(behind_node.clientAuthNr._verified_reqs) == 0
    assert len(behind_node.requestSender) == 0
Example #12
def test_backup_stabilized_checkpoint_on_view_change(looper, txnPoolNodeSet,
                                                     sdk_wallet_client,
                                                     sdk_pool_handle):
    # Delta:1
    backup = txnPoolNodeSet[-1].replicas[1]
    count_of_replicas = len(txnPoolNodeSet[0].replicas)
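    # Delay master (instId=0) PrePrepares on every node so that only the
    # backup replicas order the incoming requests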
    with delay_rules_without_processing(
        [n.nodeIbStasher for n in txnPoolNodeSet], ppDelay(instId=0)):
        sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                                 REQ_COUNT)
        looper.run(
            eventually(
                lambda r: assertExp(r.last_ordered_3pc == (0, REQ_COUNT)),
                backup))
        # assert that all requests have been propagated
        for n in txnPoolNodeSet:
            for req in n.requests.values():
                assert req.forwardedTo == count_of_replicas

        ensure_view_change(looper, txnPoolNodeSet)
        ensureElectionsDone(looper, txnPoolNodeSet)

        # check that all requests were freed on backups
        for n in txnPoolNodeSet:
            for req in n.requests.values():
                assert req.forwardedTo == count_of_replicas - 1
Example #13
def test_catchup_after_replica_addition(looper, sdk_pool_handle, txnPoolNodeSet,
                                        sdk_wallet_steward, tdir, tconf, allPluginsPath):
    view_no = txnPoolNodeSet[-1].viewNo
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_steward, 1)
    waitNodeDataEquality(looper, *txnPoolNodeSet)

    # create node
    new_node_name = "Theta"
    sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort, key_proof = \
        prepare_new_node_data(tconf, tdir, new_node_name)
    new_node = create_and_start_new_node(looper=looper, node_name=new_node_name,
                                         tdir=tdir, sigseed=sigseed,
                                         node_ha=(nodeIp, nodePort), client_ha=(clientIp, clientPort),
                                         tconf=tconf, auto_start=True, plugin_path=allPluginsPath,
                                         nodeClass=TestNode)

    _send_txn_for_creating_node(looper, sdk_pool_handle, sdk_wallet_steward, tdir, new_node_name, clientIp,
                                clientPort, nodeIp, nodePort, bls_key, sigseed, key_proof)

    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))

    looper.run(eventually(lambda: assertExp(all(n.viewNo == view_no + 1 for n in txnPoolNodeSet))))
    waitNodeDataEquality(looper, *txnPoolNodeSet)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 1)
    waitNodeDataEquality(looper, *txnPoolNodeSet, exclude_from_check=['check_last_ordered_3pc'])
def test_resending_only_for_known_clients(tdir, looper, stacks, alpha_handler):
    alpha, beta = stacks
    unknown_identity = "unknown_identity"
    pending_client_messages = beta._client_message_provider._pending_client_messages
    msg1 = {'msg': 'msg1'}
    msg2 = {'msg': 'msg2'}

    beta.send(msg1, alpha.listener.IDENTITY)
    beta.send(msg1, unknown_identity)
    assert pending_client_messages[alpha.listener.IDENTITY] == [(0, msg1)]
    assert pending_client_messages[unknown_identity] == [(0, msg1)]
    message_pending_unknown_id = pending_client_messages[unknown_identity]
    beta._client_message_provider._timer.set_time(1)

    alpha.connect(name=beta.name,
                  ha=beta.ha,
                  verKeyRaw=beta.verKeyRaw,
                  publicKeyRaw=beta.publicKeyRaw)

    looper.runFor(0.25)

    alpha.send(msg2, beta.name)
    looper.run(
        eventually(
            lambda msg_handler: assertExp(msg_handler.received_messages ==
                                          [msg1, msg2]), alpha_handler))
    assert alpha.listener.IDENTITY not in pending_client_messages
    assert pending_client_messages[
        unknown_identity] == message_pending_unknown_id
Example #15
def test_lag_less_then_catchup(looper,
                               txnPoolNodeSet,
                               sdk_pool_handle,
                               sdk_wallet_client):
    delayed_node = txnPoolNodeSet[-1]
    other_nodes = list(set(txnPoolNodeSet) - {delayed_node})
    current_view_no = checkViewNoForNodes(txnPoolNodeSet)
    last_ordered_before = delayed_node.master_replica.last_ordered_3pc
    with delay_rules_without_processing(delayed_node.nodeIbStasher, cDelay()):
        # Send txns for stable checkpoint
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, CHK_FREQ)
        # Check that all of the non-delayed nodes have a stable checkpoint
        for n in other_nodes:
            assert n.master_replica._consensus_data.stable_checkpoint == CHK_FREQ

        # Send another txn. This txn will be reordered after view_change
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)
        trigger_view_change(txnPoolNodeSet)
        ensureElectionsDone(looper, txnPoolNodeSet)

        assert delayed_node.master_replica.last_ordered_3pc == last_ordered_before

    # Send txns to stabilize the checkpoint on the other nodes
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, CHK_FREQ - 1)

    pool_pp_seq_no = get_pp_seq_no(other_nodes)
    looper.run(eventually(lambda: assertExp(delayed_node.master_replica.last_ordered_3pc[1] == pool_pp_seq_no)))
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
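
get_pp_seq_no(other_nodes) above is assumed to return the pp_seq_no that the healthy nodes agree on; a minimal sketch under that assumption (hypothetical helper name) is:

def get_pp_seq_no_sketch(nodes):
    # The pp_seq_no component of the master replica's last ordered 3PC key,
    # expected to be identical across the given nodes.
    seq_nos = {n.master_replica.last_ordered_3pc[1] for n in nodes}
    assert len(seq_nos) == 1, "healthy nodes should have ordered the same batches"
    return seq_nos.pop()
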
def test_invalid_msgs_are_not_stashed(tdir, looper, stacks, alpha_handler,
                                      tconf):
    alpha, beta = stacks
    pending_client_messages = beta._client_message_provider._pending_client_messages
    msg = {'msg': 'msg1' * tconf.MSG_LEN_LIMIT}
    assert not pending_client_messages
    assert not alpha_handler.received_messages

    alpha.connect(name=beta.name,
                  ha=beta.ha,
                  verKeyRaw=beta.verKeyRaw,
                  publicKeyRaw=beta.publicKeyRaw)

    looper.runFor(0.25)

    alpha.send(msg, beta.name)
    assert not pending_client_messages
    assert not alpha_handler.received_messages

    msg = {'msg': 'msg1'}
    alpha.send(msg, beta.name)
    looper.run(
        eventually(
            lambda msg_handler: assertExp(msg_handler.received_messages ==
                                          [msg]), alpha_handler))
def test_revert_nym_with_fees_before_catchup(looper, helpers,
                                             nodeSetWithIntegratedTokenPlugin,
                                             fees_set, fees, xfer_mint_tokens,
                                             xfer_addresses):
    nodes = nodeSetWithIntegratedTokenPlugin
    current_amount = get_amount_from_token_txn(xfer_mint_tokens)
    seq_no = get_seq_no(xfer_mint_tokens)
    lagging_node = nodes[-1]
    current_amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, xfer_addresses, current_amount)
    with delay_rules_without_processing(lagging_node.nodeIbStasher, cDelay(),
                                        pDelay()):
        current_amount, seq_no, _ = send_and_check_nym_with_fees(
            helpers, fees_set, seq_no, looper, xfer_addresses, current_amount)
        looper.runFor(waits.expectedPrePrepareTime(len(nodes)))
        lagging_node.start_catchup()
        for n in nodes:
            looper.run(
                eventually(lambda: assertExp(n.mode == Mode.participating)))
        for n in nodes:
            looper.run(
                eventually(check_state, n, True, retryWait=0.2, timeout=15))
    ensure_all_nodes_have_same_data(looper, nodes)
    current_amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, xfer_addresses, current_amount)
    ensure_all_nodes_have_same_data(looper, nodes)
def test_revert_works_for_fees_after_view_change(looper, helpers,
                                                 nodeSetWithIntegratedTokenPlugin,
                                                 sdk_pool_handle,
                                                 fees_set,
                                                 mint_tokens, addresses, fees):
    node_set = nodeSetWithIntegratedTokenPlugin
    current_amount = get_amount_from_token_txn(mint_tokens)
    seq_no = get_seq_no(mint_tokens)
    reverted_node = nodeSetWithIntegratedTokenPlugin[-1]

    current_amount, seq_no, _ = send_and_check_nym_with_fees(helpers, fees_set, seq_no, looper, addresses,
                                                             current_amount)
    current_amount, seq_no, _ = send_and_check_transfer(helpers, addresses, fees, looper, current_amount, seq_no)

    with delay_rules_without_processing(reverted_node.nodeIbStasher, delay_3pc(view_no=0, msgs=Commit)):
        len_batches_before = len(reverted_node.master_replica._ordering_service.batches)
        current_amount, seq_no, _ = send_and_check_transfer(helpers, addresses, fees, looper, current_amount, seq_no)
        current_amount, seq_no, _ = send_and_check_nym_with_fees(helpers, fees_set, seq_no, looper, addresses,
                                                                 current_amount)
        looper.runFor(waits.expectedPrePrepareTime(len(nodeSetWithIntegratedTokenPlugin)))
        len_batches_after = len(reverted_node.master_replica._ordering_service.batches)

        """
        Checks, that we have a 2 new batches
        """
        assert len_batches_after - len_batches_before == 2
        for n in node_set:
            n.view_changer.on_master_degradation()
        ensure_view_change(looper, nodeSetWithIntegratedTokenPlugin)

        looper.run(eventually(lambda: assertExp(reverted_node.mode == Mode.participating)))
    ensure_all_nodes_have_same_data(looper, node_set)

    send_and_check_nym_with_fees(helpers, fees_set, seq_no, looper, addresses, current_amount)
    ensure_all_nodes_have_same_data(looper, node_set)
Example #19
def test_revert_during_view_change_all_nodes_set_fees(
        tconf, nodeSetWithIntegratedTokenPlugin, fees_set, helpers, looper):
    """
        Check that SET_FEES transaction will be written after view change when PREPARE quorum for it is reached
    """
    nodes = nodeSetWithIntegratedTokenPlugin
    node_set = [n.nodeIbStasher for n in nodeSetWithIntegratedTokenPlugin]

    _old_pp_seq_no = get_ppseqno_from_all_nodes(
        nodeSetWithIntegratedTokenPlugin)
    helpers.general.set_fees_without_waiting({ATTRIB_FEES_ALIAS: 3})

    assert _old_pp_seq_no == get_ppseqno_from_all_nodes(
        nodeSetWithIntegratedTokenPlugin)

    with delay_rules(node_set, cDelay()):
        # should be changed for auth rule
        helpers.general.set_fees_without_waiting({ATTRIB_FEES_ALIAS: 4})
        looper.run(
            eventually(
                functools.partial(check_batch_ordered, _old_pp_seq_no,
                                  nodeSetWithIntegratedTokenPlugin)))
        ensure_view_change(looper, nodes)

    ensureElectionsDone(looper=looper, nodes=nodes)
    ensure_all_nodes_have_same_data(looper, nodes)
    for n in nodes:
        looper.run(eventually(lambda: assertExp(n.mode == Mode.participating)))
    for n in nodes:
        looper.run(eventually(check_state, n, True, retryWait=0.2, timeout=15))

    fees = helpers.general.do_get_fees()
    assert fees[FEES][ATTRIB_FEES_ALIAS] == 4
Example #20
def test_revert_works_for_fees_before_catch_up_on_all_nodes(
        looper, helpers, nodeSetWithIntegratedTokenPlugin, sdk_pool_handle,
        sdk_wallet_trustee, fees_set, address_main, mint_tokens):
    node_set = [n.nodeIbStasher for n in nodeSetWithIntegratedTokenPlugin]

    with delay_rules(node_set, cDelay()):
        request = helpers.request.nym()

        request = add_fees_request_with_address(helpers, fees_set, request,
                                                address_main)
        for n in nodeSetWithIntegratedTokenPlugin:
            looper.run(
                eventually(check_state, n, True, retryWait=0.2, timeout=15))

        sdk_send_signed_requests(sdk_pool_handle,
                                 [json.dumps(request.as_dict)])

        for n in nodeSetWithIntegratedTokenPlugin:
            looper.run(
                eventually(check_state, n, False, retryWait=0.2, timeout=15))

        for n in nodeSetWithIntegratedTokenPlugin:
            n.start_catchup()

        for n in nodeSetWithIntegratedTokenPlugin:
            looper.run(
                eventually(lambda: assertExp(n.mode == Mode.participating)))

        for n in nodeSetWithIntegratedTokenPlugin:
            looper.run(
                eventually(check_state, n, True, retryWait=0.2, timeout=15))

    ensure_all_nodes_have_same_data(looper, nodeSetWithIntegratedTokenPlugin)
def test_revert_auth_rule_changing(looper, txnPoolNodeSet, sdk_wallet_trustee,
                                   sdk_wallet_steward, sdk_pool_handle):
    node_stashers = [n.nodeIbStasher for n in txnPoolNodeSet]
    wh, _ = sdk_wallet_trustee
    new_steward_did, new_steward_verkey = create_verkey_did(looper, wh)
    new_steward_did2, new_steward_verkey2 = create_verkey_did(looper, wh)
    """We try to change rule for adding new steward. For this case we """
    changed_constraint = AuthConstraint(role=STEWARD, sig_count=1)
    action = AuthActionAdd(txn_type=NYM, field=ROLE, value=STEWARD)
    with delay_rules_without_processing(node_stashers, pDelay(), cDelay()):
        sdk_send_and_check_auth_rule_request(
            looper,
            sdk_wallet_trustee,
            sdk_pool_handle,
            auth_action=ADD_PREFIX,
            auth_type=action.txn_type,
            field=action.field,
            new_value=action.value,
            old_value=None,
            constraint=changed_constraint.as_dict,
            no_wait=True)
        looper.runFor(waits.expectedPrePrepareTime(len(txnPoolNodeSet)))
        """
        Try to add new steward by already existed trustee.
        Validation should raise exception because we change uncommitted state
        by adding new rule, that "Only steward can add new steward"
        """
        with pytest.raises(RequestRejectedException,
                           match="TRUSTEE can not do this action"):
            sdk_add_new_nym(looper,
                            sdk_pool_handle,
                            sdk_wallet_trustee,
                            'newSteward1',
                            STEWARD_STRING,
                            dest=new_steward_did,
                            verkey=new_steward_verkey)
        looper.runFor(waits.expectedPrePrepareTime(len(txnPoolNodeSet)))
        """
        Catchup should revert config_state and discard rule changing
        """
        for n in txnPoolNodeSet:
            n.start_catchup()
        for n in txnPoolNodeSet:
            looper.run(
                eventually(lambda: assertExp(n.mode == Mode.participating)))
    """
    Try to create new steward by steward
    We can not do this, because AUTH_RULE txn was reverted
    """
    with pytest.raises(RequestRejectedException,
                       match="STEWARD can not do this action"):
        sdk_add_new_nym(looper,
                        sdk_pool_handle,
                        sdk_wallet_steward,
                        'newSteward2',
                        STEWARD_STRING,
                        dest=new_steward_did2,
                        verkey=new_steward_verkey2)
def test_revert_works_for_fees_before_catch_up_on_one_node(
        looper, helpers, nodeSetWithIntegratedTokenPlugin, sdk_pool_handle,
        fees_set, address_main, mint_tokens):
    node_set = nodeSetWithIntegratedTokenPlugin
    reverted_node = node_set[-1]

    amount = get_amount_from_token_txn(mint_tokens)
    init_seq_no = 1
    request_1, request_2 = nyms_with_fees(2,
                                          helpers,
                                          fees_set,
                                          address_main,
                                          amount,
                                          init_seq_no=init_seq_no)
    c_ledger_root_before = get_committed_txn_root_for_pool([reverted_node],
                                                           TOKEN_LEDGER_ID)
    with delay_rules(reverted_node.nodeIbStasher, cDelay()):
        """
        Send NYM with FEES and wait for reply. All of nodes, except reverted_node will order them 
        """
        r = sdk_send_signed_requests(sdk_pool_handle,
                                     [json.dumps(request_1.as_dict)])
        sdk_get_and_check_replies(looper, r)
        check_state(reverted_node, is_equal=False)
        c_ledger_root_for_other = get_committed_txn_root_for_pool(
            node_set[:-1], TOKEN_LEDGER_ID)
        """
        Start catchup. Uncommitted batch for reverted_node should be rejected and it will get 
        NYM with FEES during catchup procedure. 
        """
        reverted_node.start_catchup()
        looper.run(
            eventually(
                lambda: assertExp(reverted_node.mode == Mode.participating)))
        check_state(reverted_node, is_equal=True)
        """
        Check, that committed txn root was changed and it's the same as for others
        """
        c_ledger_root_after = get_committed_txn_root_for_pool([reverted_node],
                                                              TOKEN_LEDGER_ID)
        assert c_ledger_root_after != c_ledger_root_before
        assert c_ledger_root_after == c_ledger_root_for_other
    ensure_all_nodes_have_same_data(looper, node_set)
    c_ledger_root_before = get_committed_txn_root_for_pool(
        node_set, TOKEN_LEDGER_ID)
    """
    Send another NYM with FEES and check, that committed ledger's root was changed
    """
    r = sdk_send_signed_requests(sdk_pool_handle,
                                 [json.dumps(request_2.as_dict)])
    sdk_get_and_check_replies(looper, r)
    c_ledger_root_after = get_committed_txn_root_for_pool(
        node_set, TOKEN_LEDGER_ID)
    assert c_ledger_root_after != c_ledger_root_before
    ensure_all_nodes_have_same_data(looper, node_set)
    for n in node_set:
        check_state(n, is_equal=True)
def test_order_empty_pre_prepare(looper, tconf, txnPoolNodeSet):
    assert all(node.master_replica.last_ordered_3pc == (0, 0)
               for node in txnPoolNodeSet)
    assert all(
        node.spylog.count(node.processOrdered) == 0 for node in txnPoolNodeSet)

    replica = getPrimaryReplica([txnPoolNodeSet[0]], instId=0)
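    # Force the master primary to emit a 3PC batch for the pool ledger even
    # though no pool requests are queued, i.e. an empty PrePrepare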
    replica._ordering_service._do_send_3pc_batch(ledger_id=POOL_LEDGER_ID)

    looper.run(
        eventually(lambda: assertExp(
            all(node.master_replica.last_ordered_3pc == (0, 1)
                for node in txnPoolNodeSet))))
    looper.run(
        eventually(lambda: assertExp(
            all(
                node.spylog.count(node.processOrdered) == 1
                for node in txnPoolNodeSet))))
Example #24
def test_revert_for_all_after_view_change(looper, helpers,
                                          nodeSetWithIntegratedTokenPlugin,
                                          sdk_pool_handle, fees_set,
                                          mint_tokens, addresses, fees):
    node_set = nodeSetWithIntegratedTokenPlugin
    current_amount = get_amount_from_token_txn(mint_tokens)
    seq_no = get_seq_no(mint_tokens)
    reverted_node = nodeSetWithIntegratedTokenPlugin[-1]

    current_amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, addresses, current_amount)
    current_amount, seq_no, _ = send_and_check_transfer(
        helpers, addresses, fees, looper, current_amount, seq_no)

    ensure_all_nodes_have_same_data(looper, node_set)

    with delay_rules([n.nodeIbStasher for n in node_set], cDelay(), pDelay()):
        len_batches_before = len(reverted_node.master_replica.batches)
        current_amount, seq_no, resp1 = send_and_check_transfer(
            helpers,
            addresses,
            fees,
            looper,
            current_amount,
            seq_no,
            check_reply=False)
        current_amount, seq_no, resp2 = send_and_check_nym_with_fees(
            helpers,
            fees_set,
            seq_no,
            looper,
            addresses,
            current_amount,
            check_reply=False)
        looper.runFor(
            waits.expectedPrePrepareTime(
                len(nodeSetWithIntegratedTokenPlugin)))
        len_batches_after = len(reverted_node.master_replica.batches)
        """
        Checks, that we have a 2 new batches
        """
        assert len_batches_after - len_batches_before == 2
        for n in node_set:
            n.view_changer.on_master_degradation()

        ensure_view_change_complete(looper, nodeSetWithIntegratedTokenPlugin)

        looper.run(
            eventually(
                lambda: assertExp(reverted_node.mode == Mode.participating)))
    ensure_all_nodes_have_same_data(looper, node_set)
    sdk_get_and_check_replies(looper, resp1)
    sdk_get_and_check_replies(looper, resp2)
    send_and_check_nym_with_fees(helpers, fees_set, seq_no, looper, addresses,
                                 current_amount)
    ensure_all_nodes_have_same_data(looper, node_set)
Example #25
def testSelfNominationDelay(tdir_for_func):
    nodeNames = ["testA", "testB", "testC", "testD"]
    with TestNodeSet(names=nodeNames, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            prepareNodeSet(looper, nodeSet)

            delay = 30
            # Add node A
            nodeA = addNodeBack(nodeSet, looper, nodeNames[0])
            nodeA.delaySelfNomination(delay)

            nodesBCD = []
            for name in nodeNames[1:]:
                # nodesBCD.append(nodeSet.addNode(name, i+1, AutoMode.never))
                nodesBCD.append(addNodeBack(nodeSet, looper, name))

            # Ensuring that NodeA is started before any other node to demonstrate
            # that it is delaying self nomination
            looper.run(
                eventually(lambda: assertExp(nodeA.isReady()),
                           retryWait=1,
                           timeout=5))

            # Elections should be done
            ensureElectionsDone(looper=looper,
                                nodes=nodeSet,
                                retryWait=1,
                                timeout=10)

            # node A should not have any primary replica
            looper.run(
                eventually(lambda: assertExp(not nodeA.hasPrimary),
                           retryWait=1,
                           timeout=10))

            # Make sure that after at most 30 seconds, nodeA's
            # `decidePrimaries` is called
            looper.run(
                eventually(lambda: assertExp(
                    len(nodeA.spylog.getAll(Node.decidePrimaries.__name__)) > 0
                ),
                           retryWait=1,
                           timeout=30))
def test_view_change_on_quorum_of_master_degraded(txnPoolNodeSet, looper,
                                                  sdk_pool_handle,
                                                  sdk_wallet_steward, viewNo):
    """
    Node will change view even though it does not find the master to be degraded
    when a quorum of nodes agree that master performance degraded
    """

    m_primary_node = get_master_primary_node(list(txnPoolNodeSet))

    # Delay processing of PRE-PREPARE from all non-primary replicas of the master
    # instance so that the master's performance falls and a view change happens
    delayNonPrimaries(txnPoolNodeSet, 0, 10)

    pr = getPrimaryReplica(txnPoolNodeSet, 0)
    reluctantNode = pr.node

    # Count sent instance changes of all nodes
    sentInstChanges = {}
    for n in txnPoolNodeSet:
        sentInstChanges[n.name] = node_sent_instance_changes_count(n)

    # Node reluctant to change view, never says master is degraded
    reluctantNode.monitor.isMasterDegraded = types.MethodType(
        lambda x: False, reluctantNode.monitor)

    backup_replica = txnPoolNodeSet[0].replicas[1]
    backup_last_ordered_before = backup_replica.last_ordered_3pc
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 4)
    # make sure that backups also ordered at least 1 batch to be able to track performance degradation
    looper.run(
        eventually(lambda: assertExp(backup_replica.last_ordered_3pc >
                                     backup_last_ordered_before)))

    for n in txnPoolNodeSet:
        n.checkPerformance()

    # Check that view change happened for all nodes
    waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=viewNo + 1)

    # All nodes except the reluctant node should have sent a view change and
    # thus must have called `sendInstanceChange`
    for n in txnPoolNodeSet:
        if n.name != reluctantNode.name:
            assert node_sent_instance_changes_count(n) > sentInstChanges.get(
                n.name, 0)
        else:
            assert node_sent_instance_changes_count(n) == sentInstChanges.get(
                n.name, 0)

    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
    new_m_primary_node = get_master_primary_node(list(txnPoolNodeSet))
    assert m_primary_node.name != new_m_primary_node.name
    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
Example #27
def test_revert_set_fees_and_view_change_all_nodes(
        nodeSetWithIntegratedTokenPlugin, xfer_mint_tokens, helpers, looper,
        xfer_addresses):
    """
        Send SET_FEES and init view change. Check that it is reverted and transaction passes with old fees
    """
    def _get_len_preprepares(n):
        replica = n.master_replica
        return len(replica._ordering_service.sent_preprepares
                   if replica.isPrimary
                   else replica._ordering_service.prePrepares)

    def _check_len_pprs(old_pprs_len):
        _len_pprs = set([
            _get_len_preprepares(n) for n in nodeSetWithIntegratedTokenPlugin
        ])
        _len_ppr = _len_pprs.pop()
        assert old_pprs_len + 1 == _len_ppr

    helpers.general.do_set_fees({NYM_FEES_ALIAS: 3})
    nodes = nodeSetWithIntegratedTokenPlugin
    node_stashers = [n.nodeIbStasher for n in nodeSetWithIntegratedTokenPlugin]
    seq_no = get_seq_no(xfer_mint_tokens)
    _old_len_pprs = set(
        [_get_len_preprepares(n) for n in nodeSetWithIntegratedTokenPlugin])
    assert len(_old_len_pprs)
    _old_len_ppr = _old_len_pprs.pop()

    with delay_rules_without_processing(node_stashers, cDelay()):
        helpers.general.set_fees_without_waiting({NYM_FEES_ALIAS: 5})
        looper.run(eventually(functools.partial(_check_len_pprs,
                                                _old_len_ppr)))
        send_and_check_nym_with_fees(helpers,
                                     {FEES: {NYM_FEES_ALIAS: 5}},
                                     seq_no,
                                     looper,
                                     xfer_addresses,
                                     1000,
                                     check_reply=False)
        for n in nodeSetWithIntegratedTokenPlugin:
            n.start_catchup()
        for n in nodes:
            looper.run(
                eventually(lambda: assertExp(n.mode == Mode.participating)))
    ensure_all_nodes_have_same_data(looper, nodes)
    send_and_check_nym_with_fees(helpers,
                                 {FEES: {NYM_FEES_ALIAS: 3}},
                                 seq_no,
                                 looper,
                                 xfer_addresses,
                                 1000,
                                 check_reply=False)
    ensure_all_nodes_have_same_data(looper, nodes)
def test_resending_pending_client_msgs(looper, txnPoolNodeSet, sdk_pool_handle,
                                       sdk_wallet_client, sdk_wallet_steward,
                                       tdir, tconf, allPluginsPath,
                                       monkeypatch):
    problem_node = txnPoolNodeSet[1]

    def fail_send_multipart(msg_parts,
                            flags=0,
                            copy=True,
                            track=False,
                            **kwargs):
        raise zmq.ZMQError(113, "")

    # Switch off replies for client from Beta, Gamma, Delta
    for node in txnPoolNodeSet[2:]:
        monkeypatch.setattr(
            node.clientstack.listener,
            'send_multipart',
            lambda msg_parts, flags=0, copy=True, track=False, **kwargs: None)
    monkeypatch.setattr(problem_node.clientstack.listener, 'send_multipart',
                        fail_send_multipart)

    start_master_last_ordered_3pc = txnPoolNodeSet[0].master_last_ordered_3PC[1]
    # Send the first request. Nodes should reject it.
    resp_task = sdk_add_new_nym(looper,
                                sdk_pool_handle,
                                sdk_wallet_client,
                                role=TRUSTEE_STRING,
                                no_wait=True)
    looper.run(
        eventually(
            lambda node: assertExp(node.master_last_ordered_3PC[
                1] == start_master_last_ordered_3pc + 1), txnPoolNodeSet[0]))
    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
    monkeypatch.delattr(problem_node.clientstack.listener,
                        'send_multipart',
                        raising=True)

    # Send the second request.
    sdk_reqs = sdk_send_random_requests(looper, sdk_pool_handle,
                                        sdk_wallet_client, 1)

    # Wait for the reject of the first request; it will be sent together with the reply to the second request.
    with pytest.raises(
            RequestRejectedException,
            match="Only Steward is allowed to do these transactions"):
        _, resp = sdk_get_and_check_replies(looper, [resp_task])[0]

    # Wait for the reply to the second request
    sdk_get_and_check_replies(looper, sdk_reqs)
    monkeypatch.undo()
def test_node_erases_last_sent_pp_key_on_view_change(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf):

    # Get a node with a backup primary replica
    replica = getPrimaryReplica(txnPoolNodeSet, instId=backup_inst_id)
    node = replica.node

    # Send some 3PC-batches and wait until the replica orders the 3PC-batches
    sdk_send_batches_of_random(looper, txnPoolNodeSet,
                               sdk_pool_handle, sdk_wallet_client,
                               num_reqs=3, num_batches=3,
                               timeout=tconf.Max3PCBatchWait)

    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc == (0, 3)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Ensure that there is a stored last sent PrePrepare key on the node
    assert LAST_SENT_PRE_PREPARE in node.nodeStatusDB

    # Make the pool perform view change
    ensure_view_change(looper, txnPoolNodeSet)
    ensureElectionsDone(looper, txnPoolNodeSet)

    # Verify that the node has erased the stored last sent PrePrepare key
    assert LAST_SENT_PRE_PREPARE not in node.nodeStatusDB

    # Send a 3PC-batch and ensure that the replica orders it
    sdk_send_batches_of_random(looper, txnPoolNodeSet,
                               sdk_pool_handle, sdk_wallet_client,
                               num_reqs=1, num_batches=1,
                               timeout=tconf.Max3PCBatchWait)

    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc == (1, 1)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))
def test_apply_stashed_partially_ordered(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_client):
    test_node = getNonPrimaryReplicas(txnPoolNodeSet)[0].node
    test_stasher = test_node.nodeIbStasher
    ledger_size = max(node.domainLedger.size for node in txnPoolNodeSet)

    def check_pool_ordered_some_requests():
        assert max(node.domainLedger.size
                   for node in txnPoolNodeSet) > ledger_size

    def check_test_node_has_stashed_ordered_requests():
        assert len(test_node.stashedOrderedReqs) > 0

    # Delay COMMITs so requests are not ordered on test node
    with delay_rules(test_stasher, cDelay()):
        reqs = sdk_send_random_requests(looper, sdk_pool_handle,
                                        sdk_wallet_client, TOTAL_REQUESTS)
        looper.run(eventually(check_pool_ordered_some_requests))

    # Get some of the txns that need to be ordered
    ledger_info = test_node.ledgerManager.ledgerRegistry[DOMAIN_LEDGER_ID]
    txns = ledger_info.ledger.uncommittedTxns
    txns = txns[:len(txns) // 2]
    assert len(txns) > 1

    # Emulate an incomplete catchup occurring simultaneously with generation of the ORDERED message
    origin_fun = test_node.try_processing_ordered
    ordered_msgs = []
    test_node.try_processing_ordered = lambda msg: ordered_msgs.append(msg)
    test_node.master_replica.revert_unordered_batches()
    looper.run(eventually(lambda: assertExp(len(ordered_msgs) > 0)))

    test_node.mode = Mode.synced
    test_node.try_processing_ordered = origin_fun
    for msg in ordered_msgs:
        test_node.try_processing_ordered(msg)

    looper.run(eventually(check_test_node_has_stashed_ordered_requests))
    for txn in txns:
        ledger_info.ledger.add(txn)
        ledger_info.postTxnAddedToLedgerClbk(DOMAIN_LEDGER_ID, txn)
    test_node.mode = Mode.participating
    test_node.processStashedOrderedReqs()
    for r in test_node.replicas.values():
        r.stasher.unstash_catchup()

    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    sdk_get_and_check_replies(looper, reqs)
Example #31
def test_catching_up_auth_rule_txn(looper, txnPoolNodeSet, sdk_wallet_trustee,
                                   sdk_wallet_steward, sdk_pool_handle):
    delayed_node = txnPoolNodeSet[-1]
    wh, _ = sdk_wallet_trustee
    new_steward_did, new_steward_verkey = create_verkey_did(looper, wh)
    changed_constraint = AuthConstraint(role=STEWARD, sig_count=1)
    action = AuthActionAdd(txn_type=NYM, field=ROLE, value=STEWARD)
    with pytest.raises(RequestRejectedException,
                       match="Not enough TRUSTEE signatures"):
        sdk_add_new_nym(looper,
                        sdk_pool_handle,
                        sdk_wallet_steward,
                        'newSteward2',
                        STEWARD_STRING,
                        dest=new_steward_did,
                        verkey=new_steward_verkey)
    with delay_rules_without_processing(delayed_node.nodeIbStasher, cDelay(),
                                        pDelay(), ppDelay()):
        sdk_send_and_check_auth_rule_request(
            looper,
            sdk_pool_handle,
            sdk_wallet_trustee,
            auth_action=ADD_PREFIX,
            auth_type=action.txn_type,
            field=action.field,
            new_value=action.value,
            old_value=None,
            constraint=changed_constraint.as_dict)
        sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee,
                        'newSteward2')
        delayed_node.start_catchup()
        looper.run(
            eventually(
                lambda: assertExp(delayed_node.mode == Mode.participating)))
    sdk_add_new_nym(looper,
                    sdk_pool_handle,
                    sdk_wallet_steward,
                    'newSteward3',
                    STEWARD_STRING,
                    dest=new_steward_did,
                    verkey=new_steward_verkey)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    config_state = delayed_node.states[CONFIG_LEDGER_ID]
    from_state = config_state.get(config.make_state_path_for_auth_rule(
        action.get_action_id()),
                                  isCommitted=True)
    assert changed_constraint == ConstraintsSerializer(
        config_state_serializer).deserialize(from_state)
def test_backup_replica_resumes_ordering_on_lag_in_checkpoints(
        looper, chkFreqPatched, reqs_for_checkpoint,
        one_replica_and_others_in_backup_instance,
        sdk_pool_handle, sdk_wallet_client, view_change_done):
    """
    Verifies resumption of ordering 3PC-batches on a backup replica
    on detection of a lag in checkpoints
    """

    slow_replica, other_replicas = one_replica_and_others_in_backup_instance
    view_no = slow_replica.viewNo

    # Send a request and ensure that the replica orders the batch for it
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)

    looper.run(
        eventually(lambda: assertExp(slow_replica.last_ordered_3pc == (view_no, 1)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Don't receive Commits from two replicas
    slow_replica.node.nodeIbStasher.delay(
        cDelay(instId=1, sender_filter=other_replicas[0].node.name))
    slow_replica.node.nodeIbStasher.delay(
        cDelay(instId=1, sender_filter=other_replicas[1].node.name))

    # Send a request for which the replica will not be able to order the batch
    # due to an insufficient count of Commits
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Recover reception of Commits
    slow_replica.node.nodeIbStasher.drop_delayeds()
    slow_replica.node.nodeIbStasher.resetDelays()

    # Send requests, but not enough of them
    # to reach the catch-up number of checkpoints
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                             Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP *
                             reqs_for_checkpoint - 2)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Ensure that the replica has not ordered any batches
    # after the very first one
    assert slow_replica.last_ordered_3pc == (view_no, 1)

    # Ensure that the watermarks have not been shifted since the view start
    assert slow_replica.h == 0
    assert slow_replica.H == LOG_SIZE

    # Ensure that the collections related to requests, batches and
    # own checkpoints are not empty.
    # (Note that a primary replica removes requests from requestQueues
    # when creating a batch with them.)
    if slow_replica.isPrimary:
        assert slow_replica.sentPrePrepares
    else:
        assert slow_replica.requestQueues[DOMAIN_LEDGER_ID]
        assert slow_replica.prePrepares
    assert slow_replica.prepares
    assert slow_replica.commits
    assert slow_replica.batches
    assert slow_replica.checkpoints

    # Ensure that there are some stashed checkpoints that have reached quorum
    assert slow_replica.stashed_checkpoints_with_quorum()

    # Send more requests to reach the catch-up number of checkpoints
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                             reqs_for_checkpoint)

    # Ensure that the replica has adjusted last_ordered_3pc to the end
    # of the last checkpoint
    looper.run(
        eventually(lambda: assertExp(slow_replica.last_ordered_3pc ==
                                     (view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Ensure that the watermarks have been shifted so that the lower watermark
    # has the same value as last_ordered_3pc
    assert slow_replica.h == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ
    assert slow_replica.H == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + LOG_SIZE

    # Ensure that the collections related to requests, batches and
    # own checkpoints have been cleared
    assert not slow_replica.requestQueues[DOMAIN_LEDGER_ID]
    assert not slow_replica.sentPrePrepares
    assert not slow_replica.prePrepares
    assert not slow_replica.prepares
    assert not slow_replica.commits
    assert not slow_replica.batches
    assert not slow_replica.checkpoints

    # Ensure that there are no longer any stashed checkpoints with quorum
    assert not slow_replica.stashed_checkpoints_with_quorum()

    # Send a request and ensure that the replica orders the batch for it
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)

    looper.run(
        eventually(lambda: assertExp(slow_replica.last_ordered_3pc ==
                                     (view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 1)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))
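
# The assertions on slow_replica.h / slow_replica.H above boil down to simple
# checkpoint arithmetic. A minimal, self-contained sketch is below; the
# concrete numbers (CHK_FREQ = 5, STASHED_CHECKPOINTS_BEFORE_CATCHUP = 4,
# LOG_SIZE = 25) are assumptions for illustration only; the real values come
# from the patched config and the Replica class.
def _expected_watermarks_after_checkpoint_catchup(chk_freq,
                                                  stashed_before_catchup,
                                                  log_size):
    # After catching up on checkpoints the lower watermark jumps to the end of
    # the last stabilized checkpoint, and the upper one stays LOG_SIZE ahead.
    low = (stashed_before_catchup + 1) * chk_freq
    high = low + log_size
    return low, high


assert _expected_watermarks_after_checkpoint_catchup(5, 4, 25) == (25, 50)
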
def test_stashed_messages_processed_on_backup_replica_ordering_resumption(
        looper, chkFreqPatched, reqs_for_checkpoint,
        one_replica_and_others_in_backup_instance,
        sdk_pool_handle, sdk_wallet_client, view_change_done):
    """
    Verifies resumption of ordering 3PC-batches on a backup replica when a lag
    in checkpoints is detected after some 3PC-messages related to the next
    checkpoint have already been stashed as lying outside of the watermarks.
    Please note that to verify this case the config is set up so that
    LOG_SIZE == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ
    """

    slow_replica, other_replicas = one_replica_and_others_in_backup_instance
    view_no = slow_replica.viewNo

    # Send a request and ensure that the replica orders the batch for it
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)

    looper.run(
        eventually(lambda: assertExp(slow_replica.last_ordered_3pc == (view_no, 1)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Don't receive Commits from two replicas
    slow_replica.node.nodeIbStasher.delay(
        cDelay(instId=1, sender_filter=other_replicas[0].node.name))
    slow_replica.node.nodeIbStasher.delay(
        cDelay(instId=1, sender_filter=other_replicas[1].node.name))

    # Send a request for which the replica will not be able to order the batch
    # due to an insufficient count of Commits
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Receive further Commits from now on
    slow_replica.node.nodeIbStasher.drop_delayeds()
    slow_replica.node.nodeIbStasher.resetDelays()

    # Send requests, but not enough of them
    # to reach the catch-up number of checkpoints
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                             Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP *
                             reqs_for_checkpoint - 2)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Don't receive Checkpoints
    slow_replica.node.nodeIbStasher.delay(chk_delay(instId=1))

    # Send more requests to reach the catch-up number of checkpoints
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                             reqs_for_checkpoint)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Ensure that there are no 3PC-messages stashed
    # as lying outside of the watermarks
    assert not slow_replica.stashingWhileOutsideWaterMarks

    # Send a request for which the batch will be outside of the watermarks
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Ensure that the replica has not ordered any batches
    # after the very first one
    assert slow_replica.last_ordered_3pc == (view_no, 1)

    # Ensure that the watermarks have not been shifted since the view start
    assert slow_replica.h == 0
    assert slow_replica.H == LOG_SIZE

    # Ensure that there are some stashed checkpoints that have reached quorum
    assert slow_replica.stashed_checkpoints_with_quorum()

    # Ensure that now there are 3PC-messages stashed
    # as lying outside of the watermarks
    assert slow_replica.stashingWhileOutsideWaterMarks

    # Receive belated Checkpoints
    slow_replica.node.nodeIbStasher.reset_delays_and_process_delayeds()

    # Ensure that the replica has ordered the batch for the last sent request
    looper.run(
        eventually(lambda: assertExp(slow_replica.last_ordered_3pc ==
                                     (view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 1)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Ensure that the watermarks have been shifted so that the lower watermark
    # now equals the end of the last stable checkpoint in the instance
    assert slow_replica.h == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ
    assert slow_replica.H == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + LOG_SIZE

    # Ensure that there are no longer any stashed checkpoints with quorum
    assert not slow_replica.stashed_checkpoints_with_quorum()

    # Ensure that now there are no 3PC-messages stashed
    # as lying outside of the watermarks
    assert not slow_replica.stashingWhileOutsideWaterMarks

    # Send a request and ensure that the replica orders the batch for it
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)

    looper.run(
        eventually(lambda: assertExp(slow_replica.last_ordered_3pc ==
                                     (view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 2)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))
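
# A tiny illustration (not the actual Replica implementation) of the watermark
# check behind "stashed as lying outside of the watermarks": a 3PC message is
# processed only if its pp_seq_no falls within (h, H]; otherwise it is stashed
# until the watermarks shift.
def _route_3pc_message(pp_seq_no, h, H, stash):
    if h < pp_seq_no <= H:
        return 'process'
    stash.append(pp_seq_no)
    return 'stash'


_stash = []
assert _route_3pc_message(3, 0, 25, _stash) == 'process'
assert _route_3pc_message(26, 0, 25, _stash) == 'stash'
assert _stash == [26]
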
def test_node_erases_last_sent_pp_key_on_pool_restart(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client,
        tconf, tdir, allPluginsPath, chkFreqPatched):

    # Get the node that hosts the primary replica of the backup instance
    replica = getPrimaryReplica(txnPoolNodeSet, instId=backup_inst_id)
    node = replica.node

    # Send some 3PC-batches and wait until the replica orders the 3PC-batches
    sdk_send_batches_of_random(looper, txnPoolNodeSet,
                               sdk_pool_handle, sdk_wallet_client,
                               num_reqs=7, num_batches=7,
                               timeout=tconf.Max3PCBatchWait)

    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc == (0, 7)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Check view no of the node and lastPrePrepareSeqNo of the replica
    assert node.viewNo == 0
    assert replica.lastPrePrepareSeqNo == 7
    assert replica.h == 6
    assert replica.H == 6 + LOG_SIZE

    # Ensure that there is a stored last sent PrePrepare key on the node
    assert LAST_SENT_PRE_PREPARE in node.nodeStatusDB

    # Restart all the nodes in the pool and wait for primary elections to complete
    all_nodes = copy(txnPoolNodeSet)
    for n in all_nodes:
        disconnect_node_and_ensure_disconnected(looper,
                                                txnPoolNodeSet,
                                                n.name,
                                                timeout=nodeCount,
                                                stopNode=True)
        looper.removeProdable(n)
        txnPoolNodeSet.remove(n)
    for n in all_nodes:
        txnPoolNodeSet.append(start_stopped_node(n, looper, tconf, tdir, allPluginsPath))
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper, txnPoolNodeSet)

    node = nodeByName(txnPoolNodeSet, node.name)
    replica = node.replicas[backup_inst_id]

    # Verify that the node has erased the stored last sent PrePrepare key
    assert LAST_SENT_PRE_PREPARE not in node.nodeStatusDB

    # Verify that after the pool restart the replica (which must again be
    # the primary in its instance) has not restored lastPrePrepareSeqNo,
    # has not adjusted last_ordered_3pc and has not shifted the watermarks
    assert node.viewNo == 0
    assert replica.isPrimary
    assert replica.lastPrePrepareSeqNo == 0
    assert replica.last_ordered_3pc == (0, 0)
    assert replica.h == 0
    assert replica.H == 0 + LOG_SIZE

    # Send a 3PC-batch and ensure that the replica orders it
    sdk_send_batches_of_random(looper, txnPoolNodeSet,
                               sdk_pool_handle, sdk_wallet_client,
                               num_reqs=1, num_batches=1,
                               timeout=tconf.Max3PCBatchWait)

    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc == (0, 1)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))
def test_catchup_not_triggered_if_another_in_progress(
        looper,
        chkFreqPatched,
        reqs_for_checkpoint,
        txnPoolNodeSet,
        sdk_pool_handle,
        sdk_wallet_client,
        broken_node_and_others):
    """
    A node misses 3pc messages and checkpoints during some period but later it
    stashes some amount of checkpoints and starts catchup. When the node is
    performing the catchup, it receives more checkpoints enough to start a new
    catchup but it does not start it because the first catchup is in progress.
    """
    max_batch_size = chkFreqPatched.Max3PCBatchSize
    broken_node, other_nodes = broken_node_and_others

    logger.info("Step 1: The node misses quite a lot of 3PC-messages and checkpoints")

    send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet,
                                           sdk_pool_handle,
                                           sdk_wallet_client,
                                           reqs_for_checkpoint + max_batch_size)

    waitNodeDataInequality(looper, broken_node, *other_nodes)

    logger.info(
        "Step 2: The node receives 3PC-messages but cannot process them because "
        "of the missed ones. However, it eventually stashes enough checkpoints "
        "and after that starts a catchup")

    repaired_node = repair_broken_node(broken_node)

    initial_do_start_catchup_times = repaired_node.spylog.count(Node._do_start_catchup)
    initial_all_ledgers_caught_up = repaired_node.spylog.count(Node.allLedgersCaughtUp)

    with delay_rules(repaired_node.nodeIbStasher, cr_delay()):
        send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet,
                                               sdk_pool_handle,
                                               sdk_wallet_client,
                                               (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) *
                                               reqs_for_checkpoint - max_batch_size)

        ensure_all_nodes_have_same_data(looper, other_nodes)
        target_ledger_size = other_nodes[0].domainLedger.size

        looper.run(eventually(lambda: assertExp(repaired_node.mode == Mode.syncing),
                              timeout=waits.expectedPoolInterconnectionTime(len(txnPoolNodeSet)) +
                                      waits.expectedPoolConsistencyProof(len(txnPoolNodeSet))))
    
        assert repaired_node.spylog.count(Node._do_start_catchup) - initial_do_start_catchup_times == 1

        logger.info(
            "Step 3: While doing the catchup, the node receives enough new "
            "checkpoints to start another catchup but does not start it "
            "because the former is still in progress")

        process_checkpoint_times_before = repaired_node.master_replica.spylog.count(Replica.processCheckpoint)

        send_reqs_batches_and_get_suff_replies(looper, txnPoolNodeSet,
                                               sdk_pool_handle,
                                               sdk_wallet_client,
                                               (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) *
                                               reqs_for_checkpoint)

        # Wait until the node receives the new checkpoints from all the other nodes
        looper.run(
            eventually(lambda: assertExp(repaired_node.master_replica.spylog.count(Replica.processCheckpoint) -
                                         process_checkpoint_times_before ==
                                         (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) *
                                         (len(txnPoolNodeSet) - 1)),
                       timeout=waits.expectedPoolInterconnectionTime(len(txnPoolNodeSet))))
    
        # New catchup is not started when another one is in progress
        assert repaired_node.spylog.count(Node._do_start_catchup) - initial_do_start_catchup_times == 1
        assert repaired_node.mode == Mode.syncing

    logger.info("Step 4: The node completes the catchup. The ledger has been "
                "updated to the level determined on its start")

    looper.run(eventually(lambda: assertExp(repaired_node.mode == Mode.participating),
                          timeout=waits.expectedPoolCatchupTime(len(txnPoolNodeSet))))
    assert repaired_node.spylog.count(Node._do_start_catchup) - initial_do_start_catchup_times == 1
    assert repaired_node.spylog.count(Node.allLedgersCaughtUp) - initial_all_ledgers_caught_up == 1
    assert repaired_node.domainLedger.size == target_ledger_size
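
# An illustrative stand-in (not plenum's Node code) for the behaviour checked
# in Step 3: a second catchup trigger is ignored while one is already running,
# so _do_start_catchup is counted exactly once.
class _CatchupGuard:
    def __init__(self):
        self.in_progress = False
        self.starts = 0

    def maybe_start_catchup(self):
        if self.in_progress:
            return False  # another catchup is in progress; ignore the trigger
        self.in_progress = True
        self.starts += 1
        return True

    def on_caught_up(self):
        self.in_progress = False


_guard = _CatchupGuard()
assert _guard.maybe_start_catchup() is True
assert _guard.maybe_start_catchup() is False  # ignored while in progress
_guard.on_caught_up()
assert _guard.starts == 1
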
def test_backup_primary_restores_pp_seq_no_if_view_is_same(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client,
        tconf, tdir, allPluginsPath, chkFreqPatched, view_no):

    # Get a node with a backup primary replica
    replica = getPrimaryReplica(txnPoolNodeSet, instId=backup_inst_id)
    node = replica.node

    # Send some 3PC-batches and wait until the replica orders the 3PC-batches
    sdk_send_batches_of_random(looper, txnPoolNodeSet,
                               sdk_pool_handle, sdk_wallet_client,
                               num_reqs=7, num_batches=7,
                               timeout=tconf.Max3PCBatchWait)

    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc == (view_no, 7)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Check view no of the node and lastPrePrepareSeqNo of the replica
    assert node.viewNo == view_no
    assert replica.lastPrePrepareSeqNo == 7

    # Ensure that the node has stored the last sent PrePrepare key
    assert LAST_SENT_PRE_PREPARE in node.nodeStatusDB
    last_sent_pre_prepare_key = \
        PrePrepareKey(**node_status_db_serializer.deserialize(
            node.nodeStatusDB.get(LAST_SENT_PRE_PREPARE)))
    assert last_sent_pre_prepare_key == PrePrepareKey(inst_id=backup_inst_id,
                                                      view_no=view_no,
                                                      pp_seq_no=7)

    # Restart the node containing the replica
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node.name,
                                            stopNode=True)
    looper.removeProdable(node)
    txnPoolNodeSet.remove(node)

    node = start_stopped_node(node, looper, tconf, tdir, allPluginsPath)
    txnPoolNodeSet.append(node)

    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper, txnPoolNodeSet)

    replica = node.replicas[backup_inst_id]

    # Verify that after the successful propagate-primary procedure the replica
    # (which must still be the primary in its instance) has restored
    # lastPrePrepareSeqNo, adjusted last_ordered_3pc and shifted
    # the watermarks accordingly
    assert node.viewNo == view_no
    assert replica.isPrimary
    assert replica.lastPrePrepareSeqNo == 7
    assert replica.last_ordered_3pc == (view_no, 7)
    assert replica.h == 7
    assert replica.H == 7 + LOG_SIZE

    # Verify also that the stored last sent PrePrepare key has not been erased
    assert LAST_SENT_PRE_PREPARE in node.nodeStatusDB

    # Send a 3PC-batch and ensure that the replica orders it
    sdk_send_batches_of_random(looper, txnPoolNodeSet,
                               sdk_pool_handle, sdk_wallet_client,
                               num_reqs=1, num_batches=1,
                               timeout=tconf.Max3PCBatchWait)

    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc == (view_no, 8)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))
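
# A hypothetical helper contrasting the two restart scenarios above: on a
# single-node restart within the same view the backup primary restores its
# lastPrePrepareSeqNo from the stored last sent PrePrepare key, while after a
# pool restart the stored key is erased and the replica starts from 0.
# This is an illustration only, not the node's actual restore logic.
def _restore_pp_seq_no(stored_key, current_view_no, is_primary_of_instance):
    # stored_key is an (inst_id, view_no, pp_seq_no) tuple or None
    if stored_key is None or not is_primary_of_instance:
        return 0
    _inst_id, stored_view_no, pp_seq_no = stored_key
    return pp_seq_no if stored_view_no == current_view_no else 0


assert _restore_pp_seq_no((1, 0, 7), current_view_no=0,
                          is_primary_of_instance=True) == 7
assert _restore_pp_seq_no(None, current_view_no=0,
                          is_primary_of_instance=True) == 0
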