Example no. 1
def do_view_change_with_propagate_primary_on_one_delayed_node(
        slow_node, nodes, looper, sdk_pool_handle, sdk_wallet_client):

    slow_stasher = slow_node.nodeIbStasher

    fast_nodes = [n for n in nodes if n != slow_node]

    stashers = [n.nodeIbStasher for n in nodes]

    # Get last prepared certificate in pool
    lpc = last_prepared_certificate(nodes)
    # Get pool current view no
    view_no = lpc[0]

    with delay_rules(slow_stasher, icDelay()):
        with delay_rules(slow_stasher, vcd_delay()):
            with delay_rules(stashers, cDelay()):
                # Send request
                request = sdk_send_random_request(looper, sdk_pool_handle, sdk_wallet_client)

                # Wait until this request is prepared on N-f nodes
                looper.run(eventually(check_last_prepared_certificate_on_quorum, nodes, (lpc[0], lpc[1] + 1)))

                # Trigger view change
                for n in nodes:
                    n.view_changer.on_master_degradation()

                # Wait until view change is completed on all nodes except slow one
                waitForViewChange(looper,
                                  fast_nodes,
                                  expectedViewNo=view_no + 1,
                                  customTimeout=waits.expectedPoolViewChangeStartedTimeout(len(nodes)))
                wait_for_elections_done_on_given_nodes(looper,
                                                       fast_nodes,
                                                       getRequiredInstances(len(nodes)),
                                                       timeout=waits.expectedPoolElectionTimeout(len(nodes)))

            # Now all the nodes receive Commits
            # The slow node will accept Commits and order the 3PC-batch in the old view
            looper.runFor(waits.expectedOrderingTime(getNoInstances(len(nodes))))

        # Now slow node receives ViewChangeDones
        waitForViewChange(looper,
                          [slow_node],
                          expectedViewNo=view_no + 1,
                          customTimeout=waits.expectedPoolViewChangeStartedTimeout(len(nodes)))
        wait_for_elections_done_on_given_nodes(looper,
                                               [slow_node],
                                               getRequiredInstances(len(nodes)),
                                               timeout=waits.expectedPoolElectionTimeout(len(nodes)))

    # Now the slow node receives InstanceChanges but discards them because it
    # has already started propagating the primary for the same view.

    # Finish request gracefully
    sdk_get_reply(looper, request)
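# A minimal, self-contained sketch (a hypothetical toy, not plenum's actual
# Stasher API) of the stash/release pattern that the nested delay_rules
# blocks above rely on: while a rule is active, matching messages are held
# back; when the context exits, the rule is dropped and the stash is
# re-processed.
from contextlib import contextmanager

class ToyStasher:
    def __init__(self, process):
        self.process = process       # delivery callback
        self.delayed_types = set()   # message type names currently held back
        self.stashed = []

    def receive(self, msg):
        if type(msg).__name__ in self.delayed_types:
            self.stashed.append(msg)  # hold the message for later
        else:
            self.process(msg)

    def flush(self):
        # Re-deliver stashed messages that are no longer being delayed
        for msg in [m for m in self.stashed
                    if type(m).__name__ not in self.delayed_types]:
            self.stashed.remove(msg)
            self.process(msg)

@contextmanager
def toy_delay_rules(stasher, msg_type_name):
    stasher.delayed_types.add(msg_type_name)
    try:
        yield
    finally:
        stasher.delayed_types.discard(msg_type_name)
        stasher.flush()  # messages stashed under this rule get processed here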
Example no. 3
def test_quorum_after_f_plus_2_nodes_including_primary_turned_off_and_later_on(
        looper, allPluginsPath, tdir, tconf,
        txnPoolNodeSet, wallet1, client1):
    nodes = txnPoolNodeSet

    request1 = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[request1])

    stop_node(nodes[0], looper, nodes)
    waitForViewChange(looper, nodes[1:], expectedViewNo=1)
    ensureElectionsDone(looper, nodes[1:],
                        numInstances=getRequiredInstances(nodeCount))

    request2 = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[request2])

    stop_node(nodes[1], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[2:], expectedViewNo=1)

    request3 = sendRandomRequest(wallet1, client1)
    verify_request_not_replied_and_not_ordered(request3, looper, client1, nodes)

    stop_node(nodes[2], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[3:], expectedViewNo=1)

    request4 = sendRandomRequest(wallet1, client1)
    verify_request_not_replied_and_not_ordered(request4, looper, client1, nodes)

    nodes[2] = start_stopped_node(nodes[2], looper, tconf, tdir, allPluginsPath)
    looper.runFor(waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[3:], expectedViewNo=1)

    request5 = sendRandomRequest(wallet1, client1)
    verify_request_not_replied_and_not_ordered(request5, looper, client1, nodes)

    nodes[1] = start_stopped_node(nodes[1], looper, tconf, tdir, allPluginsPath)
    ensureElectionsDone(looper, nodes[1:],
                        numInstances=getRequiredInstances(nodeCount))
    checkViewNoForNodes(nodes[1:], expectedViewNo=1)

    request6 = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[request6])

    nodes[0] = start_stopped_node(nodes[0], looper, tconf, tdir, allPluginsPath)
    ensureElectionsDone(looper, nodes,
                        numInstances=getRequiredInstances(nodeCount))
    checkViewNoForNodes(nodes, expectedViewNo=1)

    request7 = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[request7])
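# Quorum arithmetic behind the test above (assuming the default 4-node pool,
# so nodeCount = 4): with f = (N - 1) // 3 = 1, ordering a request needs
# N - f = 3 live nodes. One node stopped still leaves a quorum (request2
# succeeds); two or three stopped leave only 2 or 1 live nodes, so
# request3..request5 are neither replied to nor ordered; restarting nodes[1]
# brings the count back to 3 and request6 succeeds.
N = 4                 # hypothetical pool size for illustration
f = (N - 1) // 3      # maximum tolerated faulty nodes
assert N - f == 3     # live nodes needed to order a request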
Example no. 4
def ensureElectionsDone(
        looper: Looper,
        nodes: Sequence[TestNode],
        retryWait: float = None,  # seconds
        customTimeout: float = None,
        numInstances: int = None) -> Sequence[TestNode]:
    # TODO: Change the name to something like `ensure_primaries_selected`
    # since there might not always be an election, there might be a round
    # robin selection
    """
    Wait for elections to be complete

    :param retryWait: retry interval in seconds
    :param customTimeout: specific timeout
    :param numInstances: expected number of protocol instances
    :return: primary replica for each protocol instance
    """

    if retryWait is None:
        retryWait = 1

    if customTimeout is None:
        customTimeout = waits.expectedPoolElectionTimeout(len(nodes))

    return checkProtocolInstanceSetup(looper=looper,
                                      nodes=nodes,
                                      retryWait=retryWait,
                                      customTimeout=customTimeout,
                                      numInstances=numInstances)
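# Hypothetical usage sketch of the helper above (names as used throughout
# these examples; not part of the original suite): wait for primaries with a
# pool-sized timeout and check that one primary per protocol instance came back.
def _example_wait_for_elections(looper, txnPoolNodeSet):
    timeout = waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
    primaries = ensureElectionsDone(looper, txnPoolNodeSet,
                                    customTimeout=timeout)
    assert len(primaries) == getRequiredInstances(len(txnPoolNodeSet))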
def test_restart_majority_to_same_view(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath,
                                        sdk_pool_handle, sdk_wallet_client):
    # Add transaction to ledger
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)

    majority = txnPoolNodeSet[:3]
    minority = txnPoolNodeSet[3:]

    # Restart majority group
    tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
    majority_before_restart = majority.copy()
    restart_nodes(looper, txnPoolNodeSet, majority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=tm, start_one_by_one=False, wait_for_elections=False)
    ensureElectionsDone(looper, majority, instances_list=range(2))

    # Check that nodes in minority group are aware that they might have inconsistent 3PC state
    for node in minority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 1

    # Check that nodes in majority group didn't think they might have inconsistent 3PC state
    for node in majority_before_restart:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that nodes in majority group don't think they might have inconsistent 3PC state
    for node in majority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Restart minority group
    restart_nodes(looper, txnPoolNodeSet, minority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=tm, start_one_by_one=False)

    # Check that all nodes are still functional
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
Example no. 6
def checkProtocolInstanceSetup(looper: Looper,
                               nodes: Sequence[TestNode],
                               retryWait: float = 1,
                               customTimeout: float = None,
                               numInstances: int = None):
    timeout = customTimeout or waits.expectedPoolElectionTimeout(len(nodes))

    checkEveryProtocolInstanceHasOnlyOnePrimary(looper=looper,
                                                nodes=nodes,
                                                retryWait=retryWait,
                                                timeout=timeout,
                                                numInstances=numInstances)

    checkEveryNodeHasAtMostOnePrimary(looper=looper,
                                      nodes=nodes,
                                      retryWait=retryWait,
                                      customTimeout=timeout)

    primaryReplicas = {
        replica.instId: replica
        for node in nodes for replica in node.replicas if replica.isPrimary
    }
    return [
        r[1]
        for r in sorted(primaryReplicas.items(), key=operator.itemgetter(0))
    ]
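# The list returned above is sorted by instance id, so the master primary
# (instance 0) comes first; a hypothetical caller could use it like this:
def _example_master_primary(looper, txnPoolNodeSet):
    primaries = checkProtocolInstanceSetup(looper=looper, nodes=txnPoolNodeSet)
    master_primary = primaries[0]    # replica of protocol instance 0
    assert master_primary.instId == 0
    return master_primary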
def testNodeDoesNotParticipateUntilCaughtUp(txnPoolNodeSet,
                                            nodes_slow_to_process_catchup_reqs,
                                            sdk_node_created_after_some_txns):
    """
    A new node that joins after some transactions should stash new transactions
    until it has caught up
    """

    looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \
        sdk_node_created_after_some_txns
    txnPoolNodeSet.append(new_node)
    old_nodes = txnPoolNodeSet[:-1]
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet_handle, 4)
    chk_commits_prepares_recvd(0, old_nodes, new_node)

    for node in old_nodes:
        node.reset_delays_and_process_delayeds()

    timeout = waits.expectedPoolCatchupTime(len(txnPoolNodeSet)) + \
              catchup_delay + \
              waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=timeout)
    waitNodeDataEquality(looper, new_node, *old_nodes,
                         exclude_from_check=['check_last_ordered_3pc_backup'])

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet_handle, 2)

    # Now the new node receives Commits and Prepares from the old nodes
    with pytest.raises(AssertionError):
        # Since nodes discard 3PC messages for already ordered requests.
        chk_commits_prepares_recvd(0, old_nodes, new_node)
    waitNodeDataEquality(looper, new_node, *old_nodes)
Example no. 8
def checkProtocolInstanceSetup(looper: Looper,
                               nodes: Sequence[TestNode],
                               retryWait: float = 1,
                               customTimeout: float = None,
                               instances: Sequence[int] = None,
                               check_primaries=True):
    timeout = customTimeout or waits.expectedPoolElectionTimeout(len(nodes))

    checkEveryProtocolInstanceHasOnlyOnePrimary(looper=looper,
                                                nodes=nodes,
                                                retryWait=retryWait,
                                                timeout=timeout,
                                                instances_list=instances)

    checkEveryNodeHasAtMostOnePrimary(looper=looper,
                                      nodes=nodes,
                                      retryWait=retryWait,
                                      customTimeout=timeout)

    def check_not_in_view_change():
        assert all([not n.master_replica._consensus_data.waiting_for_new_view for n in nodes])
    looper.run(eventually(check_not_in_view_change, retryWait=retryWait, timeout=timeout))

    if check_primaries:
        for n in nodes[1:]:
            assert nodes[0].primaries == n.primaries

    primaryReplicas = {replica.instId: replica
                       for node in nodes
                       for replica in node.replicas.values() if replica.isPrimary}
    return [r[1] for r in
            sorted(primaryReplicas.items(), key=operator.itemgetter(0))]
Example no. 9
def checkProtocolInstanceSetup(looper: Looper,
                               nodes: Sequence[TestNode],
                               retryWait: float = 1,
                               customTimeout: float = None,
                               instances: Sequence[int] = None,
                               check_primaries=True):
    timeout = customTimeout or waits.expectedPoolElectionTimeout(len(nodes))

    checkEveryProtocolInstanceHasOnlyOnePrimary(looper=looper,
                                                nodes=nodes,
                                                retryWait=retryWait,
                                                timeout=timeout,
                                                instances_list=instances)

    checkEveryNodeHasAtMostOnePrimary(looper=looper,
                                      nodes=nodes,
                                      retryWait=retryWait,
                                      customTimeout=timeout)

    if check_primaries:
        for n in nodes[1:]:
            assert nodes[0].primaries == n.primaries

    primaryReplicas = {
        replica.instId: replica
        for node in nodes for replica in node.replicas.values()
        if replica.isPrimary
    }
    return [
        r[1]
        for r in sorted(primaryReplicas.items(), key=operator.itemgetter(0))
    ]
Example no. 10
def testNodeDoesNotParticipateUntilCaughtUp(txnPoolNodeSet,
                                            nodes_slow_to_process_catchup_reqs,
                                            sdk_node_created_after_some_txns):
    """
    A new node that joins after some transactions should stash new transactions
    until it has caught up
    """

    looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \
        sdk_node_created_after_some_txns
    txnPoolNodeSet.append(new_node)
    old_nodes = txnPoolNodeSet[:-1]
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet_handle, 5)
    chk_commits_prepares_recvd(0, old_nodes, new_node)

    for node in old_nodes:
        node.reset_delays_and_process_delayeds()

    timeout = waits.expectedPoolCatchupTime(len(txnPoolNodeSet)) + \
              catchup_delay + \
              waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=timeout)
    waitNodeDataEquality(looper, new_node, *old_nodes)

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet_handle, 2)

    # Now the new node receives Commits and Prepares from the old nodes
    with pytest.raises(AssertionError):
        # Since nodes discard 3PC messages for already ordered requests.
        chk_commits_prepares_recvd(0, old_nodes, new_node)
    waitNodeDataEquality(looper, new_node, *old_nodes)
def test_restart_half_to_lower_view(looper, txnPoolNodeSet, tconf, tdir,
                                    allPluginsPath, sdk_pool_handle,
                                    sdk_wallet_client):
    # Add transaction to ledger
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    # Move to higher view
    ensure_view_change_complete(looper, txnPoolNodeSet)

    # Restart half of nodes
    tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(
        len(txnPoolNodeSet))
    nodes_before_restart = txnPoolNodeSet.copy()
    restart_nodes(looper,
                  txnPoolNodeSet,
                  txnPoolNodeSet[2:],
                  tconf,
                  tdir,
                  allPluginsPath,
                  after_restart_timeout=tm,
                  start_one_by_one=False)

    # Check that nodes didn't think they may have inconsistent 3PC state
    for node in nodes_before_restart:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that nodes don't think they may have inconsistent 3PC state
    for node in txnPoolNodeSet:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that all nodes are still functional
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
Example no. 13
def ensureElectionsDone(looper: Looper,
                        nodes: Sequence[TestNode],
                        retryWait: float = None,  # seconds
                        customTimeout: float = None,
                        instances_list: Sequence[int] = None) -> Sequence[TestNode]:
    # TODO: Change the name to something like `ensure_primaries_selected`
    # since there might not always be an election, there might be a round
    # robin selection
    """
    Wait for elections to be complete

    :param retryWait: retry interval in seconds
    :param customTimeout: specific timeout
    :param instances_list: ids of the expected protocol instances
    :return: primary replica for each protocol instance
    """

    if retryWait is None:
        retryWait = 1

    if customTimeout is None:
        customTimeout = waits.expectedPoolElectionTimeout(len(nodes))

    return checkProtocolInstanceSetup(
        looper=looper,
        nodes=nodes,
        retryWait=retryWait,
        customTimeout=customTimeout,
        instances=instances_list)
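# With this newer signature the caller passes explicit instance ids rather
# than a count, as the restart tests above do, e.g. (sketch):
#     ensureElectionsDone(looper, alive_nodes, instances_list=range(3))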
Example no. 14
def testNodeDoesNotParticipateUntilCaughtUp(txnPoolNodeSet,
                                            nodes_slow_to_process_catchup_reqs,
                                            nodeCreatedAfterSomeTxns):
    """
    A new node that joins after some transactions should stash new transactions
    until it has caught up
    """

    looper, new_node, client, wallet, newStewardClient, newStewardWallet = \
        nodeCreatedAfterSomeTxns
    txnPoolNodeSet.append(new_node)
    old_nodes = txnPoolNodeSet[:-1]
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
    chk_commits_prepares_recvd(0, old_nodes, new_node)

    for node in old_nodes:
        node.reset_delays_and_process_delayeds()

    timeout = waits.expectedPoolCatchupTime(len(txnPoolNodeSet)) + \
              catchup_delay + \
              waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=timeout)
    waitNodeDataEquality(looper, new_node, *old_nodes)

    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 2)

    # Now the new node receives Commits and Prepares from the old nodes
    with pytest.raises(AssertionError):
        # Since nodes discard 3PC messages for already ordered requests.
        chk_commits_prepares_recvd(0, old_nodes, new_node)
    waitNodeDataEquality(looper, new_node, *old_nodes)
Example no. 15
def changeNodeHa(looper, txnPoolNodeSet, tdirWithClientPoolTxns,
                 poolTxnData, poolTxnStewardNames, tconf, shouldBePrimary, tdir):
    # prepare new ha for node and client stack
    subjectedNode = None
    stewardName = None
    stewardsSeed = None

    for nodeIndex, n in enumerate(txnPoolNodeSet):
        if shouldBePrimary == n.has_master_primary:
            subjectedNode = n
            stewardName = poolTxnStewardNames[nodeIndex]
            stewardsSeed = poolTxnData["seeds"][stewardName].encode()
            break

    nodeStackNewHA, clientStackNewHA = genHa(2)
    logger.debug("change HA for node: {} to {}".format(
        subjectedNode.name, (nodeStackNewHA, clientStackNewHA)))

    nodeSeed = poolTxnData["seeds"][subjectedNode.name].encode()

    # change HA
    stewardClient, req = changeHA(looper, tconf, subjectedNode.name, nodeSeed,
                                  nodeStackNewHA, stewardName, stewardsSeed,
                                  basedir=tdirWithClientPoolTxns)

    waitForSufficientRepliesForRequests(looper, stewardClient,
                                        requests=[req])

    # stop node for which HA will be changed
    subjectedNode.stop()
    looper.removeProdable(subjectedNode)

    # start node with new HA
    config_helper = PNodeConfigHelper(subjectedNode.name, tconf, chroot=tdir)
    restartedNode = TestNode(subjectedNode.name,
                             config_helper=config_helper,
                             config=tconf, ha=nodeStackNewHA,
                             cliha=clientStackNewHA)
    looper.add(restartedNode)
    txnPoolNodeSet[nodeIndex] = restartedNode
    looper.run(checkNodesConnected(txnPoolNodeSet, customTimeout=70))

    electionTimeout = waits.expectedPoolElectionTimeout(
        nodeCount=len(txnPoolNodeSet),
        numOfReelections=3)
    ensureElectionsDone(looper,
                        txnPoolNodeSet,
                        retryWait=1,
                        customTimeout=electionTimeout)

    # start client and check the node HA
    anotherClient, _ = genTestClient(tmpdir=tdirWithClientPoolTxns,
                                     usePoolLedger=True)
    looper.add(anotherClient)
    looper.run(eventually(anotherClient.ensureConnectedToNodes))
    stewardWallet = Wallet(stewardName)
    stewardWallet.addIdentifier(signer=DidSigner(seed=stewardsSeed))
    sendReqsToNodesAndVerifySuffReplies(
        looper, stewardWallet, stewardClient, 8)
def test_all_replicas_hold_request_keys(
        perf_chk_patched,
        looper,
        txnPoolNodeSet,
        sdk_wallet_client,
        sdk_pool_handle):
    """
    All replicas, whether primary or non-primary, hold request keys of
    forwarded requests. Once requests are ordered, their request keys are
    removed from the replica.
    """
    tconf = perf_chk_patched
    delay_3pc = 2
    delay_3pc_messages(txnPoolNodeSet, 0, delay_3pc)
    delay_3pc_messages(txnPoolNodeSet, 1, delay_3pc)

    def chk(count):
        # All replicas have same amount of forwarded request keys and all keys
        # are finalised.
        for node in txnPoolNodeSet:
            for r in node.replicas.values():
                if r.isPrimary is False:
                    assert len(r.requestQueues[DOMAIN_LEDGER_ID]) == count
                    for i in range(count):
                        k = r.requestQueues[DOMAIN_LEDGER_ID][i]
                        assert r.requests[k].finalised
                elif r.isPrimary is True:
                    assert len(r.requestQueues[DOMAIN_LEDGER_ID]) == 0
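    # Rationale for chk (hedged reading of the assertions above): a primary
    # replica drains its requestQueues as soon as it builds a 3PC batch, so
    # its DOMAIN_LEDGER_ID queue should be empty, while non-primary replicas
    # keep the forwarded, finalised request keys queued until the batch is
    # ordered.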

    reqs = sdk_signed_random_requests(looper,
                                      sdk_wallet_client,
                                      tconf.Max3PCBatchSize - 1)
    req_resps = sdk_send_signed_requests(sdk_pool_handle, reqs)
    # Only non primary replicas should have all request keys with them
    looper.run(eventually(chk, tconf.Max3PCBatchSize - 1))
    sdk_get_replies(looper, req_resps, timeout=sdk_eval_timeout(
        tconf.Max3PCBatchSize - 1, len(txnPoolNodeSet),
        add_delay_to_timeout=delay_3pc))
    # Replicas should have no request keys with them since they are ordered
    looper.run(eventually(chk, 0))  # Need to wait since one node might not
    # have processed it.

    delay = 1
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(nom_delay(delay))

    ensure_view_change(looper, txnPoolNodeSet)
    reqs = sdk_signed_random_requests(looper,
                                      sdk_wallet_client,
                                      2 * tconf.Max3PCBatchSize)
    req_resps = sdk_send_signed_requests(sdk_pool_handle, reqs)
    looper.run(eventually(chk, 2 * tconf.Max3PCBatchSize))

    # Since each nomination is delayed and there will be multiple nominations,
    # add some extra time
    timeout = waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) + \
              len(txnPoolNodeSet) * delay
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=timeout)
    sdk_get_replies(looper, req_resps, timeout=timeout)
    looper.run(eventually(chk, 0))
Example no. 17
def test_pool_reaches_quorum_after_f_plus_2_nodes_turned_off_and_later_on(
        looper, allPluginsPath, tdir, tconf, txnPoolNodeSet, wallet1, client1,
        client1Connected):

    nodes = txnPoolNodeSet
    initial_view_no = nodes[0].viewNo

    request = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[request])

    stop_node(nodes[0], looper, nodes)
    waitForViewChange(looper, nodes[1:], expectedViewNo=initial_view_no + 1)
    ensureElectionsDone(looper,
                        nodes[1:],
                        numInstances=getRequiredInstances(nodeCount))

    request = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[request])

    stop_node(nodes[1], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection + 2)
    checkViewNoForNodes(nodes[2:], initial_view_no + 1)

    request = sendRandomRequest(wallet1, client1)
    verify_request_not_replied_and_not_ordered(request, looper, client1, nodes)

    stop_node(nodes[2], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection + 2)
    checkViewNoForNodes(nodes[3:], initial_view_no + 1)

    request = sendRandomRequest(wallet1, client1)
    verify_request_not_replied_and_not_ordered(request, looper, client1, nodes)

    nodes[2] = start_stopped_node(nodes[2], looper, tconf, tdir,
                                  allPluginsPath)
    looper.runFor(waits.expectedPoolElectionTimeout(len(nodes)))

    request = sendRandomRequest(wallet1, client1)
    verify_request_not_replied_and_not_ordered(request, looper, client1, nodes)

    nodes[1] = start_stopped_node(nodes[1], looper, tconf, tdir,
                                  allPluginsPath)
    ensureElectionsDone(looper,
                        nodes[1:],
                        numInstances=getRequiredInstances(nodeCount))
    waitForViewChange(looper, nodes[1:], expectedViewNo=initial_view_no + 1)

    request = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[request])

    nodes[0] = start_stopped_node(nodes[0], looper, tconf, tdir,
                                  allPluginsPath)
    ensureElectionsDone(looper,
                        nodes,
                        numInstances=getRequiredInstances(nodeCount))
    waitForViewChange(looper, nodes, expectedViewNo=initial_view_no + 1)

    request = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[request])
Example no. 18
def changeNodeHa(looper, txnPoolNodeSet, tdirWithClientPoolTxns,
                 tconf, shouldBePrimary, tdir,
                 sdk_pool_handle, sdk_wallet_stewards):
    # prepare new ha for node and client stack
    subjectedNode = None
    node_index = None

    for nodeIndex, n in enumerate(txnPoolNodeSet):
        if shouldBePrimary == n.has_master_primary:
            subjectedNode = n
            node_index = nodeIndex
            break

    nodeStackNewHA, clientStackNewHA = genHa(2)
    logger.debug("change HA for node: {} to {}".format(
        subjectedNode.name, (nodeStackNewHA, clientStackNewHA)))

    # change HA
    sdk_wallet_steward = sdk_wallet_stewards[node_index]
    node_dest = hexToFriendly(subjectedNode.nodestack.verhex)
    sdk_send_update_node(looper, sdk_wallet_steward,
                         sdk_pool_handle,
                         node_dest, subjectedNode.name,
                         nodeStackNewHA[0], nodeStackNewHA[1],
                         clientStackNewHA[0], clientStackNewHA[1],
                         services=[VALIDATOR])

    # stop node for which HA will be changed
    subjectedNode.stop()
    looper.removeProdable(subjectedNode)

    # start node with new HA
    config_helper = PNodeConfigHelper(subjectedNode.name, tconf, chroot=tdir)
    restartedNode = TestNode(subjectedNode.name,
                             config_helper=config_helper,
                             config=tconf, ha=nodeStackNewHA,
                             cliha=clientStackNewHA)
    looper.add(restartedNode)
    txnPoolNodeSet[node_index] = restartedNode
    looper.run(checkNodesConnected(txnPoolNodeSet, customTimeout=70))
    sdk_pool_refresh(looper, sdk_pool_handle)

    electionTimeout = waits.expectedPoolElectionTimeout(
        nodeCount=len(txnPoolNodeSet),
        numOfReelections=3)
    ensureElectionsDone(looper,
                        txnPoolNodeSet,
                        retryWait=1,
                        customTimeout=electionTimeout)

    # start client and check the node HA
    anotherClient, _ = genTestClient(tmpdir=tdirWithClientPoolTxns,
                                     usePoolLedger=True)
    looper.add(anotherClient)
    looper.run(eventually(anotherClient.ensureConnectedToNodes))
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_steward,
                              8)
def test_restart_groups_wp(looper, txnPoolNodeSet, tconf, tdir,
                           sdk_pool_handle, sdk_wallet_client, allPluginsPath):
    tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))

    restart_group = get_group(txnPoolNodeSet, 7, include_primary=True)

    restart_nodes(looper, txnPoolNodeSet, restart_group, tconf, tdir, allPluginsPath,
                  after_restart_timeout=tm, per_add_timeout=None)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
def test_restart_groups_6_of_7_np_no_tm(looper, txnPoolNodeSet, tconf, tdir,
                                        sdk_pool_handle, sdk_wallet_client, allPluginsPath):
    tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))

    restart_group = get_group(txnPoolNodeSet, 6, include_primary=False)

    restart_nodes(looper, txnPoolNodeSet, restart_group, tconf, tdir, allPluginsPath,
                  after_restart_timeout=tm, start_one_by_one=False)
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
Example no. 21
def changeNodeHa(looper, txnPoolNodeSet,
                 tconf, shouldBePrimary, tdir,
                 sdk_pool_handle, sdk_wallet_stewards,
                 sdk_wallet_client):
    # prepare new ha for node and client stack
    subjectedNode = None
    node_index = None

    for nodeIndex, n in enumerate(txnPoolNodeSet):
        if shouldBePrimary == n.has_master_primary:
            subjectedNode = n
            node_index = nodeIndex
            break

    nodeStackNewHA, clientStackNewHA = genHa(2)
    logger.debug("change HA for node: {} to {}".format(
        subjectedNode.name, (nodeStackNewHA, clientStackNewHA)))

    # change HA
    sdk_wallet_steward = sdk_wallet_stewards[node_index]
    node_dest = hexToFriendly(subjectedNode.nodestack.verhex)
    sdk_send_update_node(looper, sdk_wallet_steward,
                         sdk_pool_handle,
                         node_dest, subjectedNode.name,
                         nodeStackNewHA[0], nodeStackNewHA[1],
                         clientStackNewHA[0], clientStackNewHA[1],
                         services=[VALIDATOR])

    # stop node for which HA will be changed
    subjectedNode.stop()
    looper.removeProdable(subjectedNode)

    # start node with new HA
    config_helper = PNodeConfigHelper(subjectedNode.name, tconf, chroot=tdir)
    restartedNode = TestNode(subjectedNode.name,
                             config_helper=config_helper,
                             config=tconf, ha=nodeStackNewHA,
                             cliha=clientStackNewHA)
    looper.add(restartedNode)
    txnPoolNodeSet[node_index] = restartedNode
    looper.run(checkNodesConnected(txnPoolNodeSet, customTimeout=70))

    electionTimeout = waits.expectedPoolElectionTimeout(
        nodeCount=len(txnPoolNodeSet),
        numOfReelections=3)
    ensureElectionsDone(looper,
                        txnPoolNodeSet,
                        retryWait=1,
                        customTimeout=electionTimeout)

    sdk_pool_refresh(looper, sdk_pool_handle)
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              8)
def test_all_replicas_hold_request_keys(perf_chk_patched, looper,
                                        txnPoolNodeSet, sdk_wallet_client,
                                        sdk_pool_handle):
    """
    All replicas, whether primary or non-primary, hold request keys of
    forwarded requests. Once requests are ordered, their request keys are
    removed from the replica.
    """
    tconf = perf_chk_patched
    delay_3pc = 2
    delay_3pc_messages(txnPoolNodeSet, 0, delay_3pc)
    delay_3pc_messages(txnPoolNodeSet, 1, delay_3pc)

    def chk(count):
        # All replicas have same amount of forwarded request keys and all keys
        # are finalised.
        for node in txnPoolNodeSet:
            for r in node.replicas.values():
                if r.isPrimary is False:
                    assert len(r.requestQueues[DOMAIN_LEDGER_ID]) == count
                    for i in range(count):
                        k = r.requestQueues[DOMAIN_LEDGER_ID][i]
                        assert r.requests[k].finalised
                elif r.isPrimary is True:
                    assert len(r.requestQueues[DOMAIN_LEDGER_ID]) == 0

    reqs = sdk_signed_random_requests(looper, sdk_wallet_client,
                                      tconf.Max3PCBatchSize - 1)
    req_resps = sdk_send_signed_requests(sdk_pool_handle, reqs)
    # Only non primary replicas should have all request keys with them
    looper.run(eventually(chk, tconf.Max3PCBatchSize - 1))
    sdk_get_replies(looper,
                    req_resps,
                    timeout=sdk_eval_timeout(tconf.Max3PCBatchSize - 1,
                                             len(txnPoolNodeSet),
                                             add_delay_to_timeout=delay_3pc))
    # Replicas should have no request keys with them since they are ordered
    looper.run(eventually(chk, 0))  # Need to wait since one node might not
    # have processed it.

    delay = 1
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(nom_delay(delay))

    ensure_view_change(looper, txnPoolNodeSet)
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client,
                                      2 * tconf.Max3PCBatchSize)
    req_resps = sdk_send_signed_requests(sdk_pool_handle, reqs)
    looper.run(eventually(chk, 2 * tconf.Max3PCBatchSize))

    # Since each nomination is delayed and there will be multiple nominations,
    # add some extra time
    timeout = waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) + \
              len(txnPoolNodeSet) * delay
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=timeout)
    sdk_get_replies(looper, req_resps, timeout=timeout)
    looper.run(eventually(chk, 0))
Example no. 23
def waitForViewChange(looper, nodeSet, expectedViewNo=None, customTimeout=None):
    """
    Waits for nodes to reach the same view.
    Raises an exception on timeout.
    """

    timeout = customTimeout or waits.expectedPoolElectionTimeout(len(nodeSet))
    return looper.run(eventually(checkViewNoForNodes,
                                 nodeSet,
                                 expectedViewNo,
                                 timeout=timeout))
def test_view_change_without_primary(nodeSet, looper,
                                     patched_view_change_timeout):

    first, others = stop_nodes_and_remove_first(looper, nodeSet)

    start_and_connect_nodes(looper, others)

    timeout = waits.expectedPoolElectionTimeout(len(nodeSet)) + patched_view_change_timeout

    checkProtocolInstanceSetup(looper=looper, nodes=others, retryWait=1,
                               customTimeout=timeout,
                               numInstances=getRequiredInstances(len(nodeSet)))
Example no. 25
def waitForViewChange(looper, txnPoolNodeSet, expectedViewNo=None,
                      customTimeout=None):
    """
    Waits for nodes to reach the same view.
    Raises an exception on timeout.
    """

    timeout = customTimeout or waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
    return looper.run(eventually(checkViewNoForNodes,
                                 txnPoolNodeSet,
                                 expectedViewNo,
                                 timeout=timeout))
def testPrimaryElectionCase1(case1Setup, looper, keySharedNodes):
    """
    Case 1 - A node making multiple nominations for a particular node. Consider
    4 nodes A, B, C and D. Let's say node B is malicious and is repeatedly
    nominating Node D
    """
    nodes = keySharedNodes
    nodeA, nodeB, nodeC, nodeD = [nodes.getNode(nm) for nm in nodes.nodeNames]

    # Doesn't matter if nodes reach the ready state or not. Just start them
    looper.run(checkNodesConnected(nodes))

    # Node B sends multiple NOMINATE messages for Node D but only after A has
    # nominated itself
    timeout = waits.expectedPoolNominationTimeout(
        nodeCount=len(keySharedNodes))
    looper.run(
        eventually(checkNomination,
                   nodeA,
                   nodeA.name,
                   retryWait=.25,
                   timeout=timeout))

    instId = getSelfNominationByNode(nodeA)

    for i in range(5):
        # nodeB.send(Nomination(nodeD.name, instId, nodeB.viewNo))
        nodeB.send(nominationByNode(nodeD.name, nodeB, instId))
    nodeB.nodestack.flushOutBoxes()

    # No node from node A, node C, node D (node B is malicious anyway so not
    # considering it) should have more than one nomination for node D since
    # node D is slow. The one nomination for D, that nodes A, C
    # and D might have would be because of node B
    for node in [nodeA, nodeC, nodeD]:
        assert [n[0] for n in node.elector.nominations[instId].values()].count(
            Replica.generateName(nodeD.name, instId)) \
            <= 1

    timeout = waits.expectedPoolElectionTimeout(nodeCount) + delayOfNomination
    primaryReplicas = ensureElectionsDone(looper=looper,
                                          nodes=nodes,
                                          customTimeout=timeout)

    for node in nodes:
        logger.debug("{}'s nominations {}".format(node,
                                                  node.elector.nominations))
    # Node D should not have any primary
    assert not nodeD.hasPrimary
    # Node D should not have any replica among the primary replicas
    assert nodeD.name not in [pr.name for pr in primaryReplicas]
def test_view_change_without_primary(txnPoolNodeSet, looper,
                                     patched_view_change_timeout):
    first, others = stop_nodes_and_remove_first(looper, txnPoolNodeSet)

    start_and_connect_nodes(looper, others)

    timeout = waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) + patched_view_change_timeout

    checkProtocolInstanceSetup(looper=looper, nodes=txnPoolNodeSet, retryWait=1,
                               customTimeout=timeout,
                               instances=range(getRequiredInstances(len(txnPoolNodeSet))))
Example no. 28
def test_all_replicas_hold_request_keys(looper, txnPoolNodeSet, client1,
                                        wallet1, client1Connected, tconf):
    """
    All replicas, whether primary or non-primary, hold request keys of
    forwarded requests. Once requests are ordered, their request keys are
    removed from the replica.
    """
    delay_3pc_messages(txnPoolNodeSet, 0, 2)
    delay_3pc_messages(txnPoolNodeSet, 1, 2)

    def chk(count):
        # All replicas have same amount of forwarded request keys and all keys
        # are finalised.
        for node in txnPoolNodeSet:
            for r in node.replicas:
                if r.isPrimary is False:
                    assert len(r.requestQueues[DOMAIN_LEDGER_ID]) == count
                    for i in range(count):
                        k = r.requestQueues[DOMAIN_LEDGER_ID][i]
                        assert r.requests[k].finalised
                elif r.isPrimary is True:
                    assert len(r.requestQueues[DOMAIN_LEDGER_ID]) == 0

    reqs = sendRandomRequests(wallet1, client1, tconf.Max3PCBatchSize - 1)
    # Only non primary replicas should have all request keys with them
    looper.run(eventually(chk, tconf.Max3PCBatchSize - 1))
    waitForSufficientRepliesForRequests(looper,
                                        client1,
                                        requests=reqs,
                                        add_delay_to_timeout=2)
    # Replicas should have no request keys with them since they are ordered
    looper.run(eventually(chk, 0))  # Need to wait since one node might not
    # have processed it.

    delay = 1
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(nom_delay(delay))

    ensure_view_change(looper, txnPoolNodeSet)
    reqs = sendRandomRequests(wallet1, client1, 2 * tconf.Max3PCBatchSize)
    looper.run(eventually(chk, 2 * tconf.Max3PCBatchSize))

    # Since each nomination is delayed and there will be multiple nominations,
    # add some extra time
    timeout = waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) + \
              len(txnPoolNodeSet) * delay
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=timeout)
    waitForSufficientRepliesForRequests(looper,
                                        client1,
                                        requests=reqs,
                                        add_delay_to_timeout=2)
    looper.run(eventually(chk, 0))
Example no. 29
def checkEveryNodeHasAtMostOnePrimary(looper: Looper,
                                      nodes: Sequence[TestNode],
                                      retryWait: float = None,
                                      customTimeout: float = None):
    def checkAtMostOnePrim(node):
        prims = [r for r in node.replicas if r.isPrimary]
        assert len(prims) <= 1

    timeout = customTimeout or waits.expectedPoolElectionTimeout(len(nodes))
    for node in nodes:
        looper.run(eventually(checkAtMostOnePrim,
                              node,
                              retryWait=retryWait,
                              timeout=timeout))
Example no. 30
def checkEveryNodeHasAtMostOnePrimary(looper: Looper,
                                      nodes: Sequence[TestNode],
                                      retryWait: float = None,
                                      customTimeout: float = None):
    def checkAtMostOnePrim(node):
        prims = [r for r in node.replicas.values() if r.isPrimary]
        assert len(prims) <= 1

    timeout = customTimeout or waits.expectedPoolElectionTimeout(len(nodes))
    for node in nodes:
        looper.run(eventually(checkAtMostOnePrim,
                              node,
                              retryWait=retryWait,
                              timeout=timeout))
def test_view_change_without_primary(txnPoolNodeSet, looper, tconf):
    first, others = stop_nodes_and_remove_first(looper, txnPoolNodeSet)

    start_and_connect_nodes(looper, others)

    timeout = waits.expectedPoolElectionTimeout(
        len(txnPoolNodeSet)) + tconf.NEW_VIEW_TIMEOUT

    checkProtocolInstanceSetup(looper=looper,
                               nodes=txnPoolNodeSet,
                               retryWait=1,
                               customTimeout=timeout,
                               instances=range(
                                   getRequiredInstances(len(txnPoolNodeSet))))
def test_restart_to_same_view_with_killed_primary(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath,
                                                  sdk_pool_handle, sdk_wallet_client):
    restart_timeout = tconf.ToleratePrimaryDisconnection + \
                      waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))

    primary = txnPoolNodeSet[0]
    alive_nodes = txnPoolNodeSet[1:]
    minority = alive_nodes[-1:]
    majority = alive_nodes[:-1]

    # Move to higher view by killing primary
    primary.cleanupOnStopping = True
    primary.stop()
    looper.removeProdable(primary)
    ensure_node_disconnected(looper, primary, txnPoolNodeSet)
    waitForViewChange(looper, alive_nodes, 1, customTimeout=VIEW_CHANGE_TIMEOUT)
    ensureElectionsDone(looper, alive_nodes, instances_list=range(3))

    # Add transaction to ledger
    sdk_send_random_and_check(looper, alive_nodes, sdk_pool_handle, sdk_wallet_client, 1)

    # Restart majority group
    majority_before_restart = majority.copy()
    restart_nodes(looper, alive_nodes, majority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=restart_timeout, start_one_by_one=False, wait_for_elections=False)
    waitForViewChange(looper, majority, 1, customTimeout=2.1 * VIEW_CHANGE_TIMEOUT)
    ensureElectionsDone(looper, majority, instances_list=range(3))

    # Check that nodes in minority group are aware that they might have inconsistent 3PC state
    for node in minority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 1

    # Check that nodes in majority group didn't think they might have inconsistent 3PC state
    for node in majority_before_restart:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that nodes in majority group don't think they might have inconsistent 3PC state
    for node in majority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Restart minority group
    restart_nodes(looper, alive_nodes, minority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=restart_timeout, start_one_by_one=False, wait_for_elections=False)
    ensureElectionsDone(looper, alive_nodes, instances_list=range(3))

    # Check that all nodes are still functional
    sdk_ensure_pool_functional(looper, alive_nodes, sdk_wallet_client, sdk_pool_handle)
def test_restart_to_same_view_with_killed_primary(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath,
                                                  sdk_pool_handle, sdk_wallet_client):
    restart_timeout = tconf.ToleratePrimaryDisconnection + \
                      waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))

    primary = txnPoolNodeSet[0]
    alive_nodes = txnPoolNodeSet[1:]
    minority = alive_nodes[-1:]
    majority = alive_nodes[:-1]

    # Move to higher view by killing primary
    primary.cleanupOnStopping = True
    primary.stop()
    looper.removeProdable(primary)
    ensure_node_disconnected(looper, primary, txnPoolNodeSet)
    waitForViewChange(looper, alive_nodes, 1, customTimeout=VIEW_CHANGE_TIMEOUT)
    ensureElectionsDone(looper, alive_nodes, numInstances=3)

    # Add transaction to ledger
    sdk_send_random_and_check(looper, alive_nodes, sdk_pool_handle, sdk_wallet_client, 1)

    # Restart majority group
    majority_before_restart = majority.copy()
    restart_nodes(looper, alive_nodes, majority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=restart_timeout, start_one_by_one=False, wait_for_elections=False)
    waitForViewChange(looper, majority, 1, customTimeout=2.1 * VIEW_CHANGE_TIMEOUT)
    ensureElectionsDone(looper, majority, numInstances=3)

    # Check that nodes in minority group are aware that they might have inconsistent 3PC state
    for node in minority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 1

    # Check that nodes in majority group didn't think they might have inconsistent 3PC state
    for node in majority_before_restart:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that nodes in majority group don't think they might have inconsistent 3PC state
    for node in majority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Restart minority group
    restart_nodes(looper, alive_nodes, minority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=restart_timeout, start_one_by_one=False, wait_for_elections=False)
    ensureElectionsDone(looper, alive_nodes, numInstances=3)

    # Check that all nodes are still functional
    sdk_ensure_pool_functional(looper, alive_nodes, sdk_wallet_client, sdk_pool_handle)
def testPrimaryElectionCase1(case1Setup, looper, txnPoolNodeSet):
    """
    Case 1 - A node making multiple nominations for a particular node. Consider
    4 nodes A, B, C and D. Let's say node B is malicious and is repeatedly
    nominating Node D
    """
    nodeA, nodeB, nodeC, nodeD = txnPoolNodeSet

    # Doesn't matter if nodes reach the ready state or not. Just start them
    looper.run(checkNodesConnected(txnPoolNodeSet))

    # Node B sends multiple NOMINATE messages for Node D but only after A has
    # nominated itself
    timeout = waits.expectedPoolNominationTimeout(
        nodeCount=len(txnPoolNodeSet))
    looper.run(eventually(checkNomination, nodeA, nodeA.name,
                          retryWait=.25, timeout=timeout))

    instId = getSelfNominationByNode(nodeA)

    for i in range(5):
        # nodeB.send(Nomination(nodeD.name, instId, nodeB.viewNo))
        nodeB.send(nominationByNode(nodeD.name, nodeB, instId))
    nodeB.nodestack.flushOutBoxes()

    # No node from node A, node C, node D (node B is malicious anyway so not
    # considering it) should have more than one nomination for node D since
    # node D is slow. The one nomination for D, that nodes A, C
    # and D might have would be because of node B
    for node in [nodeA, nodeC, nodeD]:
        assert [n[0] for n in node.elector.nominations[instId].values()].count(
            Replica.generateName(nodeD.name, instId)) \
               <= 1

    timeout = waits.expectedPoolElectionTimeout(nodeCount) + delayOfNomination
    primaryReplicas = ensureElectionsDone(looper=looper,
                                          nodes=txnPoolNodeSet, customTimeout=timeout)

    for node in txnPoolNodeSet:
        logger.debug(
            "{}'s nominations {}".format(node, node.elector.nominations))
    # Node D should not have any primary
    assert not nodeD.hasPrimary
    # Node D should not have any replica among the primary replicas
    assert nodeD.name not in [pr.name for pr in primaryReplicas]
def testPrimarySelectionAfterPoolReady(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward):
    """
    Once the pool is ready (node has connected to at least 3 other nodes),
    appropriate primary replicas should be selected.
    """

    def checkPrimaryPlacement():
        # Node names sorted by rank
        sortedNodes = sorted(txnPoolNodeSet,
                             key=operator.attrgetter("rank"))

        for idx, node in enumerate(sortedNodes):
            # For instance 0, the primary replica should be on the node with
            # rank 0
            if idx == 0:
                Replica.generateName(sortedNodes[idx].name, 0)
                assert node.replicas[0].isPrimary
                assert not node.replicas[1].isPrimary
                assert not node.replicas[2].isPrimary

            # For instance 1, the primary replica should be on the node with
            # rank 1
            if idx == 1:
                Replica.generateName(sortedNodes[idx].name, 1)
                assert not node.replicas[0].isPrimary
                assert node.replicas[1].isPrimary
                assert not node.replicas[2].isPrimary

            # For instance 2, the primary replica should be on the node with
            # rank 2
            if idx == 2:
                Replica.generateName(sortedNodes[idx].name, 2)
                assert not node.replicas[0].isPrimary
                assert not node.replicas[1].isPrimary
                assert node.replicas[2].isPrimary

    check_rank_consistent_across_each_node(txnPoolNodeSet)
    # Check if the primary is on the correct node
    timeout = waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
    looper.run(eventually(checkPrimaryPlacement, retryWait=1, timeout=timeout))
    # Check if every protocol instance has one and only one primary and any node
    #  has no more than one primary
    checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)
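
The placement rule being asserted is simply "the primary replica of protocol instance i lives on the node whose rank is i". An illustrative helper (not part of the test suite) that computes the expected primary replica name under that rule:

def expected_primary_name(nodes, inst_id):
    # Sort nodes by rank; instance i's primary is hosted by the node of rank i
    node_of_rank = sorted(nodes, key=operator.attrgetter("rank"))[inst_id]
    return Replica.generateName(node_of_rank.name, inst_id)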
def testPrimaryElectionCase5(case5Setup, looper, txnPoolNodeSet):
    """
    Case 5 - A node makes primary declarations for multiple other nodes.
    Consider 4 nodes A, B, C and D. Let's say node B is malicious and
    declares node C as primary to all nodes, and then also declares
    node D as primary to all nodes.
    """
    A, B, C, D = txnPoolNodeSet

    looper.run(checkNodesConnected(txnPoolNodeSet))

    BRep = Replica.generateName(B.name, 0)
    CRep = Replica.generateName(C.name, 0)
    DRep = Replica.generateName(D.name, 0)

    # Node B first sends PRIMARY msgs for Node C to all nodes
    # B.send(Primary(CRep, 0, B.viewNo))
    B.send(primaryByNode(CRep, B, 0))
    # Node B sends PRIMARY msgs for Node D to all nodes
    # B.send(Primary(DRep, 0, B.viewNo))
    B.send(primaryByNode(DRep, B, 0))

    # Ensure elections are done
    # also have to take into account the catchup procedure
    timeout = waits.expectedPoolElectionTimeout(len(txnPoolNodeSet)) + \
              waits.expectedPoolCatchupTime(len(txnPoolNodeSet)) + \
              delayOfElectionDone
    ensureElectionsDone(looper=looper,
                        nodes=txnPoolNodeSet,
                        customTimeout=timeout)

    # All of nodes A, C and D (node B is malicious anyway, so it is not
    # considered) should have a primary declaration for node C from node B,
    # since node B declared node C first
    for node in [A, C, D]:
        logger.debug(
            "node {} should have primary declaration for C from node B".format(
                node))
        assert node.elector.primaryDeclarations[0][BRep][0] == CRep
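
The "first declaration wins" behaviour asserted here can be sketched in a few lines (illustrative names, not plenum's actual elector code): a per-instance map keyed by sender is written once, and later declarations from the same sender are discarded.

def record_primary_declaration(primary_declarations, inst_id, sender, declared):
    # primary_declarations: {inst_id: {sender_replica_name: (declared_name,)}}
    inst = primary_declarations.setdefault(inst_id, {})
    if sender in inst:
        return False  # a repeated declaration from this sender is ignored
    inst[sender] = (declared,)
    return True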
def testPrimaryElectionCase4(case4Setup, looper):
    """
    Case 4 - A node makes multiple primary declarations for a particular node.
    Consider 4 nodes A, B, C and D. Let's say node B is malicious and
    repeatedly declares node D as primary.
    """
    allNodes = case4Setup
    A, B, C, D = allNodes

    looper.run(checkNodesConnected(allNodes))

    # Node B sends multiple declarations of node D's 0th protocol instance as
    # primary to all nodes
    for i in range(5):
        # B.send(Primary(D.name, 0, B.viewNo))
        B.send(primaryByNode(D.name, B, 0))

    # None of nodes A, C and D (node B is malicious anyway, so it is not
    # considered) should have more than one primary declaration for node D,
    # since node D is slow. The single primary declaration for node D that
    # nodes A, C and D might have would have come from node B.
    # Note: the check below reads `node`, which is bound by the loop further
    # down before each run of the check.
    def x():
        primDecs = [p[0] for p in node.elector.primaryDeclarations[0].values()]
        assert primDecs.count(D.name) <= 1

    # also have to take into account the catchup procedure
    timeout = waits.expectedPoolNominationTimeout(len(allNodes)) + \
              waits.expectedPoolCatchupTime(len(allNodes))

    for node in (A, C, D):
        looper.run(eventually(x, retryWait=.5, timeout=timeout))

    timeout = waits.expectedPoolElectionTimeout(
        len(allNodes)) + delaySelfNomination
    ensureElectionsDone(looper=looper, nodes=allNodes, customTimeout=timeout)

    # Node D should not have any primary replica
    assert not D.hasPrimary
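
The eventually() wrapper used above keeps re-running the check until it stops raising or the timeout expires. A stripped-down, synchronous illustration of that retry loop (plenum's real eventually is asynchronous and is driven by the looper):

import time

def eventually_sketch(check, retryWait=0.5, timeout=10):
    # Re-run `check` until it passes or the deadline expires
    deadline = time.time() + timeout
    while True:
        try:
            return check()
        except Exception:
            if time.time() >= deadline:
                raise
            time.sleep(retryWait)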
Example no. 40
def test_view_change_on_start(tconf, txnPoolNodeSet, looper, sdk_pool_handle,
                              sdk_wallet_client):
    """
    Do a view change on a pool where requests have been sent but not yet ordered
    """
    old_view_no = txnPoolNodeSet[0].viewNo
    master_primary = get_master_primary_node(txnPoolNodeSet)
    other_nodes = [n for n in txnPoolNodeSet if n != master_primary]
    delay_3pc = 10
    delay_3pc_messages(txnPoolNodeSet, 0, delay_3pc)
    sent_batches = 2
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                             sent_batches * tconf.Max3PCBatchSize)

    def chk1():
        t_root, s_root = check_uncommitteds_equal(other_nodes)
        assert master_primary.domainLedger.uncommittedRootHash != t_root
        assert master_primary.states[DOMAIN_LEDGER_ID].headHash != s_root

    looper.run(eventually(chk1, retryWait=1))
    timeout = tconf.PerfCheckFreq + \
              waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
    waitForViewChange(looper,
                      txnPoolNodeSet,
                      old_view_no + 1,
                      customTimeout=timeout)

    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
    check_uncommitteds_equal(txnPoolNodeSet)

    reset_delays_and_process_delayeds(txnPoolNodeSet)
    sdk_send_random_and_check(looper,
                              txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              2 * tconf.Max3PCBatchSize,
                              add_delay_to_timeout=delay_3pc)
    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
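
chk1 above relies on check_uncommitteds_equal() returning the (shared) uncommitted roots of a node group. A hedged sketch of what such a helper verifies, reusing the attribute names from the test (treat this as an assumption about the helper, not its actual implementation):

def check_uncommitteds_equal_sketch(nodes):
    # Every node in the group must agree on the uncommitted txn root and
    # the uncommitted state root of the domain ledger
    t_roots = {n.domainLedger.uncommittedRootHash for n in nodes}
    s_roots = {n.states[DOMAIN_LEDGER_ID].headHash for n in nodes}
    assert len(t_roots) == 1 and len(s_roots) == 1
    return t_roots.pop(), s_roots.pop()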
Example no. 41
def checkProtocolInstanceSetup(looper: Looper,
                               nodes: Sequence[TestNode],
                               retryWait: float = 1,
                               customTimeout: float = None,
                               instances: Sequence[int] = None):
    timeout = customTimeout or waits.expectedPoolElectionTimeout(len(nodes))

    checkEveryProtocolInstanceHasOnlyOnePrimary(looper=looper,
                                                nodes=nodes,
                                                retryWait=retryWait,
                                                timeout=timeout,
                                                instances_list=instances)

    checkEveryNodeHasAtMostOnePrimary(looper=looper,
                                      nodes=nodes,
                                      retryWait=retryWait,
                                      customTimeout=timeout)

    primaryReplicas = {replica.instId: replica
                       for node in nodes
                       for replica in node.replicas.values() if replica.isPrimary}
    return [r[1] for r in
            sorted(primaryReplicas.items(), key=operator.itemgetter(0))]
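
Because the return value is sorted by instance id, callers can index primaries by instance directly. For example (names as used throughout these tests):

# primaryReplicas[0] is the master primary; the rest are backup primaries
primaryReplicas = checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)
master_primary_node = primaryReplicas[0].node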
def test_restart_half_to_lower_view(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath,
                                    sdk_pool_handle, sdk_wallet_client):
    # Add transaction to ledger
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)

    # Move to higher view
    ensure_view_change_complete(looper, txnPoolNodeSet)

    # Restart half of nodes
    tm = tconf.ToleratePrimaryDisconnection + waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))
    nodes_before_restart = txnPoolNodeSet.copy()
    restart_nodes(looper, txnPoolNodeSet, txnPoolNodeSet[2:], tconf, tdir, allPluginsPath,
                  after_restart_timeout=tm, start_one_by_one=False)

    # Check that the pre-restart node instances never detected a possibly
    # inconsistent 3PC state
    for node in nodes_before_restart:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that the current (restarted) node instances don't detect one either
    for node in txnPoolNodeSet:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that all nodes are still functional
    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle, num_reqs=2, num_batches=1)
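
The spylog assertions above depend on the test nodes instrumenting selected methods so that each call is recorded and spylog.count() reports how often a method fired. The idea can be sketched as a plain wrapper (illustrative only, not plenum's actual spy machinery):

import functools

def spy(method, log):
    # Record the method name on every call, then delegate to the real method
    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        log.append(method.__name__)
        return method(*args, **kwargs)
    return wrapper

class FakeNode:
    def on_inconsistent_3pc_state(self):
        pass

calls = []
fake = FakeNode()
fake.on_inconsistent_3pc_state = spy(fake.on_inconsistent_3pc_state, calls)
fake.on_inconsistent_3pc_state()
# calls.count(...) plays the role of node.spylog.count(...) in the test
assert calls.count('on_inconsistent_3pc_state') == 1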
def test_quorum_after_f_plus_2_nodes_including_primary_turned_off_and_later_on(
        looper, allPluginsPath, tdir, tconf, txnPoolNodeSet, sdk_pool_handle,
        sdk_wallet_client):
    timeout = sdk_eval_timeout(1, len(txnPoolNodeSet))
    nodes = txnPoolNodeSet

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    stop_node(nodes[0], looper, nodes)
    waitForViewChange(looper, nodes[1:], expectedViewNo=1)
    ensureElectionsDone(looper,
                        nodes[1:],
                        numInstances=getRequiredInstances(nodeCount))

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    stop_node(nodes[1], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[2:], expectedViewNo=1)

    sdk_reqs3 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    with pytest.raises(TimeoutError):
        req_res = sdk_get_replies(looper, sdk_reqs3, timeout=timeout)
        sdk_check_reply(req_res[0])

    stop_node(nodes[2], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[3:], expectedViewNo=1)

    sdk_reqs4 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    with pytest.raises(TimeoutError):
        req_res = sdk_get_replies(looper, sdk_reqs4, timeout=timeout)
        sdk_check_reply(req_res[0])

    nodes[2] = start_stopped_node(nodes[2], looper, tconf, tdir,
                                  allPluginsPath)
    looper.runFor(waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[3:], expectedViewNo=1)

    sdk_reqs5 = sdk_send_random_requests(looper, sdk_pool_handle,
                                         sdk_wallet_client, 1)
    with pytest.raises(TimeoutError):
        req_res = sdk_get_replies(looper, sdk_reqs5, timeout=timeout)
        sdk_check_reply(req_res[0])

    nodes[1] = start_stopped_node(nodes[1], looper, tconf, tdir,
                                  allPluginsPath)
    ensureElectionsDone(looper,
                        nodes[1:],
                        numInstances=getRequiredInstances(nodeCount))
    checkViewNoForNodes(nodes[1:], expectedViewNo=1)

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    nodes[0] = start_stopped_node(nodes[0], looper, tconf, tdir,
                                  allPluginsPath)
    ensureElectionsDone(looper,
                        nodes,
                        numInstances=getRequiredInstances(nodeCount))
    checkViewNoForNodes(nodes, expectedViewNo=1)

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
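
The stop/start pattern above follows from BFT quorum arithmetic: with N nodes, f = (N - 1) // 3 faults are tolerated and ordering a request needs N - f live nodes. A quick sanity check of those numbers, assuming a 4-node pool here, which is consistent with f + 2 = 3 nodes being stopped:

def quorum_numbers(n):
    f = (n - 1) // 3       # tolerated faulty nodes
    return f, n - f        # (f, live nodes needed to order requests)

# With 4 nodes: f = 1, so 3 nodes must be up to order requests. After the
# second node stops only 2 remain, hence the timeouts above; restarting
# nodes one by one restores the write quorum step by step.
assert quorum_numbers(4) == (1, 3)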
def test_node_erases_last_sent_pp_key_on_propagate_primary_after_pool_restart(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client,
        tconf, tdir, allPluginsPath, chkFreqPatched):

    # Get a node with a backup primary replica and the rest of the nodes
    replica = getPrimaryReplica(txnPoolNodeSet, instId=backup_inst_id)
    node = replica.node
    other_nodes = set(txnPoolNodeSet) - {node}

    # Send some 3PC-batches and wait until the replica orders the 3PC-batches
    sdk_send_batches_of_random(looper, txnPoolNodeSet,
                               sdk_pool_handle, sdk_wallet_client,
                               num_reqs=7, num_batches=7,
                               timeout=tconf.Max3PCBatchWait)

    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc == (0, 7)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Check view no of the node and lastPrePrepareSeqNo of the replica
    assert node.viewNo == 0
    assert replica.lastPrePrepareSeqNo == 7
    assert replica.h == 6
    assert replica.H == 6 + LOG_SIZE

    # Ensure that there is a stored last sent PrePrepare key on the node
    assert LAST_SENT_PRE_PREPARE in node.nodeStatusDB

    # Stop the node containing the replica
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node.name,
                                            stopNode=True)
    looper.removeProdable(node)
    txnPoolNodeSet.remove(node)

    # Restart the rest of the nodes and wait for primary elections done
    for n in other_nodes:
        disconnect_node_and_ensure_disconnected(looper,
                                                txnPoolNodeSet,
                                                n.name,
                                                timeout=nodeCount,
                                                stopNode=True)
        looper.removeProdable(n)
        txnPoolNodeSet.remove(n)
    for n in other_nodes:
        txnPoolNodeSet.append(start_stopped_node(n, looper, tconf, tdir, allPluginsPath))
    looper.run(checkNodesConnected(txnPoolNodeSet,
                                   customTimeout=waits.expectedPoolInterconnectionTime(nodeCount)))

    # Wait until every replica on every node has a primary again
    def chk():
        for n in txnPoolNodeSet:
            for r in n.replicas.values():
                assert r.hasPrimary
    looper.run(eventually(chk, retryWait=1, timeout=waits.expectedPoolElectionTimeout(nodeCount)))

    # Start the stopped node
    node = start_stopped_node(node, looper, tconf, tdir, allPluginsPath)
    txnPoolNodeSet.append(node)

    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper, txnPoolNodeSet)

    replica = node.replicas[backup_inst_id]

    # Verify that the node has erased the stored last sent PrePrepare key
    assert LAST_SENT_PRE_PREPARE not in node.nodeStatusDB

    # Verify correspondingly that after the propagate primary procedure
    # the replica (which must again be the primary in its instance) has
    # not restored lastPrePrepareSeqNo, not adjusted last_ordered_3pc and
    # not shifted the watermarks
    assert node.viewNo == 0
    assert replica.isPrimary
    assert replica.lastPrePrepareSeqNo == 0
    assert replica.last_ordered_3pc == (0, 0)
    assert replica.h == 0
    assert replica.H == 0 + LOG_SIZE

    # Send a 3PC-batch and ensure that the replica orders it
    sdk_send_batches_of_random(looper, txnPoolNodeSet,
                               sdk_pool_handle, sdk_wallet_client,
                               num_reqs=1, num_batches=1,
                               timeout=tconf.Max3PCBatchWait)

    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc == (0, 1)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))
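
The h/H assertions in this test follow the usual PBFT-style watermark window: a replica only accepts 3PC sequence numbers pp_seq_no with h < pp_seq_no <= H, where H = h + LOG_SIZE. A small sketch of that relationship (the LOG_SIZE value is assumed purely for illustration; the real one comes from the configuration):

LOG_SIZE = 300  # assumed here for illustration only

def in_watermark_window(pp_seq_no, h):
    H = h + LOG_SIZE
    return h < pp_seq_no <= H

# Before the restart the test sees h == 6 after ordering 7 batches, so
# batch 7 sits just inside the window; after the restart h is back to 0
# and the window is (0, LOG_SIZE].
assert in_watermark_window(7, 6) and in_watermark_window(1, 0)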
def test_quorum_after_f_plus_2_nodes_including_primary_turned_off_and_later_on(
        looper, allPluginsPath, tdir, tconf,
        txnPoolNodeSet,
        sdk_pool_handle,
        sdk_wallet_client):
    timeout = sdk_eval_timeout(1, len(txnPoolNodeSet))
    nodes = txnPoolNodeSet

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              1)

    stop_node(nodes[0], looper, nodes)
    waitForViewChange(looper, nodes[1:], expectedViewNo=1)
    ensureElectionsDone(looper, nodes[1:],
                        instances_list=range(getRequiredInstances(nodeCount)))

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              1)

    stop_node(nodes[1], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[2:], expectedViewNo=1)

    sdk_reqs3 = sdk_send_random_requests(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs3, timeout=timeout)
        sdk_check_reply(req_res[0])

    stop_node(nodes[2], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[3:], expectedViewNo=1)

    sdk_reqs4 = sdk_send_random_requests(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs4, timeout=timeout)
        sdk_check_reply(req_res[0])

    nodes[2] = start_stopped_node(nodes[2], looper, tconf, tdir, allPluginsPath)
    looper.runFor(waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[3:], expectedViewNo=1)

    sdk_reqs5 = sdk_send_random_requests(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs5, timeout=timeout)
        sdk_check_reply(req_res[0])

    nodes[1] = start_stopped_node(nodes[1], looper, tconf, tdir, allPluginsPath)
    ensureElectionsDone(looper, nodes[1:],
                        instances_list=range(getRequiredInstances(nodeCount)),
                        customTimeout=60)
    checkViewNoForNodes(nodes[1:], expectedViewNo=1)

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              1)

    nodes[0] = start_stopped_node(nodes[0], looper, tconf, tdir, allPluginsPath)
    ensureElectionsDone(looper, nodes,
                        instances_list=range(getRequiredInstances(nodeCount)),
                        customTimeout=60)
    checkViewNoForNodes(nodes, expectedViewNo=1)

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              1)
def test_quorum_after_f_plus_2_nodes_but_not_primary_turned_off_and_later_on(
        looper, allPluginsPath, tdir, tconf,
        txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    nodes = txnPoolNodeSet

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              1)

    stop_node(nodes[4], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[:4], expectedViewNo=0)

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              1)

    stop_node(nodes[3], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[:3], expectedViewNo=0)

    sdk_reqs3 = sdk_send_random_requests(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs3)
        sdk_check_reply(req_res[0])

    stop_node(nodes[2], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[:2], expectedViewNo=0)

    sdk_reqs4 = sdk_send_random_requests(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs4)
        sdk_check_reply(req_res[0])

    nodes[4] = start_stopped_node(nodes[4], looper, tconf, tdir, allPluginsPath)
    looper.runFor(waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[:2] + nodes[4:], expectedViewNo=0)

    sdk_reqs5 = sdk_send_random_requests(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs5)
        sdk_check_reply(req_res[0])

    nodes[3] = start_stopped_node(nodes[3], looper, tconf, tdir, allPluginsPath)
    ensureElectionsDone(looper, nodes[:2] + nodes[3:],
                        instances_list=range(getRequiredInstances(nodeCount)))
    checkViewNoForNodes(nodes[:2] + nodes[3:], expectedViewNo=0)

    sdk_reqs6 = sdk_send_random_requests(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         1)
    sdk_get_replies(looper, sdk_reqs6)

    nodes[2] = start_stopped_node(nodes[2], looper, tconf, tdir, allPluginsPath)
    ensureElectionsDone(looper, nodes,
                        instances_list=range(getRequiredInstances(nodeCount)))
    checkViewNoForNodes(nodes, expectedViewNo=0)

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              1)
def test_quorum_after_f_plus_2_nodes_but_not_primary_turned_off_and_later_on(
        looper, allPluginsPath, tdir, tconf,
        txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    nodes = txnPoolNodeSet

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              1)

    stop_node(nodes[4], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[:4], expectedViewNo=0)

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              1)

    stop_node(nodes[3], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[:3], expectedViewNo=0)

    sdk_reqs3 = sdk_send_random_requests(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs3)
        sdk_check_reply(req_res[0])

    stop_node(nodes[2], looper, nodes)
    looper.runFor(tconf.ToleratePrimaryDisconnection +
                  waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[:2], expectedViewNo=0)

    sdk_reqs4 = sdk_send_random_requests(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs4)
        sdk_check_reply(req_res[0])

    nodes[4] = start_stopped_node(nodes[4], looper, tconf, tdir, allPluginsPath)
    looper.runFor(waits.expectedPoolElectionTimeout(len(nodes)))
    checkViewNoForNodes(nodes[:2] + nodes[4:], expectedViewNo=0)

    sdk_reqs5 = sdk_send_random_requests(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         1)
    with pytest.raises(PoolLedgerTimeoutException):
        req_res = sdk_get_replies(looper, sdk_reqs5)
        sdk_check_reply(req_res[0])

    nodes[3] = start_stopped_node(nodes[3], looper, tconf, tdir, allPluginsPath)
    ensureElectionsDone(looper, nodes[:2] + nodes[3:],
                        numInstances=getRequiredInstances(nodeCount))
    checkViewNoForNodes(nodes[:2] + nodes[3:], expectedViewNo=0)

    sdk_reqs6 = sdk_send_random_requests(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_client,
                                         1)
    sdk_get_replies(looper, [sdk_reqs3[0], sdk_reqs4[0], sdk_reqs5[0], sdk_reqs6[0]])

    nodes[2] = start_stopped_node(nodes[2], looper, tconf, tdir, allPluginsPath)
    ensureElectionsDone(looper, nodes,
                        numInstances=getRequiredInstances(nodeCount))
    checkViewNoForNodes(nodes, expectedViewNo=0)

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle,
                              sdk_wallet_client,
                              1)
def test_disconnected_node_with_lagged_view_pulls_up_its_view_on_reconnection(
        looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle):
    """
    Verifies that a disconnected node with a lagged view accepts
    the current view from the other nodes on re-connection.
    Steps:
    1. Provoke a view change to view 1.
    2. Ensure that all the nodes complete the view change to view 1.
    3. Disconnect one node from the rest of the nodes in the pool.
    4. Provoke a view change to view 2.
    5. Ensure that all the nodes except for the disconnected one complete
    the view change to view 2 while the disconnected node remains in view 1.
    6. Provoke a view change to view 3.
    7. Ensure that all the nodes except for the disconnected one complete
    the view change to view 3 while the disconnected node remains in view 1.
    8. Reconnect the disconnected node to the rest of the nodes in the pool.
    9. Ensure that the re-connected node completes the view change to view 3.
    10. Ensure that all the nodes participate in consensus.
    """
    checkViewNoForNodes(txnPoolNodeSet, 0)

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)

    ensure_view_change(looper, txnPoolNodeSet)
    ensureElectionsDone(looper, txnPoolNodeSet)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    checkViewNoForNodes(txnPoolNodeSet, 1)

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)

    lagged_node = getNonPrimaryReplicas(txnPoolNodeSet)[-1].node
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            lagged_node,
                                            stopNode=False)
    other_nodes = list(set(txnPoolNodeSet) - {lagged_node})

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)

    ensure_view_change(looper, other_nodes)
    ensureElectionsDone(looper, other_nodes,
                        instances_list=range(getRequiredInstances(len(txnPoolNodeSet))))
    ensure_all_nodes_have_same_data(looper, other_nodes)
    checkViewNoForNodes(other_nodes, 2)
    checkViewNoForNodes([lagged_node], 1)

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)

    ensure_view_change(looper, other_nodes)
    ensureElectionsDone(looper, other_nodes,
                        instances_list=range(getRequiredInstances(len(txnPoolNodeSet))))
    ensure_all_nodes_have_same_data(looper, other_nodes)
    checkViewNoForNodes(other_nodes, 3)
    checkViewNoForNodes([lagged_node], 1)

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)

    reconnect_node_and_ensure_connected(looper, txnPoolNodeSet, lagged_node)
    waitForViewChange(looper, [lagged_node], 3,
                      customTimeout=waits.expectedPoolElectionTimeout(
                          len(txnPoolNodeSet)))
    ensureElectionsDone(looper, txnPoolNodeSet)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    checkViewNoForNodes(txnPoolNodeSet, 3)

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
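
Note that the lagged node jumps straight from view 1 to view 3 without passing through view 2: on reconnection it adopts the highest view that a quorum of peers reports. A minimal sketch of that decision (hypothetical helper, not plenum's actual ViewChanger):

from collections import Counter

def view_to_adopt(current_view, peer_views, quorum):
    # Consider only views ahead of ours; adopt the highest one that has
    # quorum support, otherwise stay where we are
    counts = Counter(v for v in peer_views if v > current_view)
    ahead = [v for v, c in counts.items() if c >= quorum]
    return max(ahead) if ahead else current_view

# The node at view 1 sees its three peers at view 3 and jumps directly there
assert view_to_adopt(1, [3, 3, 3], quorum=3) == 3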
Example no. 50
def test_disconnected_node_with_lagged_view_pulls_up_its_view_on_reconnection(
        looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle):
    """
    Verifies that a disconnected node with a lagged view accepts
    the current view from the other nodes on re-connection.
    Steps:
    1. Provoke a view change to view 1.
    2. Ensure that all the nodes complete the view change to view 1.
    3. Disconnect one node from the rest of the nodes in the pool.
    4. Provoke a view change to view 2.
    5. Ensure that all the nodes except for the disconnected one complete
    the view change to view 2 while the disconnected node remains in view 1.
    6. Provoke a view change to view 3.
    7. Ensure that all the nodes except for the disconnected one complete
    the view change to view 3 while the disconnected node remains in view 1.
    8. Reconnect the disconnected node to the rest of the nodes in the pool.
    9. Ensure that the re-connected node completes the view change to view 3.
    10. Ensure that all the nodes participate in consensus.
    """
    checkViewNoForNodes(txnPoolNodeSet, 0)

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)

    ensure_view_change(looper, txnPoolNodeSet)
    ensureElectionsDone(looper, txnPoolNodeSet)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    checkViewNoForNodes(txnPoolNodeSet, 1)

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)

    lagged_node = getNonPrimaryReplicas(txnPoolNodeSet)[-1].node
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            lagged_node,
                                            stopNode=False)
    other_nodes = list(set(txnPoolNodeSet) - {lagged_node})

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)

    ensure_view_change(looper, other_nodes)
    ensureElectionsDone(looper, other_nodes,
                        numInstances=getRequiredInstances(len(txnPoolNodeSet)))
    ensure_all_nodes_have_same_data(looper, other_nodes)
    checkViewNoForNodes(other_nodes, 2)
    checkViewNoForNodes([lagged_node], 1)

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)

    ensure_view_change(looper, other_nodes)
    ensureElectionsDone(looper, other_nodes,
                        numInstances=getRequiredInstances(len(txnPoolNodeSet)))
    ensure_all_nodes_have_same_data(looper, other_nodes)
    checkViewNoForNodes(other_nodes, 3)
    checkViewNoForNodes([lagged_node], 1)

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)

    reconnect_node_and_ensure_connected(looper, txnPoolNodeSet, lagged_node)
    waitForViewChange(looper, [lagged_node], 3,
                      customTimeout=waits.expectedPoolElectionTimeout(
                          len(txnPoolNodeSet)))
    ensureElectionsDone(looper, txnPoolNodeSet)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    checkViewNoForNodes(txnPoolNodeSet, 3)

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)