def testViewChangesIfMasterPrimaryDisconnected(txnPoolNodeSet, looper, wallet1,
                                               client1, client1Connected,
                                               tconf):
    """
    View change occurs when master's primary is disconnected
    """

    # Setup
    nodes = txnPoolNodeSet

    viewNoBefore = checkViewNoForNodes(nodes)
    old_pr_node = get_master_primary_node(nodes)

    # Stop primary
    stopNodes([old_pr_node], looper)
    looper.removeProdable(old_pr_node)
    remainingNodes = list(set(nodes) - {old_pr_node})
    # Sometimes it takes time for nodes to detect disconnection
    ensure_node_disconnected(looper, old_pr_node, remainingNodes, timeout=20)

    looper.runFor(tconf.ToleratePrimaryDisconnection + 2)

    # Give some time to detect disconnection and then verify that view has
    # changed and new primary has been elected
    waitForViewChange(looper, remainingNodes, viewNoBefore + 1)
    ensure_all_nodes_have_same_data(looper, nodes=remainingNodes)
    new_pr_node = get_master_primary_node(remainingNodes)
    assert old_pr_node != new_pr_node

    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, 5)
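# A hedged sketch (not part of the original suite) factoring out the
# stop-primary-and-expect-view-change steps used in the test above. It assumes
# the same helpers (checkViewNoForNodes, get_master_primary_node, stopNodes,
# ensure_node_disconnected, waitForViewChange) are in scope, as they are in
# the surrounding examples; the helper name itself is hypothetical.
def stop_master_primary_and_wait_for_view_change(looper, nodes, tconf):
    view_no_before = checkViewNoForNodes(nodes)
    old_primary = get_master_primary_node(nodes)

    # Stop the primary and make sure the rest of the pool notices
    stopNodes([old_primary], looper)
    looper.removeProdable(old_primary)
    remaining = [n for n in nodes if n != old_primary]
    ensure_node_disconnected(looper, old_primary, remaining, timeout=20)

    # Let the disconnection tolerance expire, then expect view_no to advance
    looper.runFor(tconf.ToleratePrimaryDisconnection + 2)
    waitForViewChange(looper, remaining, view_no_before + 1)
    return old_primary, remaining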
def test_order_after_demote_and_restart(looper, txnPoolNodeSet,
                                        sdk_pool_handle, sdk_wallet_client, tdir, tconf, allPluginsPath,
                                        sdk_wallet_stewards):
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_client, 3, 3)

    primary_node = txnPoolNodeSet[0]
    node_to_stop = txnPoolNodeSet[1]
    node_to_demote = txnPoolNodeSet[2]
    txnPoolNodeSet.remove(node_to_demote)

    node_to_stop.cleanupOnStopping = True
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, txnPoolNodeSet, timeout=2)

    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, node_to_demote)

    config_helper = PNodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(node_to_stop.name, config_helper=config_helper, config=tconf,
                              pluginPaths=allPluginsPath, ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    txnPoolNodeSet[1] = restarted_node
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet, check_primaries=False)

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_client, 1, 1)

    def get_current_bls_keys(node):
        return node.master_replica._bls_bft_replica._bls_bft.bls_key_register._current_bls_keys

    assert get_current_bls_keys(restarted_node) == get_current_bls_keys(primary_node)
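# A hedged sketch generalizing the two-node BLS check above to a whole pool:
# every node should expose the same current BLS key register as the first one.
# The attribute chain mirrors get_current_bls_keys() in the test; the helper
# name is illustrative only.
def assert_bls_keys_in_sync(nodes):
    def current_keys(node):
        return node.master_replica._bls_bft_replica._bls_bft.bls_key_register._current_bls_keys

    reference = current_keys(nodes[0])
    for node in nodes[1:]:
        assert current_keys(node) == reference, \
            "BLS key register of {} diverged from {}".format(node.name,
                                                             nodes[0].name)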
def testNodeCatchupAfterLostConnection(newNodeCaughtUp, txnPoolNodeSet,
                                       nodeSetWithNodeAddedAfterSomeTxns):
    """
    A node that has a poor internet connection and became unsynced after some
    transactions should eventually receive the transactions that happened
    while it was not accessible
    """
    looper, newNode, client, wallet, _, _ = nodeSetWithNodeAddedAfterSomeTxns
    logger.debug("Disconnecting node {}, ledger size {}".
                 format(newNode, newNode.domainLedger.size))
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, newNode,
                                            stopNode=False)
    looper.removeProdable(newNode)

    # TODO: Check if the node has really stopped processing requests?
    logger.debug("Sending requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
    # Make sure new node got out of sync
    waitNodeDataInequality(looper, newNode, *txnPoolNodeSet[:-1])

    # logger.debug("Ensure node {} gets disconnected".format(newNode))
    ensure_node_disconnected(looper, newNode, txnPoolNodeSet[:-1])

    logger.debug("Connecting the node {} back, ledger size {}".
                 format(newNode, newNode.domainLedger.size))
    looper.add(newNode)

    logger.debug("Waiting for the node to catch up, {}".format(newNode))
    waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:-1])

    logger.debug("Sending more requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 10)
    checkNodeDataForEquality(newNode, *txnPoolNodeSet[:-1])
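# A compact, hedged sketch of the disconnect / traffic / reconnect / catch-up
# flow exercised above. It assumes the node has already been disconnected and
# removed from the looper (as in the test) and that the same helpers are in
# scope; the wrapper name and signature are illustrative.
def reconnect_and_wait_for_catchup(looper, lagging_node, other_nodes):
    looper.add(lagging_node)
    # Catch-up should bring the lagging node to the same ledger state
    waitNodeDataEquality(looper, lagging_node, *other_nodes)
    checkNodeDataForEquality(lagging_node, *other_nodes)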
def test_order_after_demote_and_restart(looper, txnPoolNodeSet,
                                        sdk_pool_handle, sdk_wallet_client, tdir, tconf, allPluginsPath,
                                        sdk_wallet_stewards):
    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_client, 3, 3)

    primary_node = txnPoolNodeSet[0]
    node_to_stop = txnPoolNodeSet[1]
    node_to_demote = txnPoolNodeSet[2]
    txnPoolNodeSet.remove(node_to_demote)

    node_to_stop.cleanupOnStopping = True
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, txnPoolNodeSet, timeout=2)

    demote_node(looper, sdk_wallet_stewards[2], sdk_pool_handle, node_to_demote)

    config_helper = PNodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(node_to_stop.name, config_helper=config_helper, config=tconf,
                              pluginPaths=allPluginsPath, ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    txnPoolNodeSet[1] = restarted_node
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)

    sdk_send_batches_of_random_and_check(looper, txnPoolNodeSet,
                                         sdk_pool_handle, sdk_wallet_client, 1, 1)

    def get_current_bls_keys(node):
        return node.master_replica._bls_bft_replica._bls_bft.bls_key_register._current_bls_keys

    assert get_current_bls_keys(restarted_node) == get_current_bls_keys(primary_node)
def restart_nodes(looper, nodeSet, restart_set, tconf, tdir, allPluginsPath,
                  after_restart_timeout=None, start_one_by_one=True, wait_for_elections=True):
    for node_to_stop in restart_set:
        node_to_stop.cleanupOnStopping = True
        node_to_stop.stop()
        looper.removeProdable(node_to_stop)

    rest_nodes = [n for n in nodeSet if n not in restart_set]
    for node_to_stop in restart_set:
        ensure_node_disconnected(looper, node_to_stop, nodeSet, timeout=2)

    if after_restart_timeout:
        looper.runFor(after_restart_timeout)

    for node_to_restart in restart_set.copy():
        config_helper = PNodeConfigHelper(node_to_restart.name, tconf, chroot=tdir)
        restarted_node = TestNode(node_to_restart.name, config_helper=config_helper, config=tconf,
                                  pluginPaths=allPluginsPath, ha=node_to_restart.nodestack.ha,
                                  cliha=node_to_restart.clientstack.ha)
        looper.add(restarted_node)

        idx = nodeSet.index(node_to_restart)
        nodeSet[idx] = restarted_node
        idx = restart_set.index(node_to_restart)
        restart_set[idx] = restarted_node

        rest_nodes += [restarted_node]
        if start_one_by_one:
            looper.run(checkNodesConnected(rest_nodes))

    if not start_one_by_one:
        looper.run(checkNodesConnected(nodeSet))

    if wait_for_elections:
        ensureElectionsDone(looper=looper, nodes=nodeSet)
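# Illustrative (hypothetical) usage of restart_nodes() above: bounce the last
# two nodes of the pool one by one, give them a short settle period and wait
# for elections before continuing. The values are placeholders.
def bounce_last_two_nodes(looper, nodeSet, tconf, tdir, allPluginsPath):
    restart_nodes(looper, nodeSet, nodeSet[-2:], tconf, tdir, allPluginsPath,
                  after_restart_timeout=5,
                  start_one_by_one=True, wait_for_elections=True)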
def test_state_regenerated_from_ledger(looper,
                                       nodeSet, tconf, tdir,
                                       sdk_pool_handle,
                                       sdk_wallet_trustee,
                                       allPluginsPath):
    """
    Node loses its state database but recreates it from the ledger after start.
    ATTRIB txns are checked as well since they store some data off-ledger
    """
    trust_anchors = []
    for i in range(5):
        trust_anchors.append(sdk_add_new_nym(looper, sdk_pool_handle,
                                             sdk_wallet_trustee,
                                             'TA' + str(i),
                                             TRUST_ANCHOR_STRING))
        sdk_add_raw_attribute(looper, sdk_pool_handle,
                              trust_anchors[-1],
                              randomString(6),
                              randomString(10))

    for wh in trust_anchors:
        for i in range(3):
            sdk_add_new_nym(looper, sdk_pool_handle,
                            wh, 'NP1' + str(i))

    ensure_all_nodes_have_same_data(looper, nodeSet)

    node_to_stop = nodeSet[-1]
    node_state = node_to_stop.states[DOMAIN_LEDGER_ID]
    assert not node_state.isEmpty
    state_db_path = node_state._kv.db_path
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, nodeSet[:-1])

    shutil.rmtree(state_db_path)

    config_helper = NodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(
        node_to_stop.name,
        config_helper=config_helper,
        config=tconf,
        pluginPaths=allPluginsPath,
        ha=node_to_stop.nodestack.ha,
        cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    nodeSet[-1] = restarted_node

    looper.run(checkNodesConnected(nodeSet))
    # Need some time as `last_ordered_3PC` is compared too and that is
    # communicated through catchup
    waitNodeDataEquality(looper, restarted_node, *nodeSet[:-1])

    # Pool is still functional
    for wh in trust_anchors:
        sdk_add_new_nym(looper, sdk_pool_handle,
                        wh, 'NP--' + randomString(5))

    ensure_all_nodes_have_same_data(looper, nodeSet)
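# The wipe-state-and-restart steps above recur throughout these examples, so
# here is a hedged sketch of a helper factoring them out. It assumes shutil,
# TestNode, NodeConfigHelper and the looper behave as in the surrounding
# tests; the helper name is hypothetical.
def restart_node_with_wiped_state(looper, node, tconf, tdir, allPluginsPath):
    state_db_paths = [state._kv.db_path for state in node.states.values()]

    node.cleanupOnStopping = False   # keep ledgers and other data on disk
    node.stop()
    looper.removeProdable(node)

    for path in state_db_paths:
        shutil.rmtree(path)          # drop only the state databases

    config_helper = NodeConfigHelper(node.name, tconf, chroot=tdir)
    restarted = TestNode(node.name,
                         config_helper=config_helper,
                         config=tconf,
                         pluginPaths=allPluginsPath,
                         ha=node.nodestack.ha,
                         cliha=node.clientstack.ha)
    looper.add(restarted)
    return restarted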
def restart_nodes(looper, nodeSet, restart_set, tconf, tdir, allPluginsPath,
                  after_restart_timeout=None, per_add_timeout=None):
    for node_to_stop in restart_set:
        node_to_stop.cleanupOnStopping = True
        node_to_stop.stop()
        looper.removeProdable(node_to_stop)

    rest_nodes = [n for n in nodeSet if n not in restart_set]
    for node_to_stop in restart_set:
        ensure_node_disconnected(looper, node_to_stop, nodeSet, timeout=2)

    if after_restart_timeout:
        looper.runFor(after_restart_timeout)

    for node_to_restart in restart_set:
        config_helper = PNodeConfigHelper(node_to_restart.name, tconf, chroot=tdir)
        restarted_node = TestNode(node_to_restart.name, config_helper=config_helper, config=tconf,
                                  pluginPaths=allPluginsPath, ha=node_to_restart.nodestack.ha,
                                  cliha=node_to_restart.clientstack.ha)
        looper.add(restarted_node)
        idx = nodeSet.index(node_to_restart)
        nodeSet[idx] = restarted_node
        if per_add_timeout:
            looper.run(checkNodesConnected(rest_nodes + [restarted_node], customTimeout=per_add_timeout))
        rest_nodes += [restarted_node]

    if not per_add_timeout:
        looper.run(checkNodesConnected(nodeSet, customTimeout=after_restart_timeout))
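# Illustrative (hypothetical) usage of this per_add_timeout variant: bounce a
# couple of nodes, waiting up to 30 seconds for each restarted node to
# reconnect before moving on to the next one. Values are placeholders.
def bounce_two_nodes_with_timeout(looper, nodeSet, tconf, tdir, allPluginsPath):
    restart_nodes(looper, nodeSet, nodeSet[-2:], tconf, tdir, allPluginsPath,
                  per_add_timeout=30)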
def test_state_regenerated_from_ledger(looper, tdirWithPoolTxns,
                                       tdirWithDomainTxnsUpdated,
                                       nodeSet, tconf,
                                       trustee, trusteeWallet,
                                       allPluginsPath):
    """
    Node loses its state database but recreates it from the ledger after start.
    ATTRIB txns are checked as well since they store some data off-ledger
    """
    trust_anchors = []
    for i in range(5):
        trust_anchors.append(getClientAddedWithRole(nodeSet,
                                                    tdirWithPoolTxns, looper,
                                                    trustee, trusteeWallet,
                                                    'TA' + str(i),
                                                    role=TRUST_ANCHOR))
        addRawAttribute(looper, *trust_anchors[-1], randomString(6),
                        randomString(10), dest=trust_anchors[-1][1].defaultId)

    for tc, tw in trust_anchors:
        for i in range(3):
            getClientAddedWithRole(nodeSet,
                                 tdirWithPoolTxns,
                                 looper,
                                 tc, tw,
                                 'NP1' + str(i))

    ensure_all_nodes_have_same_data(looper, nodeSet)

    node_to_stop = nodeSet[-1]
    node_state = node_to_stop.states[DOMAIN_LEDGER_ID]
    assert not node_state.isEmpty
    state_db_path = node_state._kv._dbPath
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop.name, nodeSet[:-1])

    shutil.rmtree(state_db_path)

    restarted_node = TestNode(node_to_stop.name, basedirpath=tdirWithPoolTxns,
                              config=tconf, pluginPaths=allPluginsPath,
                              ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    nodeSet[-1] = restarted_node

    looper.run(checkNodesConnected(nodeSet))
    # Need some time as `last_ordered_3PC` is compared too and that is
    # communicated through catchup
    waitNodeDataEquality(looper, restarted_node, *nodeSet[:-1])

    # Pool is still functional
    for tc, tw in trust_anchors:
        getClientAddedWithRole(nodeSet,
                             tdirWithPoolTxns,
                             looper,
                             tc, tw,
                             'NP--{}'.format(tc.name))

    ensure_all_nodes_have_same_data(looper, nodeSet)
def test_state_regenerated_from_ledger(looper,
                                       nodeSet, tconf, tdir,
                                       sdk_pool_handle,
                                       sdk_wallet_trustee,
                                       allPluginsPath):
    """
    Node loses its state database but recreates it from the ledger after start.
    ATTRIB txns are checked as well since they store some data off-ledger
    """
    endorsers = []
    for i in range(5):
        endorsers.append(sdk_add_new_nym(looper, sdk_pool_handle,
                                         sdk_wallet_trustee,
                                         'TA' + str(i),
                                         ENDORSER_STRING))
        sdk_add_raw_attribute(looper, sdk_pool_handle,
                              endorsers[-1],
                              randomString(6),
                              randomString(10))

    for wh in endorsers:
        for i in range(3):
            sdk_add_new_nym(looper, sdk_pool_handle,
                            wh, 'NP1' + str(i))

    ensure_all_nodes_have_same_data(looper, nodeSet)

    node_to_stop = nodeSet[-1]
    node_state = node_to_stop.states[DOMAIN_LEDGER_ID]
    assert not node_state.isEmpty
    state_db_path = node_state._kv.db_path
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, nodeSet[:-1])

    shutil.rmtree(state_db_path)

    config_helper = NodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(
        node_to_stop.name,
        config_helper=config_helper,
        config=tconf,
        pluginPaths=allPluginsPath,
        ha=node_to_stop.nodestack.ha,
        cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    nodeSet[-1] = restarted_node

    looper.run(checkNodesConnected(nodeSet))
    # Need some time as `last_ordered_3PC` is compared too and that is
    # communicated through catchup
    waitNodeDataEquality(looper, restarted_node, *nodeSet[:-1])

    # Pool is still functional
    for wh in endorsers:
        sdk_add_new_nym(looper, sdk_pool_handle,
                        wh, 'NP--' + randomString(5))

    ensure_all_nodes_have_same_data(looper, nodeSet)
def restart_nodes(looper, nodeSet, restart_set, tconf, tdir, allPluginsPath,
                  after_restart_timeout=None, start_one_by_one=True, wait_for_elections=True):
    for node_to_stop in restart_set:
        node_to_stop.cleanupOnStopping = True
        node_to_stop.stop()
        looper.removeProdable(node_to_stop)

    rest_nodes = [n for n in nodeSet if n not in restart_set]
    for node_to_stop in restart_set:
        ensure_node_disconnected(looper, node_to_stop, nodeSet, timeout=2)

    if after_restart_timeout:
        looper.runFor(after_restart_timeout)

    for node_to_restart in restart_set.copy():
        config_helper = PNodeConfigHelper(node_to_restart.name, tconf, chroot=tdir)
        restarted_node = TestNode(node_to_restart.name, config_helper=config_helper, config=tconf,
                                  pluginPaths=allPluginsPath, ha=node_to_restart.nodestack.ha,
                                  cliha=node_to_restart.clientstack.ha)
        looper.add(restarted_node)

        idx = nodeSet.index(node_to_restart)
        nodeSet[idx] = restarted_node
        idx = restart_set.index(node_to_restart)
        restart_set[idx] = restarted_node

        rest_nodes += [restarted_node]
        if start_one_by_one:
            looper.run(checkNodesConnected(rest_nodes))

    if not start_one_by_one:
        looper.run(checkNodesConnected(nodeSet))

    if wait_for_elections:
        ensureElectionsDone(looper=looper, nodes=nodeSet)
def test_view_changes_if_master_primary_disconnected(txnPoolNodeSet, looper,
                                                     sdk_pool_handle,
                                                     sdk_wallet_client, tdir,
                                                     tconf, allPluginsPath):
    """
    View change occurs when master's primary is disconnected
    """

    # Setup
    nodes = txnPoolNodeSet

    old_view_no = checkViewNoForNodes(nodes)
    old_pr_node = get_master_primary_node(nodes)

    # Stop primary
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            old_pr_node,
                                            stopNode=True)
    looper.removeProdable(old_pr_node)

    remaining_nodes = list(set(nodes) - {old_pr_node})
    # Sometimes it takes time for nodes to detect disconnection
    ensure_node_disconnected(looper, old_pr_node, remaining_nodes, timeout=20)

    looper.runFor(tconf.ToleratePrimaryDisconnection + 2)

    # Give some time to detect disconnection and then verify that view has
    # changed and new primary has been elected
    waitForViewChange(looper, remaining_nodes, old_view_no + 1)
    ensure_all_nodes_have_same_data(looper, nodes=remaining_nodes)
    new_pr_node = get_master_primary_node(remaining_nodes)
    assert old_pr_node != new_pr_node

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 5)

    # Check if old primary can join the pool and still functions
    old_pr_node = start_stopped_node(old_pr_node, looper, tconf, tdir,
                                     allPluginsPath)

    txnPoolNodeSet = remaining_nodes + [old_pr_node]
    looper.run(
        eventually(checkViewNoForNodes,
                   txnPoolNodeSet,
                   old_view_no + 1,
                   timeout=tconf.VIEW_CHANGE_TIMEOUT))

    # After the node catches up it sets view_no from the audit ledger and
    # does not need to do a view change
    assert len(
        getAllReturnVals(old_pr_node.view_changer,
                         old_pr_node.view_changer.start_view_change,
                         compare_val_to=True)) == 0

    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)

    assert not old_pr_node.view_changer._next_view_indications
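# A hedged helper sketch for the rejoin check above: once the old primary has
# caught up, its view_changer should not have started another view change,
# because the current view_no is learnt from the audit ledger during catch-up.
# It reuses the getAllReturnVals spy helper from the test; the function name
# is hypothetical.
def assert_no_view_change_started(node):
    started = getAllReturnVals(node.view_changer,
                               node.view_changer.start_view_change,
                               compare_val_to=True)
    assert len(started) == 0
    assert not node.view_changer._next_view_indications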
def disconnect_node_and_ensure_disconnected(looper, poolNodes,
                                            disconnect: Union[str, TestNode],
                                            timeout=None,
                                            stopNode=True):
    if isinstance(disconnect, TestNode):
        disconnect = disconnect.name
    assert isinstance(disconnect, str)
    disconnectPoolNode(poolNodes, disconnect, stopNode=stopNode)
    ensure_node_disconnected(looper, disconnect, poolNodes,
                             timeout=timeout)
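# A hedged wrapper mirroring how the tests above use this helper: disconnect a
# TestNode (optionally keeping its process alive) and then stop running it on
# the looper. The wrapper name is hypothetical.
def drop_node_from_pool(looper, pool_nodes, node, stop_node=True):
    disconnect_node_and_ensure_disconnected(looper, pool_nodes, node,
                                            stopNode=stop_node)
    looper.removeProdable(node)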
def testViewChangesIfMasterPrimaryDisconnected(txnPoolNodeSet, looper, wallet1,
                                               client1, client1Connected,
                                               tconf, tdirWithPoolTxns,
                                               allPluginsPath):
    """
    View change occurs when master's primary is disconnected
    """

    # Setup
    nodes = txnPoolNodeSet

    old_view_no = checkViewNoForNodes(nodes)
    old_pr_node = get_master_primary_node(nodes)

    # Stop primary
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            old_pr_node,
                                            stopNode=True)
    looper.removeProdable(old_pr_node)

    remaining_nodes = list(set(nodes) - {old_pr_node})
    # Sometimes it takes time for nodes to detect disconnection
    ensure_node_disconnected(looper, old_pr_node, remaining_nodes, timeout=20)

    looper.runFor(tconf.ToleratePrimaryDisconnection + 2)

    # Give some time to detect disconnection and then verify that view has
    # changed and new primary has been elected
    waitForViewChange(looper, remaining_nodes, old_view_no + 1)
    ensure_all_nodes_have_same_data(looper, nodes=remaining_nodes)
    new_pr_node = get_master_primary_node(remaining_nodes)
    assert old_pr_node != new_pr_node

    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, 5)

    # Check if old primary can join the pool and still functions
    old_pr_node = start_stopped_node(old_pr_node, looper, tconf,
                                     tdirWithPoolTxns, allPluginsPath)

    txnPoolNodeSet = remaining_nodes + [old_pr_node]
    looper.run(
        eventually(checkViewNoForNodes,
                   txnPoolNodeSet,
                   old_view_no + 1,
                   timeout=10))
    assert len(
        getAllReturnVals(old_pr_node,
                         old_pr_node._start_view_change_if_possible,
                         compare_val_to=True)) > 0

    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)

    assert not old_pr_node._next_view_indications
def test_restart_to_same_view_with_killed_primary(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath,
                                                  sdk_pool_handle, sdk_wallet_client):
    restart_timeout = tconf.ToleratePrimaryDisconnection + \
                      waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))

    primary = txnPoolNodeSet[0]
    alive_nodes = txnPoolNodeSet[1:]
    minority = alive_nodes[-1:]
    majority = alive_nodes[:-1]

    # Move to higher view by killing primary
    primary.cleanupOnStopping = True
    primary.stop()
    looper.removeProdable(primary)
    ensure_node_disconnected(looper, primary, txnPoolNodeSet)
    waitForViewChange(looper, alive_nodes, 1, customTimeout=VIEW_CHANGE_TIMEOUT)
    ensureElectionsDone(looper, alive_nodes, numInstances=3)

    # Add transaction to ledger
    sdk_send_random_and_check(looper, alive_nodes, sdk_pool_handle, sdk_wallet_client, 1)

    # Restart majority group
    majority_before_restart = majority.copy()
    restart_nodes(looper, alive_nodes, majority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=restart_timeout, start_one_by_one=False, wait_for_elections=False)
    waitForViewChange(looper, majority, 1, customTimeout=2.1 * VIEW_CHANGE_TIMEOUT)
    ensureElectionsDone(looper, majority, numInstances=3)

    # Check that nodes in minority group are aware that they might have inconsistent 3PC state
    for node in minority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 1

    # Check that nodes in majority group didn't think they might have inconsistent 3PC state
    for node in majority_before_restart:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that nodes in majority group don't think they might have inconsistent 3PC state
    for node in majority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Restart minority group
    restart_nodes(looper, alive_nodes, minority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=restart_timeout, start_one_by_one=False, wait_for_elections=False)
    ensureElectionsDone(looper, alive_nodes, numInstances=3)

    # Check that all nodes are still functional
    sdk_ensure_pool_functional(looper, alive_nodes, sdk_wallet_client, sdk_pool_handle)
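# A small, self-contained sketch of the quorum arithmetic behind the
# majority/minority split above: with n nodes the pool tolerates
# f = (n - 1) // 3 faults, so any group of n - f nodes can still order
# transactions. Note that the test itself builds its groups differently
# (the primary is killed separately and only the last alive node forms the
# minority); this helper is purely illustrative.
def split_majority_minority(nodes):
    n = len(nodes)
    f = (n - 1) // 3
    return nodes[:n - f], nodes[n - f:]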
def test_restart_to_same_view_with_killed_primary(looper, txnPoolNodeSet, tconf, tdir, allPluginsPath,
                                                  sdk_pool_handle, sdk_wallet_client):
    restart_timeout = tconf.ToleratePrimaryDisconnection + \
                      waits.expectedPoolElectionTimeout(len(txnPoolNodeSet))

    primary = txnPoolNodeSet[0]
    alive_nodes = txnPoolNodeSet[1:]
    minority = alive_nodes[-1:]
    majority = alive_nodes[:-1]

    # Move to higher view by killing primary
    primary.cleanupOnStopping = True
    primary.stop()
    looper.removeProdable(primary)
    ensure_node_disconnected(looper, primary, txnPoolNodeSet)
    waitForViewChange(looper, alive_nodes, 1, customTimeout=VIEW_CHANGE_TIMEOUT)
    ensureElectionsDone(looper, alive_nodes, instances_list=range(3))

    # Add transaction to ledger
    sdk_send_random_and_check(looper, alive_nodes, sdk_pool_handle, sdk_wallet_client, 1)

    # Restart majority group
    majority_before_restart = majority.copy()
    restart_nodes(looper, alive_nodes, majority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=restart_timeout, start_one_by_one=False, wait_for_elections=False)
    waitForViewChange(looper, majority, 1, customTimeout=2.1 * VIEW_CHANGE_TIMEOUT)
    ensureElectionsDone(looper, majority, instances_list=range(3))

    # Check that nodes in minority group are aware that they might have inconsistent 3PC state
    for node in minority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 1

    # Check that nodes in majority group didn't think they might have inconsistent 3PC state
    for node in majority_before_restart:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Check that nodes in majority group don't think they might have inconsistent 3PC state
    for node in majority:
        assert node.spylog.count(node.on_inconsistent_3pc_state) == 0

    # Restart minority group
    restart_nodes(looper, alive_nodes, minority, tconf, tdir, allPluginsPath,
                  after_restart_timeout=restart_timeout, start_one_by_one=False, wait_for_elections=False)
    ensureElectionsDone(looper, alive_nodes, instances_list=range(3))

    # Check that all nodes are still functional
    sdk_ensure_pool_functional(looper, alive_nodes, sdk_wallet_client, sdk_pool_handle)
def test_view_changes_if_master_primary_disconnected(txnPoolNodeSet, looper, sdk_pool_handle,
                                                     sdk_wallet_client, tdir, tconf, allPluginsPath):
    """
    View change occurs when master's primary is disconnected
    """

    # Setup
    nodes = txnPoolNodeSet

    old_view_no = checkViewNoForNodes(nodes)
    old_pr_node = get_master_primary_node(nodes)

    # Stop primary
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet,
                                            old_pr_node, stopNode=True)
    looper.removeProdable(old_pr_node)

    remaining_nodes = list(set(nodes) - {old_pr_node})
    # Sometimes it takes time for nodes to detect disconnection
    ensure_node_disconnected(looper, old_pr_node, remaining_nodes, timeout=20)

    looper.runFor(tconf.ToleratePrimaryDisconnection + 2)

    # Give some time to detect disconnection and then verify that view has
    # changed and new primary has been elected
    waitForViewChange(looper, remaining_nodes, old_view_no + 1)
    ensure_all_nodes_have_same_data(looper, nodes=remaining_nodes)
    new_pr_node = get_master_primary_node(remaining_nodes)
    assert old_pr_node != new_pr_node

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 5)

    # Check if old primary can join the pool and still functions
    old_pr_node = start_stopped_node(old_pr_node, looper, tconf,
                                     tdir, allPluginsPath)

    txnPoolNodeSet = remaining_nodes + [old_pr_node]
    looper.run(eventually(checkViewNoForNodes,
                          txnPoolNodeSet, old_view_no + 1, timeout=tconf.VIEW_CHANGE_TIMEOUT))
    assert len(getAllReturnVals(old_pr_node.view_changer,
                                old_pr_node.view_changer._start_view_change_if_possible,
                                compare_val_to=True)) > 0

    ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)

    assert not old_pr_node.view_changer._next_view_indications
def disconnect_node_and_ensure_disconnected(looper: Looper,
                                            poolNodes: Iterable[TestNode],
                                            disconnect: Union[str, TestNode],
                                            timeout=None,
                                            stopNode=True):
    if isinstance(disconnect, TestNode):
        disconnect = disconnect.name
    assert isinstance(disconnect, str)

    matches = [n for n in poolNodes if n.name == disconnect]
    assert len(matches) == 1
    node_to_disconnect = matches[0]

    disconnectPoolNode(poolNodes, disconnect, stopNode=stopNode)
    ensure_node_disconnected(looper,
                             node_to_disconnect,
                             set(poolNodes) - {node_to_disconnect},
                             timeout=timeout)
def test_state_recover_from_ledger(looper, tconf, tdir, sdk_pool_handle,
                                   sdk_wallet_trustee, allPluginsPath,
                                   fees_set, mint_tokens, addresses, fees,
                                   do_post_node_creation,
                                   nodeSetWithIntegratedTokenPlugin, helpers):
    node_set = nodeSetWithIntegratedTokenPlugin
    current_amount = get_amount_from_token_txn(mint_tokens)
    seq_no = 1

    current_amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, addresses, current_amount)
    current_amount, seq_no, _ = send_and_check_transfer(
        helpers, [addresses[0], addresses[1]],
        fees,
        looper,
        current_amount,
        seq_no,
        transfer_summ=current_amount)

    current_amount, seq_no, _ = send_and_check_transfer(
        helpers, [addresses[1], addresses[2]],
        fees,
        looper,
        current_amount,
        seq_no,
        transfer_summ=current_amount)

    ensure_all_nodes_have_same_data(looper, node_set)

    node_to_stop = node_set[-1]
    state_db_pathes = [
        state._kv.db_path for state in node_to_stop.states.values()
    ]
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, node_set[:-1])

    for path in state_db_pathes:
        shutil.rmtree(path)
    config_helper = NodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(node_to_stop.name,
                              config_helper=config_helper,
                              config=tconf,
                              pluginPaths=allPluginsPath,
                              ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    do_post_node_creation(restarted_node)

    looper.add(restarted_node)
    node_set = node_set[:-1]

    looper.run(checkNodesConnected(node_set))
    waitNodeDataEquality(looper, restarted_node, *node_set[:-1])

    ensure_all_nodes_have_same_data(looper, node_set)

    current_amount, seq_no, _ = send_and_check_transfer(
        helpers, [addresses[2], addresses[0]],
        fees,
        looper,
        current_amount,
        seq_no,
        transfer_summ=current_amount)

    current_amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, addresses, current_amount)
    current_amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, addresses, current_amount)

    ensure_all_nodes_have_same_data(looper, node_set)
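# A hedged sketch of the balance/seq_no threading used above: each
# send_and_check_* helper returns the updated amount and sequence number,
# which must be fed into the next call. The chaining helper below is
# illustrative only and assumes the same positional signature as in the test.
def hop_full_balance_through(helpers, fees, looper, addresses, amount, seq_no):
    for src, dst in zip(addresses, addresses[1:]):
        amount, seq_no, _ = send_and_check_transfer(
            helpers, [src, dst], fees, looper, amount, seq_no,
            transfer_summ=amount)
    return amount, seq_no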
def test_auth_txn_with_deprecated_key(tconf, tdir, allPluginsPath,
                                      txnPoolNodeSet, looper,
                                      sdk_wallet_trustee, sdk_pool_handle):
    """
    Add to the auth_map a fake rule
    Send AUTH_RULE txn to change this fake rule (and set the fake key to the config state)
    Send GET_AUTH_RULE txn and check that the fake rule was changed
    Remove the fake auth rule from the map
    Check that we can't get the fake auth rule
    Restart the last node with its state regeneration
    Check that nodes data is equal after changing the existing auth rule (restarted node regenerate config state)
    """

    fake_txn_type = "100002"
    fake_key = AuthActionAdd(txn_type=fake_txn_type, field="*",
                             value="*").get_action_id()
    fake_constraint = one_trustee_constraint
    new_auth_constraint = AuthConstraint(role=STEWARD,
                                         sig_count=1,
                                         need_to_be_owner=False).as_dict

    # Add to the auth_map a fake rule
    with extend_auth_map(txnPoolNodeSet, fake_key, fake_constraint):
        # Send AUTH_RULE txn to change this fake rule (and set the fake key to the config state)
        sdk_send_and_check_auth_rule_request(looper,
                                             sdk_pool_handle,
                                             sdk_wallet_trustee,
                                             auth_action=ADD_PREFIX,
                                             auth_type=fake_txn_type,
                                             field='*',
                                             new_value='*',
                                             constraint=new_auth_constraint)
        # Send GET_AUTH_RULE txn and check that the fake rule was changed
        result = sdk_send_and_check_get_auth_rule_request(
            looper,
            sdk_pool_handle,
            sdk_wallet_trustee,
            auth_type=fake_txn_type,
            auth_action=ADD_PREFIX,
            field="*",
            new_value="*")[0][1]["result"][DATA][0]
        assert result[AUTH_TYPE] == fake_txn_type
        assert result[CONSTRAINT] == new_auth_constraint

    # Remove the fake auth rule from the map
    # Check that we can't get the fake auth rule
    with pytest.raises(RequestNackedException,
                       match="not found in authorization map"):
        sdk_send_and_check_auth_rule_request(
            looper,
            sdk_pool_handle,
            sdk_wallet_trustee,
            auth_action=ADD_PREFIX,
            auth_type=fake_txn_type,
            field='*',
            new_value='*',
            constraint=AuthConstraint(role=STEWARD,
                                      sig_count=2,
                                      need_to_be_owner=False).as_dict)

    resp = sdk_send_and_check_get_auth_rule_request(looper, sdk_pool_handle,
                                                    sdk_wallet_trustee)

    assert all(rule[AUTH_TYPE] != fake_txn_type
               for rule in resp[0][1]["result"][DATA])

    with pytest.raises(RequestNackedException,
                       match="not found in authorization map"):
        sdk_send_and_check_get_auth_rule_request(looper,
                                                 sdk_pool_handle,
                                                 sdk_wallet_trustee,
                                                 auth_type=fake_txn_type,
                                                 auth_action=ADD_PREFIX,
                                                 field="*",
                                                 new_value="*")
    # Restart the last node with its state regeneration
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    node_to_stop = txnPoolNodeSet[-1]
    node_state = node_to_stop.states[CONFIG_LEDGER_ID]
    assert not node_state.isEmpty
    state_db_path = node_state._kv.db_path
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, txnPoolNodeSet[:-1])

    shutil.rmtree(state_db_path)

    config_helper = NodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(node_to_stop.name,
                              config_helper=config_helper,
                              config=tconf,
                              pluginPaths=allPluginsPath,
                              ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    txnPoolNodeSet[-1] = restarted_node

    # Check that nodes data is equal (restarted node regenerate config state)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30)
    sdk_send_and_check_auth_rule_request(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_trustee,
                                         auth_action=ADD_PREFIX,
                                         auth_type=NYM,
                                         field=ROLE,
                                         new_value=STEWARD,
                                         constraint=AuthConstraint(
                                             role=STEWARD,
                                             sig_count=2,
                                             need_to_be_owner=False).as_dict)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=20)
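# A hedged helper sketch for the GET_AUTH_RULE checks above: confirm that a
# given txn type no longer appears among the returned rules. It assumes the
# same response shape ([0][1]["result"][DATA]) as in the test; the name is
# illustrative.
def assert_auth_type_absent(get_auth_rule_resp, txn_type):
    rules = get_auth_rule_resp[0][1]["result"][DATA]
    assert all(rule[AUTH_TYPE] != txn_type for rule in rules)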
def test_new_node_catchup_update_projection(looper,
                                            nodeSet, tconf, tdir,
                                            sdk_pool_handle,
                                            sdk_wallet_trustee,
                                            allPluginsPath,
                                            some_transactions_done):
    """
    A node which receives txns from catchup updates both ledger and projection.
    4 nodes start up and some txns happen; after the txns are done, a new node
    joins and starts catching up. The node should not process requests while
    catchup is in progress. Make sure the new requests come from the new NYMs
    added while the node was offline or catching up.
    """
    # Create a new node and stop it.

    new_steward_wallet, new_node = sdk_node_theta_added(looper,
                                                        nodeSet,
                                                        tdir,
                                                        tconf,
                                                        sdk_pool_handle,
                                                        sdk_wallet_trustee,
                                                        allPluginsPath,
                                                        node_config_helper_class=NodeConfigHelper,
                                                        testNodeClass=TestNode)

    waitNodeDataEquality(looper, new_node, *nodeSet[:-1])
    ta_count = 2
    np_count = 2
    new_txn_count = 2 * ta_count + np_count  # Since ATTRIB txn is done for TA
    old_ledger_sizes = {}
    new_ledger_sizes = {}
    old_projection_sizes = {}
    new_projection_sizes = {}
    old_seq_no_map_sizes = {}
    new_seq_no_map_sizes = {}

    def get_ledger_size(node):
        return len(node.domainLedger)

    def get_projection_size(node):
        domain_state = node.getState(DOMAIN_LEDGER_ID)
        return len(domain_state.as_dict)

    def get_seq_no_map_size(node):
        return node.seqNoDB.size

    def fill_counters(ls, ps, ss, nodes):
        for n in nodes:
            ls[n.name] = get_ledger_size(n)
            ps[n.name] = get_projection_size(n)
            ss[n.name] = get_seq_no_map_size(n)

    def check_sizes(nodes):
        for node in nodes:
            assert new_ledger_sizes[node.name] - \
                   old_ledger_sizes[node.name] == new_txn_count
            assert new_projection_sizes[node.name] - \
                   old_projection_sizes[node.name] == new_txn_count
            assert new_seq_no_map_sizes[node.name] - \
                   old_seq_no_map_sizes[node.name] == new_txn_count

    # Stop a node and note down the sizes of ledger and projection (state)
    other_nodes = nodeSet[:-1]
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  other_nodes)
    new_node.cleanupOnStopping = False
    new_node.stop()
    looper.removeProdable(new_node)
    ensure_node_disconnected(looper, new_node, other_nodes)

    trust_anchors = []
    attributes = []
    for i in range(ta_count):
        trust_anchors.append(
            sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee,
                            role='TRUST_ANCHOR', alias='TA' + str(i)))
        attributes.append((randomString(6), randomString(10)))
        sdk_add_raw_attribute(looper, sdk_pool_handle, trust_anchors[-1],
                              *attributes[-1])
    non_privileged = []
    for i in range(np_count):
        non_privileged.append(
            sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee,
                            alias='NP' + str(i)))

    checkNodeDataForEquality(nodeSet[0], *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  other_nodes)
    # The size difference should be same as number of new NYM txns
    check_sizes(other_nodes)

    config_helper = NodeConfigHelper(new_node.name, tconf, chroot=tdir)
    new_node = TestNode(
        new_node.name,
        config_helper=config_helper,
        config=tconf,
        pluginPaths=allPluginsPath,
        ha=new_node.nodestack.ha,
        cliha=new_node.clientstack.ha)
    looper.add(new_node)
    nodeSet[-1] = new_node
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  [new_node])
    looper.run(checkNodesConnected(nodeSet))
    waitNodeDataEquality(looper, new_node, *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  [new_node])
    check_sizes([new_node])

    # Set the old counters to be current ledger and projection size
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  nodeSet)

    more_nyms_count = 2
    for wh in trust_anchors:
        for i in range(more_nyms_count):
            non_privileged.append(sdk_add_new_nym(looper, sdk_pool_handle, wh,
                                                  alias='NP1' + str(i)))

    # The new node should process transactions done by Nyms added to its
    # ledger while catchup
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  nodeSet)
    new_txn_count = more_nyms_count * len(trust_anchors)
    check_sizes(nodeSet)
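# A hedged, self-contained sketch of the delta-accounting idea behind
# fill_counters()/check_sizes() above: snapshot a per-node metric, apply some
# traffic, snapshot again, and assert every node grew by the same expected
# amount. The function names are illustrative only.
def snapshot(nodes, metric):
    return {n.name: metric(n) for n in nodes}

def assert_grew_by(before, after, expected):
    for name, old in before.items():
        assert after[name] - old == expected, \
            "{} grew by {} instead of {}".format(name, after[name] - old,
                                                 expected)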
def test_new_node_catchup_update_projection(looper, tdirWithPoolTxns,
                                            tdirWithDomainTxnsUpdated, nodeSet,
                                            tconf, trustee, trusteeWallet,
                                            allPluginsPath,
                                            some_transactions_done):
    """
    A node which receives txns from catchup updates both ledger and projection.
    4 nodes start up and some txns happen; after the txns are done, a new node
    joins and starts catching up. The node should not process requests while
    catchup is in progress. Make sure the new requests come from the new NYMs
    added while the node was offline or catching up.
    """
    # Create a new node and stop it.

    new_steward, new_steward_wallet, new_node = nodeThetaAdded(
        looper, nodeSet, tdirWithPoolTxns, tconf, trustee, trusteeWallet,
        allPluginsPath, TestNode, TestClient, tdirWithPoolTxns)

    waitNodeDataEquality(looper, new_node, *nodeSet[:-1])
    ta_count = 2
    np_count = 2
    new_txn_count = 2 * ta_count + np_count  # Since ATTRIB txn is done for TA
    old_ledger_sizes = {}
    new_ledger_sizes = {}
    old_projection_sizes = {}
    new_projection_sizes = {}
    old_seq_no_map_sizes = {}
    new_seq_no_map_sizes = {}

    def get_ledger_size(node):
        return len(node.domainLedger)

    def get_projection_size(node):
        domain_state = node.getState(DOMAIN_LEDGER_ID)
        return len(domain_state.as_dict)

    def get_seq_no_map_size(node):
        return node.seqNoDB.size

    def fill_counters(ls, ps, ss, nodes):
        for n in nodes:
            ls[n.name] = get_ledger_size(n)
            ps[n.name] = get_projection_size(n)
            ss[n.name] = get_seq_no_map_size(n)

    def check_sizes(nodes):
        for node in nodes:
            assert new_ledger_sizes[node.name] - \
                old_ledger_sizes[node.name] == new_txn_count
            assert new_projection_sizes[node.name] - \
                old_projection_sizes[node.name] == new_txn_count
            assert new_seq_no_map_sizes[node.name] - \
                old_seq_no_map_sizes[node.name] == new_txn_count

    # Stop a node and note down the sizes of ledger and projection (state)
    other_nodes = nodeSet[:-1]
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  other_nodes)
    new_node.cleanupOnStopping = False
    new_node.stop()
    looper.removeProdable(new_node)
    ensure_node_disconnected(looper, new_node.name, other_nodes)

    trust_anchors = []
    attributes = []
    for i in range(ta_count):
        trust_anchors.append(
            getClientAddedWithRole(other_nodes,
                                   tdirWithPoolTxns,
                                   looper,
                                   trustee,
                                   trusteeWallet,
                                   'TA' + str(i),
                                   role=TRUST_ANCHOR,
                                   client_connects_to=len(other_nodes)))
        attributes.append((randomString(6), randomString(10)))
        addRawAttribute(looper,
                        *trust_anchors[-1],
                        *attributes[-1],
                        dest=trust_anchors[-1][1].defaultId)
    non_privileged = []
    for i in range(np_count):
        non_privileged.append(
            getClientAddedWithRole(other_nodes,
                                   tdirWithPoolTxns,
                                   looper,
                                   trustee,
                                   trusteeWallet,
                                   'NP' + str(i),
                                   client_connects_to=len(other_nodes)))

    checkNodeDataForEquality(nodeSet[0], *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  other_nodes)
    # The size difference should be same as number of new NYM txns
    check_sizes(other_nodes)

    new_node = TestNode(new_node.name,
                        basedirpath=tdirWithPoolTxns,
                        config=tconf,
                        pluginPaths=allPluginsPath,
                        ha=new_node.nodestack.ha,
                        cliha=new_node.clientstack.ha)
    looper.add(new_node)
    nodeSet[-1] = new_node
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  [new_node])
    looper.run(checkNodesConnected(nodeSet))
    waitNodeDataEquality(looper, new_node, *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  [new_node])
    check_sizes([new_node])

    for i, (tc, tw) in enumerate(trust_anchors):
        reply = getAttribute(looper, tc, tw, tw.defaultId, *attributes[i])
        all_replies = tc.getRepliesFromAllNodes(reply[f.IDENTIFIER.nm],
                                                reply[f.REQ_ID.nm])
        assertLength(all_replies, len(nodeSet))
        assert new_node.clientstack.name in all_replies

    # Set the old counters to be current ledger and projection size
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  nodeSet)

    more_nyms_count = 2
    for tc, tw in trust_anchors:
        for i in range(more_nyms_count):
            non_privileged.append(
                getClientAddedWithRole(other_nodes, tdirWithPoolTxns, looper,
                                       tc, tw, 'NP1' + str(i)))

    # The new node should process transactions done by Nyms added to its
    # ledger while catchup
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  nodeSet)
    new_txn_count = more_nyms_count * len(trust_anchors)
    check_sizes(nodeSet)
def disconnect_node(looper, node, other_nodes):
    node.stop()
    looper.removeProdable(node)
    ensure_node_disconnected(looper, node, other_nodes)
    check_if_pool_n_minus_f(other_nodes)
def test_new_node_catchup_update_projection(looper, tdirWithPoolTxns,
                                            tdirWithDomainTxnsUpdated,
                                            nodeSet, tconf,
                                            trustee, trusteeWallet,
                                            allPluginsPath,
                                            some_transactions_done
                                            ):
    """
    A node which receives txns from catchup updates both ledger and projection.
    4 nodes start up and some txns happen; after the txns are done, a new node
    joins and starts catching up. The node should not process requests while
    catchup is in progress. Make sure the new requests come from the new NYMs
    added while the node was offline or catching up.
    """
    # Create a new node and stop it.

    new_steward, new_steward_wallet, new_node = nodeThetaAdded(looper,
                                                               nodeSet,
                                                               tdirWithPoolTxns,
                                                               tconf, trustee,
                                                               trusteeWallet,
                                                               allPluginsPath,
                                                               TestNode,
                                                               TestClient,
                                                               tdirWithPoolTxns)

    waitNodeDataEquality(looper, new_node, *nodeSet[:-1])
    ta_count = 2
    np_count = 2
    new_txn_count = 2*ta_count + np_count   # Since ATTRIB txn is done for TA
    old_ledger_sizes = {}
    new_ledger_sizes = {}
    old_projection_sizes = {}
    new_projection_sizes = {}
    old_seq_no_map_sizes = {}
    new_seq_no_map_sizes = {}

    def get_ledger_size(node):
        return len(node.domainLedger)

    def get_projection_size(node):
        domain_state = node.getState(DOMAIN_LEDGER_ID)
        return len(domain_state.as_dict)

    def get_seq_no_map_size(node):
        return node.seqNoDB.size

    def fill_counters(ls, ps, ss, nodes):
        for n in nodes:
            ls[n.name] = get_ledger_size(n)
            ps[n.name] = get_projection_size(n)
            ss[n.name] = get_seq_no_map_size(n)

    def check_sizes(nodes):
        for node in nodes:
            assert new_ledger_sizes[node.name] - old_ledger_sizes[node.name] == new_txn_count
            assert new_projection_sizes[node.name] - old_projection_sizes[node.name] == new_txn_count
            assert new_seq_no_map_sizes[node.name] - old_seq_no_map_sizes[node.name] == new_txn_count

    # Stop a node and note down the sizes of ledger and projection (state)
    other_nodes = nodeSet[:-1]
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  other_nodes)
    new_node.cleanupOnStopping = False
    new_node.stop()
    looper.removeProdable(new_node)
    ensure_node_disconnected(looper, new_node.name, other_nodes)

    trust_anchors = []
    attributes = []
    for i in range(ta_count):
        trust_anchors.append(getClientAddedWithRole(other_nodes,
                                                    tdirWithPoolTxns, looper,
                                                    trustee, trusteeWallet,
                                                    'TA'+str(i), role=TRUST_ANCHOR,
                                                    client_connects_to=len(other_nodes)))
        attributes.append((randomString(6), randomString(10)))
        addRawAttribute(looper, *trust_anchors[-1], *attributes[-1],
                        dest=trust_anchors[-1][1].defaultId)
    non_privileged = []
    for i in range(np_count):
        non_privileged.append(getClientAddedWithRole(other_nodes,
                                                     tdirWithPoolTxns, looper,
                                                     trustee, trusteeWallet,
                                                     'NP'+str(i),
                                                     client_connects_to=len(other_nodes)))

    checkNodeDataForEquality(nodeSet[0], *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  other_nodes)
    # The size difference should be same as number of new NYM txns
    check_sizes(other_nodes)

    new_node = TestNode(new_node.name, basedirpath=tdirWithPoolTxns,
                        config=tconf, pluginPaths=allPluginsPath,
                        ha=new_node.nodestack.ha, cliha=new_node.clientstack.ha)
    looper.add(new_node)
    nodeSet[-1] = new_node
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  [new_node])
    looper.run(checkNodesConnected(nodeSet))
    waitNodeDataEquality(looper, new_node, *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  [new_node])
    check_sizes([new_node])

    for i, (tc, tw) in enumerate(trust_anchors):
        reply = getAttribute(looper, tc, tw, tw.defaultId, *attributes[i])
        all_replies = tc.getRepliesFromAllNodes(reply[f.IDENTIFIER.nm],
                                                reply[f.REQ_ID.nm])
        assertLength(all_replies, len(nodeSet))
        assert new_node.clientstack.name in all_replies

    # Set the old counters to be current ledger and projection size
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  nodeSet)

    more_nyms_count = 2
    for tc, tw in trust_anchors:
        for i in range(more_nyms_count):
            non_privileged.append(getClientAddedWithRole(other_nodes,
                                                         tdirWithPoolTxns,
                                                         looper,
                                                         tc, tw,
                                                         'NP1' + str(i)))

    # The new node should process transactions done by Nyms added to its
    # ledger while catchup
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  nodeSet)
    new_txn_count = more_nyms_count*len(trust_anchors)
    check_sizes(nodeSet)
def test_state_recovery_with_xfer(looper, tconf, tdir,
                                  sdk_pool_handle,
                                  sdk_wallet_trustee,
                                  allPluginsPath,
                                  do_post_node_creation,
                                  nodeSetWithIntegratedTokenPlugin,
                                  helpers,
                                  valid_upgrade,
                                  mint_tokens,
                                  addresses,
                                  fees_set, fees,
                                  monkeypatch):
    version1 = "1.1.50"
    version2 = "1.1.88"
    current_amount = get_amount_from_token_txn(mint_tokens)
    seq_no = 1
    node_set = nodeSetWithIntegratedTokenPlugin

    current_amount, seq_no, _ = send_and_check_nym_with_fees(helpers, fees_set, seq_no, looper, addresses,
                                                             current_amount)
    # send POOL_UPGRADE to write in a ledger
    last_ordered = node_set[0].master_last_ordered_3PC[1]
    sdk_ensure_upgrade_sent(looper, sdk_pool_handle, sdk_wallet_trustee,
                            valid_upgrade)
    looper.run(eventually(lambda: assertEquality(node_set[0].master_last_ordered_3PC[1],
                                                 last_ordered + 1)))

    send_node_upgrades(node_set, version1, looper)
    for n in node_set:
        handler = n.write_manager.request_handlers.get(XFER_PUBLIC)[0]
        handler_for_1_0_0 = n.write_manager._request_handlers_with_version.get((XFER_PUBLIC, "1.0.0"))[0]
        monkeypatch.setattr(handler, 'update_state',
                            handler_for_1_0_0.update_state)

    current_amount, seq_no, _ = send_and_check_transfer(helpers, [addresses[0], addresses[1]], fees_set, looper,
                                                        current_amount, seq_no,
                                                        transfer_summ=current_amount)
    send_node_upgrades(node_set, version2, looper)
    monkeypatch.undo()
    current_amount, seq_no, _ = send_and_check_transfer(helpers, [addresses[1], addresses[0]], fees_set, looper,
                                                        current_amount, seq_no,
                                                        transfer_summ=current_amount)

    node_to_stop = node_set[-1]
    state_db_pathes = [state._kv.db_path
                       for state in node_to_stop.states.values()]
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, node_set[:-1])

    for path in state_db_pathes:
        shutil.rmtree(path)
    config_helper = NodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(
        node_to_stop.name,
        config_helper=config_helper,
        config=tconf,
        pluginPaths=allPluginsPath,
        ha=node_to_stop.nodestack.ha,
        cliha=node_to_stop.clientstack.ha)
    do_post_node_creation(restarted_node)

    looper.add(restarted_node)
    node_set[-1] = restarted_node

    looper.run(checkNodesConnected(node_set))
    waitNodeDataEquality(looper, restarted_node, *node_set[:-1], exclude_from_check=['check_last_ordered_3pc_backup'])
    current_amount, seq_no, _ = send_and_check_transfer(helpers, [addresses[0], addresses[1]], {}, looper,
                                                        current_amount, seq_no,
                                                        transfer_summ=1)
    waitNodeDataEquality(looper, restarted_node, *node_set[:-1], exclude_from_check=['check_last_ordered_3pc_backup'])
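# A hedged sketch of the monkeypatch step above: temporarily route the
# XFER_PUBLIC write handler's update_state through the registered "1.0.0"
# implementation so that old-format state is written; monkeypatch.undo()
# later restores the current behaviour. Names mirror the test; the wrapper
# itself is illustrative.
def patch_xfer_update_state_to_v1(monkeypatch, nodes):
    for n in nodes:
        handler = n.write_manager.request_handlers.get(XFER_PUBLIC)[0]
        v1_handler = n.write_manager._request_handlers_with_version.get(
            (XFER_PUBLIC, "1.0.0"))[0]
        monkeypatch.setattr(handler, 'update_state', v1_handler.update_state)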
def test_state_regenerated_from_ledger(looper, tdirWithClientPoolTxns,
                                       tdirWithDomainTxnsUpdated, nodeSet,
                                       tconf, tdir, trustee, trusteeWallet,
                                       allPluginsPath):
    """
    Node loses its state database but recreates it from the ledger after start.
    ATTRIB txns are checked as well since they store some data off-ledger
    """
    trust_anchors = []
    for i in range(5):
        trust_anchors.append(
            getClientAddedWithRole(nodeSet,
                                   tdirWithClientPoolTxns,
                                   looper,
                                   trustee,
                                   trusteeWallet,
                                   'TA' + str(i),
                                   role=TRUST_ANCHOR))
        addRawAttribute(looper,
                        *trust_anchors[-1],
                        randomString(6),
                        randomString(10),
                        dest=trust_anchors[-1][1].defaultId)

    for tc, tw in trust_anchors:
        for i in range(3):
            getClientAddedWithRole(nodeSet, tdirWithClientPoolTxns, looper, tc,
                                   tw, 'NP1' + str(i))

    ensure_all_nodes_have_same_data(looper, nodeSet)

    node_to_stop = nodeSet[-1]
    node_state = node_to_stop.states[DOMAIN_LEDGER_ID]
    assert not node_state.isEmpty
    state_db_path = node_state._kv.db_path
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop.name, nodeSet[:-1])

    shutil.rmtree(state_db_path)

    config_helper = NodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(node_to_stop.name,
                              config_helper=config_helper,
                              config=tconf,
                              pluginPaths=allPluginsPath,
                              ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    nodeSet[-1] = restarted_node

    looper.run(checkNodesConnected(nodeSet))
    # Need some time as `last_ordered_3PC` is compared too and that is
    # communicated through catchup
    waitNodeDataEquality(looper, restarted_node, *nodeSet[:-1])

    # Pool is still functional
    for tc, tw in trust_anchors:
        getClientAddedWithRole(nodeSet, tdirWithClientPoolTxns, looper, tc, tw,
                               'NP--{}'.format(tc.name))

    ensure_all_nodes_have_same_data(looper, nodeSet)