def testNodeCatchupAfterDisconnect(newNodeCaughtUp, txnPoolNodeSet,
                                   nodeSetWithNodeAddedAfterSomeTxns):
    """
    A node that disconnects after some transactions should eventually get the
    transactions which happened while it was disconnected
    :return:
    """
    looper, newNode, client, wallet, _, _ = nodeSetWithNodeAddedAfterSomeTxns

    logger.debug("Stopping node {} with pool ledger size {}".format(
        newNode, newNode.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            newNode,
                                            stopNode=False)
    looper.removeProdable(newNode)

    # TODO: Check if the node has really stopped processing requests?
    logger.debug("Sending requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
    # Make sure new node got out of sync
    waitNodeDataInequality(looper, newNode, *txnPoolNodeSet[:-1])

    logger.debug("Starting the stopped node, {}".format(newNode))
    looper.add(newNode)
    reconnect_node_and_ensure_connected(looper, txnPoolNodeSet, newNode)

    logger.debug("Waiting for the node to catch up, {}".format(newNode))
    waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:-1])

    logger.debug("Sending more requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 10)
    checkNodeDataForEquality(newNode, *txnPoolNodeSet[:-1])
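
As context for the waits above: the equality/inequality helpers poll the nodes' ledgers until they converge (or diverge) or a timeout elapses. A minimal sketch of that polling pattern, using hypothetical helper names rather than the plenum test API:

import time

def wait_until_equal(get_lhs, get_rhs, timeout=30, interval=1):
    # Poll both sides until they report the same value, failing on timeout.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if get_lhs() == get_rhs():
            return
        time.sleep(interval)
    raise TimeoutError("values never converged")

# Usage sketch: compare the reconnected node's domain ledger size with a
# reference node's, e.g.
# wait_until_equal(lambda: newNode.domainLedger.size,
#                  lambda: txnPoolNodeSet[0].domainLedger.size)
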
def testNodeCatchupAfterLostConnection(newNodeCaughtUp, txnPoolNodeSet,
                                       nodeSetWithNodeAddedAfterSomeTxns):
    """
    A node that has poor internet connection and got unsynced after some
    transactions should eventually get the transactions which happened while
    it was not accessible
    :return:
    """
    looper, newNode, client, wallet, _, _ = nodeSetWithNodeAddedAfterSomeTxns
    logger.debug("Disconnecting node {}, ledger size {}".
                 format(newNode, newNode.domainLedger.size))
    disconnect_node_and_ensure_disconnected(looper, txnPoolNodeSet, newNode,
                                            stopNode=False)
    looper.removeProdable(newNode)

    # TODO: Check if the node has really stopped processing requests?
    logger.debug("Sending requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
    # Make sure new node got out of sync
    waitNodeDataInequality(looper, newNode, *txnPoolNodeSet[:-1])

    # logger.debug("Ensure node {} gets disconnected".format(newNode))
    ensure_node_disconnected(looper, newNode, txnPoolNodeSet[:-1])

    logger.debug("Connecting the node {} back, ledger size {}".
                 format(newNode, newNode.domainLedger.size))
    looper.add(newNode)

    logger.debug("Waiting for the node to catch up, {}".format(newNode))
    waitNodeDataEquality(looper, newNode, *txnPoolNodeSet[:-1])

    logger.debug("Sending more requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 10)
    checkNodeDataForEquality(newNode, *txnPoolNodeSet[:-1])
def testNodeCatchupAfterDisconnect(
        sdk_new_node_caught_up, txnPoolNodeSet,
        sdk_node_set_with_node_added_after_some_txns):
    """
    A node that disconnects after some transactions should eventually get the
    transactions which happened while it was disconnected
    :return:
    """
    looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \
        sdk_node_set_with_node_added_after_some_txns

    logger.debug("Disconnecting node {} with pool ledger size {}".format(
        new_node, new_node.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            new_node,
                                            stopNode=False)

    # TODO: Check if the node has really stopped processing requests?
    logger.debug("Sending requests")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet_handle, 5)
    # Make sure new node got out of sync
    waitNodeDataInequality(looper, new_node, *txnPoolNodeSet[:-1])

    logger.debug("Connecting the stopped node, {}".format(new_node))
    reconnect_node_and_ensure_connected(looper, txnPoolNodeSet, new_node)

    logger.debug("Waiting for the node to catch up, {}".format(new_node))
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])

    logger.debug("Sending more requests")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet_handle, 10)
    checkNodeDataForEquality(new_node, *txnPoolNodeSet[:-1])
def test_apply_data(fake_node,
                    fake_node_observer,
                    txnPoolNodeSet,
                    observed_data_msgs):
    '''
    - Create a Node (do not add it to the pool) with an Observer.
    - Send txns and get ObservedData msgs from Validators.
    - Apply the ObservedData msgs on the Observer and make sure that it becomes synced with the pool.
    '''

    # check that Observer is not synced with the pool
    checkNodeDataForInequality(fake_node,
                               *txnPoolNodeSet,
                               exclude_from_check=['check_last_ordered_3pc',
                                                   'check_seqno_db'])

    # emulate sending of ObservedData from each Node
    for observed_data_msg in observed_data_msgs:
        for node in txnPoolNodeSet:
            fake_node_observer.apply_data(observed_data_msg, node.name)

    # check that Observer is synced with the pool
    checkNodeDataForEquality(fake_node,
                             *txnPoolNodeSet,
                             exclude_from_check=['check_last_ordered_3pc',
                                                 'check_primaries',
                                                 'check_last_ordered_3pc_backup'])
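
The test above relies on ObservedData messages being applied in order. A minimal sketch of that idea, buffering out-of-order batches until their turn comes (a hypothetical structure, not the plenum Observer implementation):

class MiniObserver:
    def __init__(self):
        self.next_seq_no = 1
        self.pending = {}    # seq_no -> batch received out of order
        self.applied = []    # batches applied so far, in order

    def apply_data(self, batch, sender_name):
        # Buffer the batch, then drain everything that is now contiguous.
        self.pending[batch["seq_no"]] = batch
        while self.next_seq_no in self.pending:
            self.applied.append(self.pending.pop(self.next_seq_no))
            self.next_seq_no += 1
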
def test_apply_data(fake_node,
                    fake_node_observer,
                    txnPoolNodeSet,
                    observed_data_msgs):
    '''
    - Create a Node (do not add it to the pool) with an Observer.
    - Send txns and get ObservedData msgs from Validators.
    - Apply the ObservedData msgs on the Observer and make sure that it becomes synced with the pool.
    '''

    # check that Observer is not synced with the pool
    checkNodeDataForInequality(fake_node,
                               *txnPoolNodeSet,
                               exclude_from_check=['check_last_ordered_3pc',
                                                   'check_seqno_db'])

    # emulate sending of ObservedData from each Node
    for observed_data_msg in observed_data_msgs:
        for node in txnPoolNodeSet:
            fake_node_observer.apply_data(observed_data_msg, node.name)

    # check that Observer is synced with the pool
    checkNodeDataForEquality(fake_node,
                             *txnPoolNodeSet,
                             exclude_from_check=['check_last_ordered_3pc'])
def testNodeCatchupAfterDisconnect(sdk_new_node_caught_up, txnPoolNodeSet,
                                   sdk_node_set_with_node_added_after_some_txns):
    """
    A node that disconnects after some transactions should eventually get the
    transactions which happened while it was disconnected
    :return:
    """
    looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \
        sdk_node_set_with_node_added_after_some_txns

    logger.debug("Disconnecting node {} with pool ledger size {}".
                 format(new_node, new_node.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(
        looper, txnPoolNodeSet, new_node, stopNode=False)

    # TODO: Check if the node has really stopped processing requests?
    logger.debug("Sending requests")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet_handle, 5)
    # Make sure new node got out of sync
    waitNodeDataInequality(looper, new_node, *txnPoolNodeSet[:-1])

    logger.debug("Connecting the stopped node, {}".format(new_node))
    reconnect_node_and_ensure_connected(looper, txnPoolNodeSet, new_node)

    logger.debug("Waiting for the node to catch up, {}".format(new_node))
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])

    logger.debug("Sending more requests")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              new_steward_wallet_handle, 10)
    checkNodeDataForEquality(new_node, *txnPoolNodeSet[:-1])
Example #7
def test_observer_node(txnPoolNodeSet, looper, sdk_pool_handle,
                       sdk_wallet_client):
    '''
    Integration test checking the full workflow between a real Node Observer and real Node Observables.
    '''
    # exclude Delta from consensus
    observer_node = txnPoolNodeSet[-1]
    other_nodes = txnPoolNodeSet[:-1]
    exclude_from_consensus(observer_node)

    # add Delta as Observer to other nodes
    for node in other_nodes:
        node.add_observer(observer_node.name,
                          ObserverSyncPolicyType.EACH_BATCH)

    # send requests so that they are propagated to the Observer (Delta)
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 10)

    # check that Delta is in sync with other Nodes.
    checkNodeDataForEquality(observer_node,
                             *other_nodes,
                             exclude_from_check=[
                                 'check_last_ordered_3pc',
                                 'check_last_ordered_3pc_backup'
                             ])
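
On the other side of the workflow, each Observable node fans ordered batches out to its registered Observers according to the chosen sync policy. A minimal sketch of that fan-out, with hypothetical names (the real add_observer/policy machinery lives in plenum):

class MiniObservable:
    def __init__(self):
        self.observers = {}    # observer name -> callback

    def add_observer(self, name, callback):
        self.observers[name] = callback

    def on_batch_ordered(self, batch):
        # Push every ordered batch to every registered observer.
        for name, callback in self.observers.items():
            callback(batch, name)
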
def testNodeCatchupFPlusOne(looper, txnPoolNodeSet, sdk_pool_handle,
                            sdk_wallet_steward, tconf, tdir, tdirWithPoolTxns,
                            allPluginsPath, testNodeClass):
    """
    Check that f+1 nodes are enough for catchup
    """

    assert len(txnPoolNodeSet) == 4

    node1 = txnPoolNodeSet[-1]
    node0 = txnPoolNodeSet[-2]

    logger.debug("Stopping node0 with pool ledger size {}".format(
        node0.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node0,
                                            stopNode=True)
    looper.removeProdable(node0)

    logger.debug("Sending requests")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)

    logger.debug("Stopping node1 with pool ledger size {}".format(
        node1.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node1,
                                            stopNode=True)
    looper.removeProdable(node1)

    # Make sure new node got out of sync
    # Excluding the state check since the node is stopped and hence its state DB is closed
    waitNodeDataInequality(looper,
                           node0,
                           *txnPoolNodeSet[:-2],
                           exclude_from_check=['check_state'])

    # TODO: Check if the node has really stopped processing requests?

    logger.debug("Starting the stopped node0")
    nodeHa, nodeCHa = HA(*node0.nodestack.ha), HA(*node0.clientstack.ha)
    config_helper = PNodeConfigHelper(node0.name, tconf, chroot=tdir)
    node0 = testNodeClass(node0.name,
                          config_helper=config_helper,
                          ha=nodeHa,
                          cliha=nodeCHa,
                          config=tconf,
                          pluginPaths=allPluginsPath)
    looper.add(node0)

    logger.debug("Waiting for the node0 to catch up")
    waitNodeDataEquality(looper, node0, *txnPoolNodeSet[:-2])

    logger.debug("Sending more requests")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 2)
    checkNodeDataForEquality(node0, *txnPoolNodeSet[:-2])
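
The arithmetic behind "f+1 is enough": assuming the usual BFT bound n >= 3f + 1 that plenum uses, a 4-node pool tolerates f = 1 faulty node, so after stopping node0 and node1 the two nodes that never went down (f + 1 of them) are expected to suffice for node0's catchup. A quick sketch of that bound:

def max_faulty(n):
    # Largest f satisfying n >= 3f + 1.
    return (n - 1) // 3

n = 4
f = max_faulty(n)
assert f == 1
assert n - 2 == f + 1   # stopping node0 and node1 leaves exactly f+1 live nodes
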
def testNodeCatchupFPlusOne(looper,
                            txnPoolNodeSet,
                            sdk_pool_handle,
                            sdk_wallet_steward,
                            tconf, tdir,
                            tdirWithPoolTxns, allPluginsPath, testNodeClass):
    """
    Check that f+1 nodes are enough for catchup
    """

    assert len(txnPoolNodeSet) == 4

    node1 = txnPoolNodeSet[-1]
    node0 = txnPoolNodeSet[-2]

    logger.debug("Stopping node0 with pool ledger size {}".
                 format(node0.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(
        looper, txnPoolNodeSet, node0, stopNode=True)
    looper.removeProdable(node0)

    logger.debug("Sending requests")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 5)

    logger.debug("Stopping node1 with pool ledger size {}".
                 format(node1.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(
        looper, txnPoolNodeSet, node1, stopNode=True)
    looper.removeProdable(node1)

    # Make sure new node got out of sync
    # Excluding the state check since the node is stopped and hence its state DB is closed
    waitNodeDataInequality(looper, node0, *txnPoolNodeSet[:-2],
                           exclude_from_check=['check_state'])

    # TODO: Check if the node has really stopped processing requests?

    logger.debug("Starting the stopped node0")
    nodeHa, nodeCHa = HA(*node0.nodestack.ha), HA(*node0.clientstack.ha)
    config_helper = PNodeConfigHelper(node0.name, tconf, chroot=tdir)
    node0 = testNodeClass(node0.name,
                          config_helper=config_helper,
                          ha=nodeHa, cliha=nodeCHa,
                          config=tconf, pluginPaths=allPluginsPath)
    looper.add(node0)

    logger.debug("Waiting for the node0 to catch up")
    waitNodeDataEquality(looper, node0, *txnPoolNodeSet[:-2])

    logger.debug("Sending more requests")
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 2)
    checkNodeDataForEquality(node0, *txnPoolNodeSet[:-2])
Example #10
def testNodeCatchupFPlusOne(txnPoolNodeSet, poolAfterSomeTxns, tconf, tdir,
                            tdirWithPoolTxns, allPluginsPath, testNodeClass):
    """
    Check that f+1 nodes are enough for catchup
    """
    looper, client, wallet = poolAfterSomeTxns

    assert len(txnPoolNodeSet) == 4

    node1 = txnPoolNodeSet[-1]
    node0 = txnPoolNodeSet[-2]

    logger.debug("Stopping node0 with pool ledger size {}".format(
        node0.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node0,
                                            stopNode=True)
    looper.removeProdable(node0)

    logger.debug("Sending requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)

    logger.debug("Stopping node1 with pool ledger size {}".format(
        node1.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node1,
                                            stopNode=True)
    looper.removeProdable(node1)

    # Make sure new node got out of sync
    waitNodeDataInequality(looper, node0, *txnPoolNodeSet[:-2])

    # TODO: Check if the node has really stopped processing requests?

    logger.debug("Starting the stopped node0")
    nodeHa, nodeCHa = HA(*node0.nodestack.ha), HA(*node0.clientstack.ha)
    config_helper = PNodeConfigHelper(node0.name, tconf, chroot=tdir)
    node0 = testNodeClass(node0.name,
                          config_helper=config_helper,
                          ha=nodeHa,
                          cliha=nodeCHa,
                          config=tconf,
                          pluginPaths=allPluginsPath)
    looper.add(node0)

    logger.debug("Waiting for the node0 to catch up")
    waitNodeDataEquality(looper, node0, *txnPoolNodeSet[:-2])

    logger.debug("Sending more requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 2)
    checkNodeDataForEquality(node0, *txnPoolNodeSet[:-2])
Example #11
def testNodeCatchupFPlusOne(txnPoolNodeSet, poolAfterSomeTxns):
    """
    Check that f+1 nodes are enough for catchup
    """
    looper, client, wallet = poolAfterSomeTxns

    assert len(txnPoolNodeSet) == 4

    node1 = txnPoolNodeSet[-1]
    node0 = txnPoolNodeSet[-2]

    logger.debug("Stopping node0 with pool ledger size {}".
                 format(node0.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(
        looper, txnPoolNodeSet, node0, stopNode=False)
    looper.removeProdable(node0)

    logger.debug("Sending requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)

    logger.debug("Stopping node1 with pool ledger size {}".
                 format(node1.poolManager.txnSeqNo))
    disconnect_node_and_ensure_disconnected(
        looper, txnPoolNodeSet, node1, stopNode=False)
    looper.removeProdable(node1)

    # Make sure new node got out of sync
    waitNodeDataInequality(looper, node0, *txnPoolNodeSet[:-2])

    # TODO: Check if the node has really stopped processing requests?

    logger.debug("Starting the stopped node0")
    looper.add(node0)
    reconnect_node_and_ensure_connected(looper, txnPoolNodeSet[:-1], node0)

    logger.debug("Waiting for the node0 to catch up")
    waitNodeDataEquality(looper, node0, *txnPoolNodeSet[:-2])

    logger.debug("Sending more requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 2)
    checkNodeDataForEquality(node0, *txnPoolNodeSet[:-2])
def test_observer_node(txnPoolNodeSet,
                       looper,
                       sdk_pool_handle, sdk_wallet_client):
    '''
    Integration test checking the full workflow between a real Node Observer and real Node Observables.
    '''
    # exclude Delta from consensus
    observer_node = txnPoolNodeSet[-1]
    other_nodes = txnPoolNodeSet[:-1]
    exclude_from_consensus(observer_node)

    # add Delta as Observer to other nodes
    for node in other_nodes:
        node.add_observer(observer_node.name, ObserverSyncPolicyType.EACH_BATCH)

    # send requests so that they are propagated to the Observer (Delta)
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client,
                              60)

    # check that Delta is in sync with other Nodes.
    checkNodeDataForEquality(observer_node,
                             *other_nodes,
                             exclude_from_check=['check_last_ordered_3pc'])
def test_new_node_catchup_update_projection(looper,
                                            nodeSet, tconf, tdir,
                                            sdk_pool_handle,
                                            sdk_wallet_trustee,
                                            allPluginsPath,
                                            some_transactions_done):
    """
    A node which receives txns from catchup updates both ledger and projection
    4 nodes start up and some txns happen, after txns are done, new node joins
    and starts catching up, the node should not process requests while catchup
    is in progress. Make sure the new requests are coming from the new NYMs
    added while the node was offline or catching up.
    """
    # Create a new node and stop it.

    new_steward_wallet, new_node = sdk_node_theta_added(looper,
                                                        nodeSet,
                                                        tdir,
                                                        tconf,
                                                        sdk_pool_handle,
                                                        sdk_wallet_trustee,
                                                        allPluginsPath,
                                                        node_config_helper_class=NodeConfigHelper,
                                                        testNodeClass=TestNode)

    waitNodeDataEquality(looper, new_node, *nodeSet[:-1])
    ta_count = 2
    np_count = 2
    new_txn_count = 2 * ta_count + np_count  # each TA adds a NYM and an ATTRIB txn; each NP adds a NYM
    old_ledger_sizes = {}
    new_ledger_sizes = {}
    old_projection_sizes = {}
    new_projection_sizes = {}
    old_seq_no_map_sizes = {}
    new_seq_no_map_sizes = {}

    def get_ledger_size(node):
        return len(node.domainLedger)

    def get_projection_size(node):
        domain_state = node.getState(DOMAIN_LEDGER_ID)
        return len(domain_state.as_dict)

    def get_seq_no_map_size(node):
        return node.seqNoDB.size

    def fill_counters(ls, ps, ss, nodes):
        for n in nodes:
            ls[n.name] = get_ledger_size(n)
            ps[n.name] = get_projection_size(n)
            ss[n.name] = get_seq_no_map_size(n)

    def check_sizes(nodes):
        for node in nodes:
            assert new_ledger_sizes[node.name] - \
                   old_ledger_sizes[node.name] == new_txn_count
            assert new_projection_sizes[node.name] - \
                   old_projection_sizes[node.name] == new_txn_count
            assert new_seq_no_map_sizes[node.name] - \
                   old_seq_no_map_sizes[node.name] == new_txn_count

    # Stop a node and note down the sizes of ledger and projection (state)
    other_nodes = nodeSet[:-1]
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  other_nodes)
    new_node.cleanupOnStopping = False
    # new_node.stop()
    # looper.removeProdable(new_node)
    # ensure_node_disconnected(looper, new_node, other_nodes)

    disconnect_node_and_ensure_disconnected(looper,
                                            nodeSet,
                                            new_node.name)
    looper.removeProdable(name=new_node.name)
    trust_anchors = []
    attributes = []
    for i in range(ta_count):
        trust_anchors.append(
            sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee,
                            role='TRUST_ANCHOR', alias='TA' + str(i)))
        attributes.append((randomString(6), randomString(10)))
        sdk_add_raw_attribute(looper, sdk_pool_handle, trust_anchors[-1],
                              *attributes[-1])
    non_privileged = []
    for i in range(np_count):
        non_privileged.append(
            sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee,
                            alias='NP' + str(i)))

    checkNodeDataForEquality(nodeSet[0], *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  other_nodes)
    # The size difference should equal the number of new txns
    check_sizes(other_nodes)

    new_node = start_stopped_node(new_node, looper, tconf,
                                  tdir, allPluginsPath)
    nodeSet[-1] = new_node

    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  [new_node])
    looper.run(checkNodesConnected(nodeSet))
    waitNodeDataEquality(looper, new_node, *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  [new_node])
    check_sizes([new_node])

    # Set the old counters to be current ledger and projection size
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  nodeSet)

    more_nyms_count = 2
    for wh in trust_anchors:
        for i in range(more_nyms_count):
            non_privileged.append(sdk_add_new_nym(looper, sdk_pool_handle, wh,
                                                  alias='NP1' + str(i)))

    # The new node should process transactions done by NYMs added to its
    # ledger while it was catching up
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  nodeSet)
    new_txn_count = more_nyms_count * len(trust_anchors)
    check_sizes(nodeSet)
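
The expected-growth arithmetic used by check_sizes() above: each trust anchor contributes two txns (a NYM plus an ATTRIB), each non-privileged identity contributes one NYM, and the ledger, the state projection and the seqNo map are all expected to grow by the same amount. A quick check of the numbers from the test:

ta_count, np_count = 2, 2
new_txn_count = 2 * ta_count + np_count
assert new_txn_count == 6   # every counter should grow by 6 on each node
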
Example #14
def test_new_node_catchup_update_projection(looper, tdirWithPoolTxns,
                                            tdirWithDomainTxnsUpdated, nodeSet,
                                            tconf, trustee, trusteeWallet,
                                            allPluginsPath,
                                            some_transactions_done):
    """
    A node which receives txns from catchup updates both ledger and projection
    4 nodes start up and some txns happen, after txns are done, new node joins
    and starts catching up, the node should not process requests while catchup
    is in progress. Make sure the new requests are coming from the new NYMs
    added while the node was offline or catching up.
    """
    # Create a new node and stop it.

    new_steward, new_steward_wallet, new_node = nodeThetaAdded(
        looper, nodeSet, tdirWithPoolTxns, tconf, trustee, trusteeWallet,
        allPluginsPath, TestNode, TestClient, tdirWithPoolTxns)

    waitNodeDataEquality(looper, new_node, *nodeSet[:-1])
    ta_count = 2
    np_count = 2
    new_txn_count = 2 * ta_count + np_count  # each TA adds a NYM and an ATTRIB txn; each NP adds a NYM
    old_ledger_sizes = {}
    new_ledger_sizes = {}
    old_projection_sizes = {}
    new_projection_sizes = {}
    old_seq_no_map_sizes = {}
    new_seq_no_map_sizes = {}

    def get_ledger_size(node):
        return len(node.domainLedger)

    def get_projection_size(node):
        domain_state = node.getState(DOMAIN_LEDGER_ID)
        return len(domain_state.as_dict)

    def get_seq_no_map_size(node):
        return node.seqNoDB.size

    def fill_counters(ls, ps, ss, nodes):
        for n in nodes:
            ls[n.name] = get_ledger_size(n)
            ps[n.name] = get_projection_size(n)
            ss[n.name] = get_seq_no_map_size(n)

    def check_sizes(nodes):
        for node in nodes:
            assert new_ledger_sizes[node.name] - \
                old_ledger_sizes[node.name] == new_txn_count
            assert new_projection_sizes[node.name] - \
                old_projection_sizes[node.name] == new_txn_count
            assert new_seq_no_map_sizes[node.name] - \
                old_seq_no_map_sizes[node.name] == new_txn_count

    # Stop a node and note down the sizes of ledger and projection (state)
    other_nodes = nodeSet[:-1]
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  other_nodes)
    new_node.cleanupOnStopping = False
    new_node.stop()
    looper.removeProdable(new_node)
    ensure_node_disconnected(looper, new_node.name, other_nodes)

    trust_anchors = []
    attributes = []
    for i in range(ta_count):
        trust_anchors.append(
            getClientAddedWithRole(other_nodes,
                                   tdirWithPoolTxns,
                                   looper,
                                   trustee,
                                   trusteeWallet,
                                   'TA' + str(i),
                                   role=TRUST_ANCHOR,
                                   client_connects_to=len(other_nodes)))
        attributes.append((randomString(6), randomString(10)))
        addRawAttribute(looper,
                        *trust_anchors[-1],
                        *attributes[-1],
                        dest=trust_anchors[-1][1].defaultId)
    non_privileged = []
    for i in range(np_count):
        non_privileged.append(
            getClientAddedWithRole(other_nodes,
                                   tdirWithPoolTxns,
                                   looper,
                                   trustee,
                                   trusteeWallet,
                                   'NP' + str(i),
                                   client_connects_to=len(other_nodes)))

    checkNodeDataForEquality(nodeSet[0], *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  other_nodes)
    # The size difference should equal the number of new txns
    check_sizes(other_nodes)

    new_node = TestNode(new_node.name,
                        basedirpath=tdirWithPoolTxns,
                        config=tconf,
                        pluginPaths=allPluginsPath,
                        ha=new_node.nodestack.ha,
                        cliha=new_node.clientstack.ha)
    looper.add(new_node)
    nodeSet[-1] = new_node
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  [new_node])
    looper.run(checkNodesConnected(nodeSet))
    waitNodeDataEquality(looper, new_node, *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  [new_node])
    check_sizes([new_node])

    for i, (tc, tw) in enumerate(trust_anchors):
        reply = getAttribute(looper, tc, tw, tw.defaultId, *attributes[i])
        all_replies = tc.getRepliesFromAllNodes(reply[f.IDENTIFIER.nm],
                                                reply[f.REQ_ID.nm])
        assertLength(all_replies, len(nodeSet))
        assert new_node.clientstack.name in all_replies

    # Set the old counters to be current ledger and projection size
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  nodeSet)

    more_nyms_count = 2
    for tc, tw in trust_anchors:
        for i in range(more_nyms_count):
            non_privileged.append(
                getClientAddedWithRole(other_nodes, tdirWithPoolTxns, looper,
                                       tc, tw, 'NP1' + str(i)))

    # The new node should process transactions done by NYMs added to its
    # ledger while it was catching up
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  nodeSet)
    new_txn_count = more_nyms_count * len(trust_anchors)
    check_sizes(nodeSet)
def test_new_node_catchup_update_projection(looper, tdirWithPoolTxns,
                                            tdirWithDomainTxnsUpdated,
                                            nodeSet, tconf,
                                            trustee, trusteeWallet,
                                            allPluginsPath,
                                            some_transactions_done
                                            ):
    """
    A node which receives txns from catchup updates both ledger and projection
    4 nodes start up and some txns happen, after txns are done, new node joins
    and starts catching up, the node should not process requests while catchup
    is in progress. Make sure the new requests are coming from the new NYMs
    added while the node was offline or catching up.
    """
    # Create a new node and stop it.

    new_steward, new_steward_wallet, new_node = nodeThetaAdded(looper,
                                                               nodeSet,
                                                               tdirWithPoolTxns,
                                                               tconf, trustee,
                                                               trusteeWallet,
                                                               allPluginsPath,
                                                               TestNode,
                                                               TestClient,
                                                               tdirWithPoolTxns)

    waitNodeDataEquality(looper, new_node, *nodeSet[:-1])
    ta_count = 2
    np_count = 2
    new_txn_count = 2 * ta_count + np_count  # each TA adds a NYM and an ATTRIB txn; each NP adds a NYM
    old_ledger_sizes = {}
    new_ledger_sizes = {}
    old_projection_sizes = {}
    new_projection_sizes = {}
    old_seq_no_map_sizes = {}
    new_seq_no_map_sizes = {}

    def get_ledger_size(node):
        return len(node.domainLedger)

    def get_projection_size(node):
        domain_state = node.getState(DOMAIN_LEDGER_ID)
        return len(domain_state.as_dict)

    def get_seq_no_map_size(node):
        return node.seqNoDB.size

    def fill_counters(ls, ps, ss, nodes):
        for n in nodes:
            ls[n.name] = get_ledger_size(n)
            ps[n.name] = get_projection_size(n)
            ss[n.name] = get_seq_no_map_size(n)

    def check_sizes(nodes):
        for node in nodes:
            assert new_ledger_sizes[node.name] - old_ledger_sizes[node.name] == new_txn_count
            assert new_projection_sizes[node.name] - old_projection_sizes[node.name] == new_txn_count
            assert new_seq_no_map_sizes[node.name] - old_seq_no_map_sizes[node.name] == new_txn_count

    # Stop a node and note down the sizes of ledger and projection (state)
    other_nodes = nodeSet[:-1]
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  other_nodes)
    new_node.cleanupOnStopping = False
    new_node.stop()
    looper.removeProdable(new_node)
    ensure_node_disconnected(looper, new_node.name, other_nodes)

    trust_anchors = []
    attributes = []
    for i in range(ta_count):
        trust_anchors.append(getClientAddedWithRole(other_nodes,
                                                    tdirWithPoolTxns, looper,
                                                    trustee, trusteeWallet,
                                                    'TA'+str(i), role=TRUST_ANCHOR,
                                                    client_connects_to=len(other_nodes)))
        attributes.append((randomString(6), randomString(10)))
        addRawAttribute(looper, *trust_anchors[-1], *attributes[-1],
                        dest=trust_anchors[-1][1].defaultId)
    non_privileged = []
    for i in range(np_count):
        non_privileged.append(getClientAddedWithRole(other_nodes,
                                                     tdirWithPoolTxns, looper,
                                                     trustee, trusteeWallet,
                                                     'NP'+str(i),
                                                     client_connects_to=len(other_nodes)))

    checkNodeDataForEquality(nodeSet[0], *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  other_nodes)
    # The size difference should equal the number of new txns
    check_sizes(other_nodes)

    new_node = TestNode(new_node.name, basedirpath=tdirWithPoolTxns,
                        config=tconf, pluginPaths=allPluginsPath,
                        ha=new_node.nodestack.ha, cliha=new_node.clientstack.ha)
    looper.add(new_node)
    nodeSet[-1] = new_node
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  [new_node])
    looper.run(checkNodesConnected(nodeSet))
    waitNodeDataEquality(looper, new_node, *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  [new_node])
    check_sizes([new_node])

    for i, (tc, tw) in enumerate(trust_anchors):
        reply = getAttribute(looper, tc, tw, tw.defaultId, *attributes[i])
        all_replies = tc.getRepliesFromAllNodes(reply[f.IDENTIFIER.nm],
                                                reply[f.REQ_ID.nm])
        assertLength(all_replies, len(nodeSet))
        assert new_node.clientstack.name in all_replies

    # Set the old counters to be current ledger and projection size
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  nodeSet)

    more_nyms_count = 2
    for tc, tw in trust_anchors:
        for i in range(more_nyms_count):
            non_privileged.append(getClientAddedWithRole(other_nodes,
                                                         tdirWithPoolTxns,
                                                         looper,
                                                         tc, tw,
                                                         'NP1' + str(i)))

    # The new node should process transactions done by NYMs added to its
    # ledger while it was catching up
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  nodeSet)
    new_txn_count = more_nyms_count * len(trust_anchors)
    check_sizes(nodeSet)
def test_new_node_catchup_update_projection(looper,
                                            nodeSet, tconf, tdir,
                                            sdk_pool_handle,
                                            sdk_wallet_trustee,
                                            allPluginsPath,
                                            some_transactions_done):
    """
    A node which receives txns from catchup updates both ledger and projection
    4 nodes start up and some txns happen, after txns are done, new node joins
    and starts catching up, the node should not process requests while catchup
    is in progress. Make sure the new requests are coming from the new NYMs
    added while the node was offline or catching up.
    """
    # Create a new node and stop it.

    new_steward_wallet, new_node = sdk_node_theta_added(looper,
                                                        nodeSet,
                                                        tdir,
                                                        tconf,
                                                        sdk_pool_handle,
                                                        sdk_wallet_trustee,
                                                        allPluginsPath,
                                                        node_config_helper_class=NodeConfigHelper,
                                                        testNodeClass=TestNode)

    waitNodeDataEquality(looper, new_node, *nodeSet[:-1])
    ta_count = 2
    np_count = 2
    new_txn_count = 2 * ta_count + np_count  # each TA adds a NYM and an ATTRIB txn; each NP adds a NYM
    old_ledger_sizes = {}
    new_ledger_sizes = {}
    old_projection_sizes = {}
    new_projection_sizes = {}
    old_seq_no_map_sizes = {}
    new_seq_no_map_sizes = {}

    def get_ledger_size(node):
        return len(node.domainLedger)

    def get_projection_size(node):
        domain_state = node.getState(DOMAIN_LEDGER_ID)
        return len(domain_state.as_dict)

    def get_seq_no_map_size(node):
        return node.seqNoDB.size

    def fill_counters(ls, ps, ss, nodes):
        for n in nodes:
            ls[n.name] = get_ledger_size(n)
            ps[n.name] = get_projection_size(n)
            ss[n.name] = get_seq_no_map_size(n)

    def check_sizes(nodes):
        for node in nodes:
            assert new_ledger_sizes[node.name] - \
                   old_ledger_sizes[node.name] == new_txn_count
            assert new_projection_sizes[node.name] - \
                   old_projection_sizes[node.name] == new_txn_count
            assert new_seq_no_map_sizes[node.name] - \
                   old_seq_no_map_sizes[node.name] == new_txn_count

    # Stop a node and note down the sizes of ledger and projection (state)
    other_nodes = nodeSet[:-1]
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  other_nodes)
    new_node.cleanupOnStopping = False
    # new_node.stop()
    # looper.removeProdable(new_node)
    # ensure_node_disconnected(looper, new_node, other_nodes)

    disconnect_node_and_ensure_disconnected(looper,
                                            nodeSet,
                                            new_node.name)
    looper.removeProdable(name=new_node.name)
    trust_anchors = []
    attributes = []
    for i in range(ta_count):
        trust_anchors.append(
            sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee,
                            role='TRUST_ANCHOR', alias='TA' + str(i)))
        attributes.append((randomString(6), randomString(10)))
        sdk_add_raw_attribute(looper, sdk_pool_handle, trust_anchors[-1],
                              *attributes[-1])
    non_privileged = []
    for i in range(np_count):
        non_privileged.append(
            sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee,
                            alias='NP' + str(i)))

    checkNodeDataForEquality(nodeSet[0], *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  other_nodes)
    # The size difference should equal the number of new txns
    check_sizes(other_nodes)

    new_node = start_stopped_node(new_node, looper, tconf,
                                  tdir, allPluginsPath)
    nodeSet[-1] = new_node

    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  [new_node])
    looper.run(checkNodesConnected(nodeSet))
    sdk_pool_refresh(looper, sdk_pool_handle)
    waitNodeDataEquality(looper, new_node, *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  [new_node])
    check_sizes([new_node])

    # Set the old counters to be current ledger and projection size
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  nodeSet)

    more_nyms_count = 2
    for wh in trust_anchors:
        for i in range(more_nyms_count):
            non_privileged.append(sdk_add_new_nym(looper, sdk_pool_handle, wh,
                                                  alias='NP1' + str(i)))

    # The new node should process transactions done by NYMs added to its
    # ledger while it was catching up
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  nodeSet)
    new_txn_count = more_nyms_count * len(trust_anchors)
    check_sizes(nodeSet)