Example #1
def checkNodeLedgersForEquality(node: TestNode,
                                *otherNodes: Iterable[TestNode]):
    for n in otherNodes:
        assertLength(node.domainLedger, n.domainLedger.size)
        assertLength(node.poolLedger, n.poolLedger.size)
        assertEquality(node.domainLedger.root_hash, n.domainLedger.root_hash)
        assertEquality(node.poolLedger.root_hash, n.poolLedger.root_hash)
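Note: assertLength and assertEquality are test-utility assertions from the plenum test helpers. A minimal sketch of the semantics these examples assume (illustrative only, not the actual implementation):

def assertLength(collection, expected_length):
    # `collection` may be a sized container or an already-computed length.
    actual = len(collection) if hasattr(collection, '__len__') else collection
    assert actual == expected_length, \
        "expected length {}, got {}".format(expected_length, actual)

def assertEquality(observed, expected):
    assert observed == expected, "{!r} != {!r}".format(observed, expected)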
Example #2
def chk():
    # client1, sent1 and the expected response counts come from the
    # enclosing test (see Example #4)
    assertLength([response for response in client1.inBox
                  if (response[0].get(f.RESULT.nm) and
                      response[0][f.RESULT.nm][f.REQ_ID.nm] == sent1.reqId) or
                     (response[0].get(OP_FIELD_NAME) == REQACK and
                      response[0].get(f.REQ_ID.nm) == sent1.reqId)],
                 originalRequestResponsesLen + duplicateRequestRepliesLen)
Example #4
def testReplyWhenRequestAlreadyExecuted(looper, nodeSet, client1, sent1):
    """
    When a request has already been executed the previously executed reply
    will be sent again to the client. An acknowledgement will not be sent
    for a repeated request.
    """
    # Since view no is always zero in the current setup
    looper.run(eventually(checkSufficientRepliesRecvd,
                          client1.inBox,
                          sent1.reqId,
                          2,
                          retryWait=.25,
                          timeout=5))
    originalRequestResponsesLen = nodeCount * 2
    # For a duplicate request only a REPLY is re-sent, not another ACK.
    duplicateRequestRepliesLen = nodeCount
    client1._enqueueIntoAllRemotes(sent1)
    # Since view no is always zero in the current setup
    looper.run(eventually(
            lambda: assertLength([response for response in client1.inBox
                                  if response[0]['reqId'] == sent1.reqId],
                                 originalRequestResponsesLen +
                                 duplicateRequestRepliesLen),
            retryWait=.25,
            timeout=20))
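The eventually helper used throughout these examples retries a check until it passes or a timeout expires; plenum's real version is coroutine-based and driven by the looper. A simplified synchronous analogue, to illustrate the retry semantics (a sketch under that assumption, not plenum's actual code):

import time

def eventually_sync(check, *args, retryWait=0.25, timeout=5):
    # Retry `check` until it stops raising AssertionError or `timeout` elapses.
    deadline = time.perf_counter() + timeout
    while True:
        try:
            return check(*args)
        except AssertionError:
            if time.perf_counter() >= deadline:
                raise
            time.sleep(retryWait)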
Example #5
def testPrePrepareWhenPrimaryStatusIsUnknown(tdir_for_func):
    nodeNames = genNodeNames(4)
    nodeReg = genNodeReg(names=nodeNames)
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            prepareNodeSet(looper, nodeSet)

            nodeA, nodeB, nodeC, nodeD = tuple(
                addNodeBack(nodeSet, looper, nodeNames[i])
                for i in range(0, 4))

            # Nodes C and D delay self-nomination so that A and B can become
            # primaries
            nodeC.delaySelfNomination(30)
            nodeD.delaySelfNomination(30)

            # Node D delays receiving PRIMARY messages from all nodes so it
            # will not know whether it is primary or not
            nodeD.nodeIbStasher.delay(delayerMsgTuple(20, Primary))

            checkPoolReady(looper=looper, nodes=nodeSet)

            client1, wal = setupClient(looper, nodeSet, tmpdir=tdir_for_func)
            request = sendRandomRequest(wal, client1)

            # TODO Rethink this
            instNo = 0

            for i in range(3):
                node = nodeSet.getNode(nodeNames[i])
                # Nodes A, B and C should have received PROPAGATE request
                # from Node D
                looper.run(
                    eventually(checkIfPropagateRecvdFromNode,
                               node,
                               nodeD,
                               request.identifier,
                               request.reqId,
                               retryWait=1,
                               timeout=10))

            # Node D should have 1 pending PRE-PREPARE request
            def assertOnePrePrepare():
                assert len(
                    getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                 PrePrepare)) == 1

            looper.run(eventually(assertOnePrePrepare, retryWait=1,
                                  timeout=10))

            # Node D should have 2 pending PREPARE requests (from nodes B and C)

            def assertTwoPrepare():
                assert len(
                    getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                 Prepare)) == 2

            looper.run(eventually(assertTwoPrepare, retryWait=1, timeout=10))

            # Node D should have no pending PRE-PREPARE, PREPARE or COMMIT
            # requests
            for reqType in [PrePrepare, Prepare, Commit]:
                looper.run(
                    eventually(lambda: assertLength(
                        getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                     reqType), 0),
                               retryWait=1,
                               timeout=20))
Example #6
def chk():
    # A should have forwarded the request
    assertLength(forwardedRequest(A), 1)
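forwardedRequest is not defined on this page; it is assumed to return the requests that node A has handed off to its replicas. A purely hypothetical stand-in (the attribute name is invented for illustration; the real helper likely inspects the TestNode's spy log):

def forwardedRequest(node):
    # Hypothetical: requests the node has forwarded to its replicas.
    return list(getattr(node, 'forwarded_requests', []))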
Example #7
def testPrePrepareWhenPrimaryStatusIsUnknown(tdir_for_func):
    nodeNames = genNodeNames(4)
    nodeReg = genNodeReg(names=nodeNames)
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            prepareNodeSet(looper, nodeSet)

            nodeA, nodeB, nodeC, nodeD = tuple(
                addNodeBack(nodeSet, looper, nodeNames[i])
                for i in range(0, 4))

            # Since primary selection is round robin, A and B will be primaries

            # Nodes C and D would delay self-nomination so that A and B become
            # primaries
            # nodeC.delaySelfNomination(10)
            # nodeD.delaySelfNomination(10)

            # Node D delays receiving PRIMARY messages from all nodes so it
            # will not know whether it is primary or not

            delayD = 5
            nodeD.nodeIbStasher.delay(delayerMsgTuple(delayD, Primary))

            checkPoolReady(looper=looper, nodes=nodeSet)

            client1, wal = setupClient(looper, nodeSet, tmpdir=tdir_for_func)
            request = sendRandomRequest(wal, client1)

            # TODO Rethink this
            instNo = 0

            timeout = waits.expectedClientRequestPropagationTime(len(nodeSet))
            for i in range(3):
                node = nodeSet.getNode(nodeNames[i])
                # Nodes A, B and C should have received PROPAGATE request
                # from Node D
                looper.run(
                    eventually(checkIfPropagateRecvdFromNode,
                               node,
                               nodeD,
                               request.identifier,
                               request.reqId,
                               retryWait=1,
                               timeout=timeout))

            def assert_msg_count(typ, count):
                assert len(
                    getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                 typ)) == count

            # Node D should have 1 pending PRE-PREPARE request
            timeout = waits.expectedPrePrepareTime(len(nodeSet))
            looper.run(
                eventually(assert_msg_count,
                           PrePrepare,
                           1,
                           retryWait=1,
                           timeout=timeout))

            # Node D should have 2 pending PREPARE requests (from nodes B and C)
            timeout = waits.expectedPrepareTime(len(nodeSet))
            looper.run(
                eventually(assert_msg_count,
                           Prepare,
                           2,
                           retryWait=1,
                           timeout=timeout))

            # It's been checked above that the replica stashes 3-phase messages
            # in the absence of a primary; now remove the delay (fix the network)
            nodeD.nodeIbStasher.reset_delays_and_process_delayeds()

            # Node D should have no pending PRE-PREPARE, PREPARE or COMMIT
            # requests
            for reqType in [PrePrepare, Prepare, Commit]:
                looper.run(
                    eventually(lambda: assertLength(
                        getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                     reqType), 0),
                               retryWait=1,
                               timeout=delayD + 5))  # wait a little longer than the delay
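The nodeIbStasher pattern above (delay all messages of one type, assert on the stalled state, then release) can be sketched generically. The class below is an illustration of the idea, not plenum's actual stasher; delayerMsgTuple(delayD, Primary) would correspond to a predicate matching messages of type Primary.

import time
from collections import deque

class Stasher:
    def __init__(self, process):
        self.process = process    # downstream message handler
        self.delays = []          # (predicate, seconds) rules
        self.stashed = deque()    # (release_at, msg) parked messages

    def delay(self, predicate, seconds):
        self.delays.append((predicate, seconds))

    def receive(self, msg):
        for predicate, seconds in self.delays:
            if predicate(msg):
                self.stashed.append((time.time() + seconds, msg))
                return
        self.process(msg)

    def tick(self):
        # Called by the event loop: re-inject messages whose delay expired.
        while self.stashed and self.stashed[0][0] <= time.time():
            self.process(self.stashed.popleft()[1])

    def reset_delays_and_process_delayeds(self):
        # Drop all delay rules and flush everything that was parked.
        self.delays.clear()
        while self.stashed:
            self.process(self.stashed.popleft()[1])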
Example #8
def test_new_node_catchup_update_projection(looper, tdirWithPoolTxns,
                                            tdirWithDomainTxnsUpdated, nodeSet,
                                            tconf, trustee, trusteeWallet,
                                            allPluginsPath,
                                            some_transactions_done):
    """
    A node which receives txns from catchup updates both ledger and projection
    4 nodes start up and some txns happen, after txns are done, new node joins
    and starts catching up, the node should not process requests while catchup
    is in progress. Make sure the new requests are coming from the new NYMs
    added while the node was offline or catching up.
    """
    # Create a new node and stop it.

    new_steward, new_steward_wallet, new_node = nodeThetaAdded(
        looper, nodeSet, tdirWithPoolTxns, tconf, trustee, trusteeWallet,
        allPluginsPath, TestNode, TestClient, tdirWithPoolTxns)

    waitNodeDataEquality(looper, new_node, *nodeSet[:-1])
    ta_count = 2
    np_count = 2
    new_txn_count = 2 * ta_count + np_count  # each TA also does an ATTRIB txn
    old_ledger_sizes = {}
    new_ledger_sizes = {}
    old_projection_sizes = {}
    new_projection_sizes = {}
    old_seq_no_map_sizes = {}
    new_seq_no_map_sizes = {}

    def get_ledger_size(node):
        return len(node.domainLedger)

    def get_projection_size(node):
        domain_state = node.getState(DOMAIN_LEDGER_ID)
        return len(domain_state.as_dict)

    def get_seq_no_map_size(node):
        return node.seqNoDB.size

    def fill_counters(ls, ps, ss, nodes):
        for n in nodes:
            ls[n.name] = get_ledger_size(n)
            ps[n.name] = get_projection_size(n)
            ss[n.name] = get_seq_no_map_size(n)

    def check_sizes(nodes):
        for node in nodes:
            assert new_ledger_sizes[node.name] - \
                old_ledger_sizes[node.name] == new_txn_count
            assert new_projection_sizes[node.name] - \
                old_projection_sizes[node.name] == new_txn_count
            assert new_seq_no_map_sizes[node.name] - \
                old_seq_no_map_sizes[node.name] == new_txn_count

    # Stop a node and note down the sizes of ledger and projection (state)
    other_nodes = nodeSet[:-1]
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  other_nodes)
    new_node.cleanupOnStopping = False
    new_node.stop()
    looper.removeProdable(new_node)
    ensure_node_disconnected(looper, new_node.name, other_nodes)

    trust_anchors = []
    attributes = []
    for i in range(ta_count):
        trust_anchors.append(
            getClientAddedWithRole(other_nodes,
                                   tdirWithPoolTxns,
                                   looper,
                                   trustee,
                                   trusteeWallet,
                                   'TA' + str(i),
                                   role=TRUST_ANCHOR,
                                   client_connects_to=len(other_nodes)))
        attributes.append((randomString(6), randomString(10)))
        addRawAttribute(looper,
                        *trust_anchors[-1],
                        *attributes[-1],
                        dest=trust_anchors[-1][1].defaultId)
    non_privileged = []
    for i in range(np_count):
        non_privileged.append(
            getClientAddedWithRole(other_nodes,
                                   tdirWithPoolTxns,
                                   looper,
                                   trustee,
                                   trusteeWallet,
                                   'NP' + str(i),
                                   client_connects_to=len(other_nodes)))

    checkNodeDataForEquality(nodeSet[0], *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  other_nodes)
    # The size difference should be the same as the number of new NYM txns
    check_sizes(other_nodes)

    new_node = TestNode(new_node.name,
                        basedirpath=tdirWithPoolTxns,
                        config=tconf,
                        pluginPaths=allPluginsPath,
                        ha=new_node.nodestack.ha,
                        cliha=new_node.clientstack.ha)
    looper.add(new_node)
    nodeSet[-1] = new_node
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  [new_node])
    looper.run(checkNodesConnected(nodeSet))
    waitNodeDataEquality(looper, new_node, *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  [new_node])
    check_sizes([new_node])

    for i, (tc, tw) in enumerate(trust_anchors):
        reply = getAttribute(looper, tc, tw, tw.defaultId, *attributes[i])
        all_replies = tc.getRepliesFromAllNodes(reply[f.IDENTIFIER.nm],
                                                reply[f.REQ_ID.nm])
        assertLength(all_replies, len(nodeSet))
        assert new_node.clientstack.name in all_replies

    # Set the old counters to be current ledger and projection size
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  nodeSet)

    more_nyms_count = 2
    for tc, tw in trust_anchors:
        for i in range(more_nyms_count):
            non_privileged.append(
                getClientAddedWithRole(other_nodes, tdirWithPoolTxns, looper,
                                       tc, tw, 'NP1' + str(i)))

    # The new node should process transactions done by NYMs added to its
    # ledger while it was catching up
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  nodeSet)
    new_txn_count = more_nyms_count * len(trust_anchors)
    check_sizes(nodeSet)
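The measurement pattern this test relies on (snapshot per-node counters, perform transactions, snapshot again, assert the delta) generalizes beyond ledger sizes. A compact, self-contained illustration with placeholder names:

def snapshot(nodes, measure):
    return {n.name: measure(n) for n in nodes}

def assert_delta(before, after, expected):
    for name in after:
        assert after[name] - before[name] == expected, name

# Usage sketch:
#   before = snapshot(nodes, get_ledger_size)
#   ... submit `expected` new txns ...
#   after = snapshot(nodes, get_ledger_size)
#   assert_delta(before, after, expected)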
Example #9
def test_new_node_catchup_update_projection(looper, tdirWithPoolTxns,
                                            tdirWithDomainTxnsUpdated,
                                            nodeSet, tconf,
                                            trustee, trusteeWallet,
                                            allPluginsPath,
                                            some_transactions_done
                                            ):
    """
    A node which receives txns from catchup updates both ledger and projection
    4 nodes start up and some txns happen, after txns are done, new node joins
    and starts catching up, the node should not process requests while catchup
    is in progress. Make sure the new requests are coming from the new NYMs
    added while the node was offline or catching up.
    """
    # Create a new node and stop it.

    new_steward, new_steward_wallet, new_node = nodeThetaAdded(looper,
                                                               nodeSet,
                                                               tdirWithPoolTxns,
                                                               tconf, trustee,
                                                               trusteeWallet,
                                                               allPluginsPath,
                                                               TestNode,
                                                               TestClient,
                                                               tdirWithPoolTxns)

    waitNodeDataEquality(looper, new_node, *nodeSet[:-1])
    ta_count = 2
    np_count = 2
    new_txn_count = 2 * ta_count + np_count  # each TA also does an ATTRIB txn
    old_ledger_sizes = {}
    new_ledger_sizes = {}
    old_projection_sizes = {}
    new_projection_sizes = {}
    old_seq_no_map_sizes = {}
    new_seq_no_map_sizes = {}

    def get_ledger_size(node):
        return len(node.domainLedger)

    def get_projection_size(node):
        domain_state = node.getState(DOMAIN_LEDGER_ID)
        return len(domain_state.as_dict)

    def get_seq_no_map_size(node):
        return node.seqNoDB.size

    def fill_counters(ls, ps, ss, nodes):
        for n in nodes:
            ls[n.name] = get_ledger_size(n)
            ps[n.name] = get_projection_size(n)
            ss[n.name] = get_seq_no_map_size(n)

    def check_sizes(nodes):
        for node in nodes:
            assert new_ledger_sizes[node.name] - old_ledger_sizes[node.name] == new_txn_count
            assert new_projection_sizes[node.name] - old_projection_sizes[node.name] == new_txn_count
            assert new_seq_no_map_sizes[node.name] - old_seq_no_map_sizes[node.name] == new_txn_count

    # Stop a node and note down the sizes of ledger and projection (state)
    other_nodes = nodeSet[:-1]
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  other_nodes)
    new_node.cleanupOnStopping = False
    new_node.stop()
    looper.removeProdable(new_node)
    ensure_node_disconnected(looper, new_node.name, other_nodes)

    trust_anchors = []
    attributes = []
    for i in range(ta_count):
        trust_anchors.append(getClientAddedWithRole(other_nodes,
                                                    tdirWithPoolTxns, looper,
                                                    trustee, trusteeWallet,
                                                    'TA'+str(i), role=TRUST_ANCHOR,
                                                    client_connects_to=len(other_nodes)))
        attributes.append((randomString(6), randomString(10)))
        addRawAttribute(looper, *trust_anchors[-1], *attributes[-1],
                        dest=trust_anchors[-1][1].defaultId)
    non_privileged = []
    for i in range(np_count):
        non_privileged.append(getClientAddedWithRole(other_nodes,
                                                     tdirWithPoolTxns, looper,
                                                     trustee, trusteeWallet,
                                                     'NP'+str(i),
                                                     client_connects_to=len(other_nodes)))

    checkNodeDataForEquality(nodeSet[0], *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  other_nodes)
    # The size difference should be the same as the number of new NYM txns
    check_sizes(other_nodes)

    new_node = TestNode(new_node.name, basedirpath=tdirWithPoolTxns,
                        config=tconf, pluginPaths=allPluginsPath,
                        ha=new_node.nodestack.ha, cliha=new_node.clientstack.ha)
    looper.add(new_node)
    nodeSet[-1] = new_node
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  [new_node])
    looper.run(checkNodesConnected(nodeSet))
    waitNodeDataEquality(looper, new_node, *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  [new_node])
    check_sizes([new_node])

    for i, (tc, tw) in enumerate(trust_anchors):
        reply = getAttribute(looper, tc, tw, tw.defaultId, *attributes[i])
        all_replies = tc.getRepliesFromAllNodes(reply[f.IDENTIFIER.nm],
                                                reply[f.REQ_ID.nm])
        assertLength(all_replies, len(nodeSet))
        assert new_node.clientstack.name in all_replies

    # Set the old counters to be current ledger and projection size
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  nodeSet)

    more_nyms_count = 2
    for tc, tw in trust_anchors:
        for i in range(more_nyms_count):
            non_privileged.append(getClientAddedWithRole(other_nodes,
                                                         tdirWithPoolTxns,
                                                         looper,
                                                         tc, tw,
                                                         'NP1' + str(i)))

    # The new node should process transactions done by NYMs added to its
    # ledger while it was catching up
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  nodeSet)
    new_txn_count = more_nyms_count * len(trust_anchors)
    check_sizes(nodeSet)
Example #11
def testPrePrepareWhenPrimaryStatusIsUnknown(tdir_for_func):
    nodeNames = genNodeNames(4)
    nodeReg = genNodeReg(names=nodeNames)
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            prepareNodeSet(looper, nodeSet)

            nodeA, nodeB, nodeC, nodeD = tuple(
                addNodeBack(nodeSet, looper, nodeNames[i]) for i in range(0, 4))

            # Nodes C and D delay self-nomination so that A and B can become
            # primaries
            nodeC.delaySelfNomination(30)
            nodeD.delaySelfNomination(30)

            # Node D delays receiving PRIMARY messages from all nodes so it
            # will not know whether it is primary or not
            nodeD.nodeIbStasher.delay(delayerMsgTuple(20, Primary))

            checkPoolReady(looper=looper, nodes=nodeSet)

            client1 = setupClient(looper, nodeSet, tmpdir=tdir_for_func)
            request = sendRandomRequest(client1)

            # TODO Rethink this
            instNo = 0

            for i in range(3):
                node = nodeSet.getNode(nodeNames[i])
                # Nodes A, B and C should have received PROPAGATE request
                # from Node D
                looper.run(
                    eventually(checkIfPropagateRecvdFromNode, node, nodeD,
                               request.identifier,
                               request.reqId, retryWait=1, timeout=10))

            # Node D should have 1 pending PRE-PREPARE request
            def assertOnePrePrepare():
                assert len(getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                        PrePrepare)) == 1

            looper.run(eventually(assertOnePrePrepare, retryWait=1, timeout=10))

            # Node D should have 2 pending PREPARE requests (from nodes B and C)

            def assertTwoPrepare():
                assert len(getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                        Prepare)) == 2

            looper.run(eventually(assertTwoPrepare, retryWait=1, timeout=10))

            # Node D should have no pending PRE-PREPARE, PREPARE or COMMIT
            # requests
            for reqType in [PrePrepare, Prepare, Commit]:
                looper.run(eventually(lambda: assertLength(
                    getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                 reqType),
                    0), retryWait=1, timeout=20))
Example #12
def checkClientPoolLedgerSameAsNodes(client: TestClient,
                                     *nodes: Iterable[TestNode]):
    for n in nodes:
        assertLength(client.ledger, n.poolLedger.size)
        assertEquality(client.ledger.root_hash, n.poolLedger.root_hash)
Example #13
def testNewIdentifierInWalletIsDid(abbrevIdr):
    assertLength(abbrevIdr, 22)
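The expected length of 22 is consistent with an abbreviated identifier being the base58 encoding of a 16-byte value, which yields 21 or 22 characters (22 in the vast majority of cases). A quick self-contained check under that assumption:

import os

ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def b58encode(data):
    num = int.from_bytes(data, 'big')
    out = ''
    while num:
        num, rem = divmod(num, 58)
        out = ALPHABET[rem] + out
    # Preserve leading zero bytes as leading '1's.
    return '1' * (len(data) - len(data.lstrip(b'\0'))) + out

lengths = {len(b58encode(os.urandom(16))) for _ in range(1000)}
print(lengths)  # typically {21, 22}; 22 dominates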
Example #14
def testPrePrepareWhenPrimaryStatusIsUnknown(tdir_for_func):
    nodeNames = genNodeNames(4)
    nodeReg = genNodeReg(names=nodeNames)
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            prepareNodeSet(looper, nodeSet)

            nodeA, nodeB, nodeC, nodeD = tuple(
                addNodeBack(nodeSet, looper, nodeNames[i])
                for i in range(0, 4))

            # Since primary selection is round robin, A and B will be primaries

            # Nodes C and D would delay self-nomination so that A and B become
            # primaries
            # nodeC.delaySelfNomination(10)
            # nodeD.delaySelfNomination(10)

            # Node D delays receiving PRIMARY messages from all nodes so it
            # will not know whether it is primary or not

            delayD = 5
            nodeD.nodeIbStasher.delay(delayerMsgTuple(delayD, Primary))

            checkPoolReady(looper=looper, nodes=nodeSet)

            client1, wal = setupClient(looper, nodeSet, tmpdir=tdir_for_func)
            request = sendRandomRequest(wal, client1)

            # TODO Rethink this
            instNo = 0

            timeout = waits.expectedClientRequestPropagationTime(len(nodeSet))
            for i in range(3):
                node = nodeSet.getNode(nodeNames[i])
                # Nodes A, B and C should have received PROPAGATE request
                # from Node D
                looper.run(
                    eventually(checkIfPropagateRecvdFromNode, node, nodeD,
                               request.identifier,
                               request.reqId, retryWait=1, timeout=timeout))

            def assert_msg_count(typ, count):
                assert len(getPendingRequestsForReplica(nodeD.replicas[instNo],
                                                        typ)) == count

            # Node D should have 1 pending PRE-PREPARE request
            timeout = waits.expectedPrePrepareTime(len(nodeSet))
            looper.run(eventually(assert_msg_count, PrePrepare, 1,
                                  retryWait=1, timeout=timeout))

            # Node D should have 2 pending PREPARE requests (from nodes B and C)
            timeout = waits.expectedPrepareTime(len(nodeSet))
            looper.run(eventually(assert_msg_count, Prepare, 2, retryWait=1,
                                  timeout=timeout))

            # It's been checked above that the replica stashes 3-phase messages
            # in the absence of a primary; now remove the delay (fix the network)
            nodeD.nodeIbStasher.reset_delays_and_process_delayeds()

            # Node D should have no pending PRE-PREPARE, PREPARE or COMMIT
            # requests
            for reqType in [PrePrepare, Prepare, Commit]:
                looper.run(
                    eventually(
                        lambda: assertLength(
                            getPendingRequestsForReplica(
                                nodeD.replicas[instNo],
                                reqType),
                            0),
                        retryWait=1,
                        timeout=delayD + 5))  # wait a little longer than the delay