Example #1
def testNodesComingUpAtDifferentTimes():
    console = getConsole()
    console.reinit(flushy=True, verbosity=console.Wordage.verbose)
    with TemporaryDirectory() as td:
        print("temporary directory: {}".format(td))
        with Looper() as looper:
            nodes = []

            names = list(nodeReg.keys())
            shuffle(names)
            waits = [randint(1, 10) for _ in names]
            rwaits = [randint(1, 10) for _ in names]

            for i, name in enumerate(names):
                node = TestNode(name, nodeReg, basedirpath=td)
                looper.add(node)
                node.startKeySharing()
                nodes.append(node)
                looper.runFor(waits[i])
            looper.run(checkNodesConnected(nodes, overrideTimeout=10))
            print("connects")
            print("node order: {}".format(names))
            print("waits: {}".format(waits))

            for n in nodes:
                n.stop()
            for i, n in enumerate(nodes):
                n.start(looper.loop)
                looper.runFor(rwaits[i])
            looper.runFor(3)
            looper.run(checkNodesConnected(nodes, overrideTimeout=10))
            print("reconnects")
            print("node order: {}".format(names))
            print("rwaits: {}".format(rwaits))
Example #2
def testPrimaryElectionCase2(case2Setup, looper, keySharedNodes):
    """
    Case 2 - A node making nominations for multiple other nodes. Consider 4
    nodes A, B, C, and D. Let's say node B is malicious and nominates node C
    to all nodes. Then node B also nominates node D to all nodes.
    """
    nodeSet = keySharedNodes
    A, B, C, D = nodeSet.nodes.values()

    looper.run(checkNodesConnected(nodeSet))

    # Node B sends multiple NOMINATE msgs but only after A has nominated itself
    looper.run(eventually(checkNomination, A, A.name, retryWait=.25,
                          timeout=1))

    instId = getSelfNominationByNode(A)

    BRep = Replica.generateName(B.name, instId)
    CRep = Replica.generateName(C.name, instId)
    DRep = Replica.generateName(D.name, instId)

    # Node B first sends NOMINATE msgs for Node C to all nodes
    B.send(Nomination(CRep, instId, B.viewNo))
    # Node B sends NOMINATE msgs for Node D to all nodes
    B.send(Nomination(DRep, instId, B.viewNo))

    # Ensure elections are done
    ensureElectionsDone(looper=looper, nodes=nodeSet, retryWait=1, timeout=45)

    # Each of nodes A, C and D (node B is malicious anyway, so it is not
    # considered) should have the nomination for node C from node B, since
    # node B nominated node C first
    for node in [A, C, D]:
        assert node.elector.nominations[instId][BRep] == CRep
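Several of these examples wrap assertions in eventually(check, *args, retryWait=..., timeout=...) so the looper can retry a check that may not pass immediately. A simplified sketch of that retry idea, assuming the check signals failure by raising AssertionError (an illustration only, not plenum's actual eventually implementation):

import asyncio
import time

async def eventuallyTrue(check, *args, retryWait=0.25, timeout=5):
    # Keep calling `check` until it stops raising AssertionError or the
    # timeout elapses; re-raise the last failure once time runs out.
    deadline = time.monotonic() + timeout
    while True:
        try:
            return check(*args)
        except AssertionError:
            if time.monotonic() >= deadline:
                raise
            await asyncio.sleep(retryWait)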
Example #3
def testPrimaryElectionCase4(case4Setup, looper):
    """
    Case 4 - A node making multiple primary declarations for a particular node.
    Consider 4 nodes A, B, C and D. Let's say node B is malicious and is
    repeatedly declaring node D as primary.
    """
    allNodes = case4Setup
    A, B, C, D = allNodes

    looper.run(checkNodesConnected(allNodes))

    # Node B sends multiple declarations of node D's 0th protocol instance as
    # primary to all nodes
    for i in range(5):
        B.send(Primary(D.name, 0, B.viewNo))

    # None of nodes A, C or D (node B is malicious anyway, so it is not
    # considered) should have more than one primary declaration for node D,
    # since node D is slow. The one primary declaration for node D that
    # nodes A, C and D might have would be because of node B
    def x():
        primDecs = list(node.elector.primaryDeclarations[0].values())
        assert primDecs.count(D.name) <= 1

    for node in (A, C, D):
        looper.run(eventually(x, retryWait=.5, timeout=2))

    ensureElectionsDone(looper=looper, nodes=allNodes,
                        retryWait=1, timeout=45)

    # Node D should not have any primary replica
    assert not D.hasPrimary
Example #4
def testNodeCatchupAfterRestart(newNodeCaughtUp, txnPoolNodeSet,
                                nodeSetWithNodeAddedAfterSomeTxns):
    """
    A node that restarts after some transactions should eventually get the
    transactions which happened while it was down
    :return:
    """

    looper, newNode, client, wallet, _, _ = nodeSetWithNodeAddedAfterSomeTxns
    logger.debug("Stopping node {} with pool ledger size {}".format(
        newNode, newNode.poolManager.txnSeqNo))
    ensureNodeDisconnectedFromPool(looper, txnPoolNodeSet, newNode)
    # for n in txnPoolNodeSet[:4]:
    #     for r in n.nodestack.remotes.values():
    #         if r.name == newNode.name:
    #             r.removeStaleCorrespondents()
    # looper.run(eventually(checkNodeDisconnectedFrom, newNode.name,
    #                       txnPoolNodeSet[:4], retryWait=1, timeout=5))
    # TODO: Check if the node has really stopped processing requests?
    logger.debug("Sending requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 5)
    logger.debug("Starting the stopped node, {}".format(newNode))
    newNode.start(looper.loop)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    looper.run(
        eventually(checkNodeLedgersForEquality,
                   newNode,
                   *txnPoolNodeSet[:4],
                   retryWait=1,
                   timeout=15))
Example #5
def testPrimaryElectionCase5(case5Setup, looper, keySharedNodes):
    """
    Case 5 - A node making primary declarations for multiple other nodes.
    Consider 4 nodes A, B, C, and D. Let's say node B is malicious and
    declares node C as primary to all nodes.
    Then node B also declares node D as primary to all nodes.
    """
    nodeSet = keySharedNodes
    A, B, C, D = nodeSet.nodes.values()

    looper.run(checkNodesConnected(nodeSet))

    BRep = Replica.generateName(B.name, 0)
    CRep = Replica.generateName(C.name, 0)
    DRep = Replica.generateName(D.name, 0)

    # Node B first sends PRIMARY msgs for Node C to all nodes
    B.send(Primary(CRep, 0, B.viewNo))
    # Node B sends PRIMARY msgs for Node D to all nodes
    B.send(Primary(DRep, 0, B.viewNo))

    # Ensure elections are done
    ensureElectionsDone(looper=looper, nodes=nodeSet, retryWait=1, timeout=45)

    # Each of nodes A, C and D (node B is malicious anyway, so it is not
    # considered) should have the primary declaration for node C from node B,
    # since node B declared node C as primary first
    for node in [A, C, D]:
        logging.debug(
            "node {} should have primary declaration for C from node B"
            .format(node))
        assert node.elector.primaryDeclarations[0][BRep] == CRep
Example #6
def testNodeRequestingTxns(txnPoolNodeSet, nodeCreatedAfterSomeTxns):
    """
    A newly joined node is catching up and sends catchup requests to other
    nodes, but one of the nodes does not reply, so the newly joined node
    cannot complete the process until the timeout expires, after which it
    requests the missing transactions.
    """
    looper, newNode, client, wallet, _, _ = nodeCreatedAfterSomeTxns
    # Make the nodes not tell the clients about the newly joined node, so the
    # clients don't send any requests to the newly joined node
    for node in txnPoolNodeSet:
        node.sendPoolInfoToClients = types.MethodType(lambda x, y: None, node)

    txnPoolNodeSet.append(newNode)

    def ignoreCatchupReq(self, req, frm):
        logger.info("{} being malicious and ignoring catchup request {} "
                    "from {}".format(self, req, frm))

    # One of the nodes does not process catchup requests.
    txnPoolNodeSet[0].nodeMsgRouter.routes[CatchupReq] = types.MethodType(
        ignoreCatchupReq, txnPoolNodeSet[0].ledgerManager)
    sendRandomRequests(wallet, client, 10)
    looper.run(checkNodesConnected(txnPoolNodeSet, overrideTimeout=60))
    looper.run(
        eventually(checkNodeLedgersForEquality,
                   newNode,
                   *txnPoolNodeSet[:-1],
                   retryWait=1,
                   timeout=90))
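The pattern above of replacing a bound method with types.MethodType is how these tests make a single node behave maliciously without touching the class. A minimal self-contained sketch of that pattern (the Peer class and method names here are illustrative, not plenum's API):

import types

class Peer:
    def __init__(self, name):
        self.name = name

    def processCatchupReq(self, req, frm):
        print("{} processing catchup request {} from {}".format(self.name, req, frm))

def ignoreCatchupReq(self, req, frm):
    # Malicious behaviour: silently drop the request instead of processing it.
    print("{} ignoring catchup request {} from {}".format(self.name, req, frm))

peer = Peer("Alpha")
# Bind the plain function to this one instance; other Peer instances keep
# the original method.
peer.processCatchupReq = types.MethodType(ignoreCatchupReq, peer)
peer.processCatchupReq("req-1", "Beta")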
Example #7
def testPrimaryElectionCase2(case2Setup, looper, keySharedNodes):
    """
    Case 2 - A node making nominations for multiple other nodes. Consider 4
    nodes A, B, C, and D. Let's say node B is malicious and nominates node C
    to all nodes. Then node B also nominates node D to all nodes.
    """
    nodeSet = keySharedNodes
    A, B, C, D = nodeSet.nodes.values()

    looper.run(checkNodesConnected(nodeSet))

    # Node B sends multiple NOMINATE msgs but only after A has nominated itself
    looper.run(eventually(checkNomination, A, A.name, retryWait=.25, timeout=1))

    instId = getSelfNominationByNode(A)

    BRep = Replica.generateName(B.name, instId)
    CRep = Replica.generateName(C.name, instId)
    DRep = Replica.generateName(D.name, instId)

    # Node B first sends NOMINATE msgs for Node C to all nodes
    B.send(Nomination(CRep, instId, B.viewNo))
    # Node B sends NOMINATE msgs for Node D to all nodes
    B.send(Nomination(DRep, instId, B.viewNo))

    # Ensure elections are done
    ensureElectionsDone(looper=looper, nodes=nodeSet, retryWait=1, timeout=45)

    # Each of nodes A, C and D (node B is malicious anyway, so it is not
    # considered) should have the nomination for node C from node B, since
    # node B nominated node C first
    for node in [A, C, D]:
        assert node.elector.nominations[instId][BRep] == CRep
Example #8
def testNodesConnectsWhenOneNodeIsLate(allPluginsPath, tdirAndLooper):
    tdir, looper = tdirAndLooper
    nodes = []
    names = list(nodeReg.keys())
    logger.debug("Node names: {}".format(names))

    def create(name):
        node = TestNode(name, nodeReg, basedirpath=tdir,
                        pluginPaths=allPluginsPath)
        looper.add(node)
        node.startKeySharing()
        nodes.append(node)

    for name in names[:3]:
        create(name)

    looper.run(checkNodesConnected(nodes))

    # wait for the election to complete with the first three nodes
    looper.runFor(10)

    # create the fourth and see that it learns who the primaries are
    # from the other nodes
    create(names[3])
    checkProtocolInstanceSetup(looper, nodes, timeout=10)
Example #9
def testPrimaryElectionCase5(case5Setup, looper, keySharedNodes):
    """
    Case 5 - A node making primary declarations for multiple other nodes.
    Consider 4 nodes A, B, C, and D. Let's say node B is malicious and
    declares node C as primary to all nodes.
    Then node B also declares node D as primary to all nodes.
    """
    nodeSet = keySharedNodes
    A, B, C, D = nodeSet.nodes.values()

    looper.run(checkNodesConnected(nodeSet))

    BRep = Replica.generateName(B.name, 0)
    CRep = Replica.generateName(C.name, 0)
    DRep = Replica.generateName(D.name, 0)

    # Node B first sends PRIMARY msgs for Node C to all nodes
    B.send(Primary(CRep, 0, B.viewNo))
    # Node B sends PRIMARY msgs for Node D to all nodes
    B.send(Primary(DRep, 0, B.viewNo))

    # Ensure elections are done
    ensureElectionsDone(looper=looper, nodes=nodeSet, retryWait=1, timeout=45)

    # Each of nodes A, C and D (node B is malicious anyway, so it is not
    # considered) should have the primary declaration for node C from node B,
    # since node B declared node C as primary first
    for node in [A, C, D]:
        logger.debug(
            "node {} should have primary declaration for C from node B"
            .format(node))
        assert node.elector.primaryDeclarations[0][BRep] == CRep
Example #10
def testNodeRemoveUnknownRemote(allPluginsPath, tdirAndLooper):
    """
    The nodes Alpha and Beta know about each other, so they should connect,
    but they should remove the remote for C when it tries to connect to them
    """

    tdir, looper = tdirAndLooper
    names = ["Alpha", "Beta"]
    logger.debug(names)
    nrg = {n: nodeReg[n] for n in names}
    A, B = [TestNode(name, nrg, basedirpath=tdir,
                     pluginPaths=allPluginsPath)
            for name in names]
    for node in (A, B):
        looper.add(node)
        node.startKeySharing()
    looper.run(checkNodesConnected([A, B]))

    C = TestNode("Gamma", {**nrg, **{"Gamma": nodeReg["Gamma"]}},
                 basedirpath=tdir, pluginPaths=allPluginsPath)
    looper.add(C)
    C.startKeySharing(timeout=20)

    def chk():
        assert not C.nodestack.isKeySharing

    looper.run(eventually(chk, retryWait=2, timeout=21))
    C.stop()

    def chk():
        assert C.name not in B.nodestack.nameRemotes
        assert C.name not in A.nodestack.nameRemotes

    looper.run(eventually(chk, retryWait=2, timeout=5))
Example #11
def testTestNodeDelay(tdir_for_func):
    nodeNames = {"testA", "testB"}
    with TestNodeSet(names=nodeNames, tmpdir=tdir_for_func) as nodes:
        nodeA = nodes.getNode("testA")
        nodeB = nodes.getNode("testB")

        with Looper(nodes) as looper:
            for n in nodes:
                n.startKeySharing()

            logging.debug("connect")
            looper.run(checkNodesConnected(nodes))
            logging.debug("send one message, without delay")
            msg = randomMsg()
            looper.run(sendMsgAndCheck(nodes, nodeA, nodeB, msg, 1))
            logging.debug("set delay, then send another message and find that "
                          "it doesn't arrive")
            msg = randomMsg()

            nodeB.nodeIbStasher.delay(delayerMsgTuple(6, type(msg),
                                                      nodeA.name))

            with pytest.raises(AssertionError):
                looper.run(sendMsgAndCheck(nodes, nodeA, nodeB, msg, 3))
            logging.debug("but then find that it arrives after the delay "
                          "duration has passed")
            looper.run(sendMsgAndCheck(nodes, nodeA, nodeB, msg, 4))
            logging.debug(
                "reset the delay, and find another message comes quickly")
            nodeB.nodeIbStasher.resetDelays()
            msg = randomMsg()
            looper.run(sendMsgAndCheck(nodes, nodeA, nodeB, msg, 1))
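nodeIbStasher.delay(...) above holds matching inbound messages back for a fixed number of seconds before the node sees them, and resetDelays() removes the rules. A rough sketch of such a delaying queue, assuming rules are (predicate, seconds) pairs (illustrative only, not the actual plenum stasher):

import time

class DelayingQueue:
    def __init__(self):
        self.rules = []     # list of (predicate, seconds)
        self.stashed = []   # list of (releaseAt, item)

    def delay(self, predicate, seconds):
        self.rules.append((predicate, seconds))

    def resetDelays(self):
        self.rules.clear()

    def receive(self, item, deliver):
        # Stash the item if any rule matches, otherwise deliver immediately.
        for predicate, seconds in self.rules:
            if predicate(item):
                self.stashed.append((time.monotonic() + seconds, item))
                return
        deliver(item)

    def flushDue(self, deliver):
        # Deliver every stashed item whose delay has expired.
        now = time.monotonic()
        due = [item for releaseAt, item in self.stashed if releaseAt <= now]
        self.stashed = [(r, i) for r, i in self.stashed if r > now]
        for item in due:
            deliver(item)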
Example #12
def testNodesConnectsWhenOneNodeIsLate():
    with TemporaryDirectory() as td:
        with Looper() as looper:
            nodes = []
            names = list(nodeReg.keys())
            logger.debug("Node names: {}".format(names))

            def create(name):
                node = Node(name, nodeReg, basedirpath=td)
                looper.add(node)
                node.startKeySharing()
                nodes.append(node)

            for name in names[:3]:
                create(name)

            looper.run(checkNodesConnected(nodes))

            # wait for the election to complete with the first three nodes
            looper.runFor(10)

            # create the fourth and see that it learns who the primaries are
            # from the other nodes
            create(names[3])
            checkProtocolInstanceSetup(looper, nodes, timeout=10)
Example #13
def testTestNodeDelay(tdir_for_func):
    nodeNames = {"testA", "testB"}
    with TestNodeSet(names=nodeNames, tmpdir=tdir_for_func) as nodes:
        nodeA = nodes.getNode("testA")
        nodeB = nodes.getNode("testB")

        with Looper(nodes) as looper:
            for n in nodes:
                n.startKeySharing()

            logging.debug("connect")
            looper.run(checkNodesConnected(nodes))
            logging.debug("send one message, without delay")
            msg = randomMsg()
            looper.run(sendMsgAndCheck(nodes, nodeA, nodeB, msg, 1))
            logging.debug("set delay, then send another message and find that "
                          "it doesn't arrive")
            msg = randomMsg()

            nodeB.nodeIbStasher.delay(delayerMsgTuple(6, type(msg), nodeA.name))

            with pytest.raises(AssertionError):
                looper.run(sendMsgAndCheck(nodes, nodeA, nodeB, msg, 3))
            logging.debug("but then find that it arrives after the delay "
                          "duration has passed")
            looper.run(sendMsgAndCheck(nodes, nodeA, nodeB, msg, 4))
            logging.debug(
                    "reset the delay, and find another message comes quickly")
            nodeB.nodeIbStasher.resetDelays()
            msg = randomMsg()
            looper.run(sendMsgAndCheck(nodes, nodeA, nodeB, msg, 1))
Example #14
def nodeSetWithNodeAddedAfterSomeTxns(txnPoolNodeSet,
                                      nodeCreatedAfterSomeTxns):
    looper, newNode, client, wallet, newStewardClient, newStewardWallet = \
        nodeCreatedAfterSomeTxns
    txnPoolNodeSet.append(newNode)
    looper.run(checkNodesConnected(txnPoolNodeSet, overrideTimeout=10))
    looper.run(newStewardClient.ensureConnectedToNodes())
    looper.run(client.ensureConnectedToNodes())
    return looper, newNode, client, wallet, newStewardClient, newStewardWallet
Example #15
def testKeyShareParty(tdir_for_func):
    """
    Connections to all nodes should be established successfully when key
    sharing is enabled.
    """
    nodeReg = genNodeReg(5)

    logging.debug("-----sharing keys-----")
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            for n in nodeSet:
                n.startKeySharing()
            looper.run(checkNodesConnected(nodeSet))

    logging.debug("-----key sharing done, connect after key sharing-----")
    with TestNodeSet(nodeReg=nodeReg, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as loop:
            loop.run(checkNodesConnected(nodeSet), msgAll(nodeSet))
Example #16
def testNodesConnectWhenTheyAllStartAtOnce(allPluginsPath, tdirAndLooper):
    tdir, looper = tdirAndLooper
    nodes = []
    for name in nodeReg:
        node = TestNode(name, nodeReg, basedirpath=tdir,
                        pluginPaths=allPluginsPath)
        looper.add(node)
        node.startKeySharing()
        nodes.append(node)
    looper.run(checkNodesConnected(nodes))
Example #17
def testNodesConnectWhenTheyAllStartAtOnce():
    with TemporaryDirectory() as td:
        with Looper() as looper:
            nodes = []
            for name in nodeReg:
                node = Node(name, nodeReg, basedirpath=td)
                looper.add(node)
                node.startKeySharing()
                nodes.append(node)
            looper.run(checkNodesConnected(nodes))
Example #18
def testNodesConnectWhenTheyAllStartAtOnce():
    with TemporaryDirectory() as td:
        with Looper() as looper:
            nodes = []
            for name in nodeReg:
                node = TestNode(name, nodeReg, basedirpath=td)
                looper.add(node)
                node.startKeySharing()
                nodes.append(node)
            looper.run(checkNodesConnected(nodes))
Example #19
def testKeyShareParty(tdir_for_func):
    """
    Connections to all nodes should be established successfully when key
    sharing is enabled.
    """
    nodeReg = genNodeReg(5)

    logging.debug("-----sharing keys-----")
    with TestNodeSet(nodeReg=nodeReg,
                     tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            for n in nodeSet:
                n.startKeySharing()
            looper.run(checkNodesConnected(nodeSet))

    logging.debug("-----key sharing done, connect after key sharing-----")
    with TestNodeSet(nodeReg=nodeReg,
                     tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as loop:
            loop.run(checkNodesConnected(nodeSet),
                     msgAll(nodeSet))
Example #20
def testNodeRequestingConsProof(txnPoolNodeSet, nodeCreatedAfterSomeTxns):
    """
    All 4 old nodes delay processing of the LEDGER_STATUS from the newly
    joined node while they are processing requests, which results in them
    sending consistency proofs that are not the same, so the newly joined
    node cannot draw a conclusion about the state of transactions in the
    system. The new node therefore requests a consistency proof for a
    particular range from all nodes.
    """
    looper, newNode, client, wallet, _, _ = nodeCreatedAfterSomeTxns

    # Make the nodes not tell the clients about the newly joined node, so the
    # clients don't send any requests to the newly joined node
    for node in txnPoolNodeSet:
        node.sendPoolInfoToClients = types.MethodType(lambda x, y: None, node)

    txnPoolNodeSet.append(newNode)
    # The new node sends a different ledger status to every node, so it does
    # not get enough similar consistency proofs
    sentSizes = set()

    def sendDLStatus(self, name):
        size = self.primaryStorage.size
        newSize = randint(1, size)
        while newSize in sentSizes:
            newSize = randint(1, size)
        print("new size {}".format(newSize))
        newRootHash = base64.b64encode(
            self.domainLedger.tree.merkle_tree_hash(0, newSize)).decode()
        ledgerStatus = LedgerStatus(1, newSize, newRootHash)

        print("dl status {}".format(ledgerStatus))
        rid = self.nodestack.getRemote(name).uid
        self.send(ledgerStatus, rid)
        sentSizes.add(newSize)

    newNode.sendDomainLedgerStatus = types.MethodType(sendDLStatus, newNode)

    print("sending 10 requests")
    sendRandomRequests(wallet, client, 10)
    looper.run(checkNodesConnected(txnPoolNodeSet, overrideTimeout=60))

    # `ConsistencyProofsTimeout` is set to 60 sec, so need to wait more than
    # 60 sec.
    looper.run(
        eventually(checkNodeLedgersForEquality,
                   newNode,
                   *txnPoolNodeSet[:-1],
                   retryWait=1,
                   timeout=75))
    for node in txnPoolNodeSet[:-1]:
        assert node.ledgerManager.spylog.count(
            TestLedgerManager.processConsistencyProofReq.__name__) > 0
Example #21
def testNodeConnection(allPluginsPath, tdirAndLooper):
    console = getConsole()
    console.reinit(flushy=True, verbosity=console.Wordage.verbose)
    tdir, looper = tdirAndLooper
    names = ["Alpha", "Beta"]
    logger.debug(names)
    nrg = {n: nodeReg[n] for n in names}
    A, B = [TestNode(name, nrg, basedirpath=tdir,
                     pluginPaths=allPluginsPath)
            for name in names]
    looper.add(A)
    A.startKeySharing()
    looper.runFor(4)
    logger.debug("wait done")
    looper.add(B)
    B.startKeySharing()
    looper.runFor(4)
    looper.run(checkNodesConnected([A, B]))
    looper.stopall()
    A.start(looper.loop)
    looper.runFor(4)
    B.start(looper.loop)
    looper.run(checkNodesConnected([A, B]))
Example #22
def txnPoolNodeSet(tdirWithPoolTxns, tdirWithDomainTxns, tconf,
                   poolTxnNodeNames, allPluginsPath, tdirWithNodeKeepInited,
                   testNodeClass):
    with Looper(debug=True) as looper:
        nodes = []
        for nm in poolTxnNodeNames:
            node = testNodeClass(nm,
                                 basedirpath=tdirWithPoolTxns,
                                 config=tconf,
                                 pluginPaths=allPluginsPath)
            looper.add(node)
            nodes.append(node)
        looper.run(checkNodesConnected(nodes))
        yield nodes
Example #23
def testNodeConnection():
    console = getConsole()
    console.reinit(flushy=True, verbosity=console.Wordage.verbose)
    with TemporaryDirectory() as td:
        print("temporary directory: {}".format(td))
        with Looper() as looper:
            names = ["Alpha", "Beta"]
            print(names)
            nrg = {n: nodeReg[n] for n in names}
            A, B = [TestNode(name, nrg, basedirpath=td) for name in names]
            looper.add(A)
            A.startKeySharing()
            looper.runFor(4)
            print("wait done")
            looper.add(B)
            B.startKeySharing()
            looper.runFor(4)
            looper.run(checkNodesConnected([A, B]))
            looper.stopall()
            A.start(looper.loop)
            looper.runFor(4)
            B.start(looper.loop)
            looper.run(checkNodesConnected([A, B]))
Example #24
def testPrimaryElectionCase1(case1Setup, looper, keySharedNodes):
    """
    Case 1 - A node making multiple nominations for a particular node. Consider
    4 nodes A, B, C and D. Let's say node B is malicious and is repeatedly
    nominating node D.
    """
    nodes = keySharedNodes
    nodeA, nodeB, nodeC, nodeD = [nodes.getNode(nm) for nm in nodes.nodeNames]

    # Doesn't matter if nodes reach the ready state or not. Just start them
    looper.run(checkNodesConnected(nodes))

    # Node B sends multiple NOMINATE msgs for Node D but only after A has
    # nominated itself
    looper.run(
        eventually(checkNomination,
                   nodeA,
                   nodeA.name,
                   retryWait=.25,
                   timeout=1))

    instId = getSelfNominationByNode(nodeA)

    for i in range(5):
        nodeB.send(Nomination(nodeD.name, instId, nodeB.viewNo))
    nodeB.nodestack.flushOutBoxes()

    # None of nodes A, C or D (node B is malicious anyway, so it is not
    # considered) should have more than one nomination for node D, since
    # node D is slow. The one nomination for node D that nodes A, C
    # and D might have would be because of node B
    for node in [nodeA, nodeC, nodeD]:
        assert list(node.elector.nominations[instId].values()).count(
            Replica.generateName(nodeD.name, instId)) \
               <= 1

    primaryReplicas = ensureElectionsDone(looper=looper,
                                          nodes=nodes,
                                          retryWait=1,
                                          timeout=30)

    for node in nodes:
        logger.debug("{}'s nominations {}".format(node,
                                                  node.elector.nominations))
    # Node D should not have any primary
    assert not nodeD.hasPrimary
    # A node other than Node D should not have any replica among the
    # primary replicas
    assert nodeD.name not in [pr.name for pr in primaryReplicas]
Example #25
def testNodeConnection():
    console = getConsole()
    console.reinit(flushy=True, verbosity=console.Wordage.verbose)
    with TemporaryDirectory() as td:
        print("temporary directory: {}".format(td))
        with Looper() as looper:
            names = ["Alpha", "Beta"]
            print(names)
            nrg = {n: nodeReg[n] for n in names}
            A, B = [Node(name, nrg, basedirpath=td)
                    for name in names]
            looper.add(A)
            A.startKeySharing()
            looper.runFor(4)
            print("wait done")
            looper.add(B)
            B.startKeySharing()
            looper.runFor(4)
            looper.run(checkNodesConnected([A, B]))
            looper.stopall()
            A.start()
            looper.runFor(4)
            B.start()
            looper.run(checkNodesConnected([A, B]))
Example #26
def testNodesComingUpAtDifferentTimes(allPluginsPath, tdirAndLooper):
    console = getConsole()
    console.reinit(flushy=True, verbosity=console.Wordage.verbose)
    tdir, looper = tdirAndLooper

    nodes = []

    names = list(nodeReg.keys())
    shuffle(names)
    waits = [randint(1, 10) for _ in names]
    rwaits = [randint(1, 10) for _ in names]

    for i, name in enumerate(names):
        node = TestNode(name, nodeReg, basedirpath=tdir,
                        pluginPaths=allPluginsPath)
        looper.add(node)
        node.startKeySharing()
        nodes.append(node)
        looper.runFor(waits[i])
    looper.run(checkNodesConnected(nodes,
                                   overrideTimeout=10))
    logger.debug("connects")
    logger.debug("node order: {}".format(names))
    logger.debug("waits: {}".format(waits))

    for n in nodes:
        n.stop()
    for i, n in enumerate(nodes):
        n.start(looper.loop)
        looper.runFor(rwaits[i])
    looper.runFor(3)
    looper.run(checkNodesConnected(nodes,
                                   overrideTimeout=10))
    logger.debug("reconnects")
    logger.debug("node order: {}".format(names))
    logger.debug("rwaits: {}".format(rwaits))
Example #27
def testNodesComingUpAtDifferentTimes():
    console = getConsole()
    console.reinit(flushy=True, verbosity=console.Wordage.verbose)
    with TemporaryDirectory() as td:
        print("temporary directory: {}".format(td))
        with Looper() as looper:
            nodes = []

            names = list(nodeReg.keys())
            shuffle(names)
            waits = [randint(1, 10) for _ in names]
            rwaits = [randint(1, 10) for _ in names]

            for i, name in enumerate(names):
                node = Node(name, nodeReg, basedirpath=td)
                looper.add(node)
                node.startKeySharing()
                nodes.append(node)
                looper.runFor(waits[i])
            looper.run(checkNodesConnected(nodes,
                                           overrideTimeout=10))
            print("connects")
            print("node order: {}".format(names))
            print("waits: {}".format(waits))

            for n in nodes:
                n.stop()
            for i, n in enumerate(nodes):
                n.start()
                looper.runFor(rwaits[i])
            looper.runFor(3)
            looper.run(checkNodesConnected(nodes,
                                           overrideTimeout=10))
            print("reconnects")
            print("node order: {}".format(names))
            print("rwaits: {}".format(rwaits))
Example #28
def nodeStashingOrderedRequests(txnPoolNodeSet, nodeCreatedAfterSomeTxns):
    looper, newNode, client, wallet, _, _ = nodeCreatedAfterSomeTxns
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(crDelay(5))
    txnPoolNodeSet.append(newNode)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, client,
                                                  *txnPoolNodeSet[:-1])
    sendRandomRequests(wallet, client, 10)
    looper.run(checkNodesConnected(txnPoolNodeSet, overrideTimeout=15))

    def stashing():
        assert newNode.mode != Mode.participating
        assert len(newNode.stashedOrderedReqs) > 0
        assert len(newNode.reqsFromCatchupReplies) > 0

    looper.run(eventually(stashing, retryWait=1, timeout=20))
Example #29
def testNodeRejectingInvalidTxns(txnPoolNodeSet, nodeCreatedAfterSomeTxns):
    """
    A newly joined node is catching up and sends catchup requests to other
    nodes, but one of the nodes replies with incorrect transactions. The newly
    joined node detects that, rejects the transactions, and thus blacklists
    the node. It therefore cannot complete the process until the timeout
    expires, after which it requests the missing transactions.
    """
    looper, newNode, client, wallet, _, _ = nodeCreatedAfterSomeTxns

    # Make the nodes not tell the clients about the newly joined node, so the
    # clients don't send any requests to the newly joined node
    for node in txnPoolNodeSet:
        node.sendPoolInfoToClients = types.MethodType(lambda x, y: None, node)

    def sendIncorrectTxns(self, req, frm):
        ledgerType = getattr(req, f.LEDGER_TYPE.nm)
        if ledgerType == 1:
            logger.info("{} being malicious and sending incorrect transactions"
                        " for catchup request {} from {}".
                        format(self, req, frm))
            start, end = getattr(req, f.SEQ_NO_START.nm), \
                         getattr(req, f.SEQ_NO_END.nm)
            ledger = self.getLedgerForMsg(req)
            txns = ledger.getAllTxn(start, end)
            for seqNo in txns.keys():
                # Since the type of random request is `buy`
                if txns[seqNo].get(TXN_TYPE) == "buy":
                    txns[seqNo][TXN_TYPE] = "randomtype"
            consProof = [b64encode(p).decode() for p in
                         ledger.tree.consistency_proof(end, ledger.size)]
            self.sendTo(msg=CatchupRep(getattr(req, f.LEDGER_TYPE.nm), txns,
                                       consProof), to=frm)
        else:
            self.processCatchupReq(req, frm)

    # One of the nodes replies to catchup requests with incorrect transactions.
    txnPoolNodeSet[0].nodeMsgRouter.routes[CatchupReq] = types.MethodType(
        sendIncorrectTxns, txnPoolNodeSet[0].ledgerManager)

    sendRandomRequests(wallet, client, 10)
    looper.run(checkNodesConnected(txnPoolNodeSet, overrideTimeout=60))
    looper.run(eventually(checkNodeLedgersForEquality, newNode,
                          *txnPoolNodeSet[:-1], retryWait=1, timeout=45))

    assert newNode.isNodeBlacklisted(txnPoolNodeSet[0].name)
Example #30
def testConnectWithoutKeySharingFails(tdir_for_func):
    """
    Attempts at connecting to nodes when key sharing is disabled must fail.
    """
    nodeNames = genNodeNames(5)
    with TestNodeSet(names=nodeNames, tmpdir=tdir_for_func,
                     keyshare=False) as nodes:
        with Looper(nodes) as looper:
            try:
                looper.run(
                    checkNodesConnected(nodes, RemoteState(None, None, None)))
            except RemoteNotFound:
                pass
            except KeyError as ex:
                assert [n for n in nodeNames if n == ex.args[0]]
            except Exception:
                raise
Example #31
def testPrimaryElectionCase1(case1Setup, looper, keySharedNodes):
    """
    Case 1 - A node making multiple nominations for a particular node. Consider
    4 nodes A, B, C and D. Let's say node B is malicious and is repeatedly
    nominating node D.
    """
    nodes = keySharedNodes
    nodeA, nodeB, nodeC, nodeD = [nodes.getNode(nm) for nm in nodes.nodeNames]

    # Doesn't matter if nodes reach the ready state or not. Just start them
    looper.run(checkNodesConnected(nodes))

    # Node B sends multiple NOMINATE msgs for Node D but only after A has
    # nominated itself
    looper.run(eventually(checkNomination, nodeA, nodeA.name, retryWait=.25,
                          timeout=1))

    instId = getSelfNominationByNode(nodeA)

    for i in range(5):
        nodeB.send(Nomination(nodeD.name, instId, nodeB.viewNo))
    nodeB.flushOutBoxes()

    # None of nodes A, C or D (node B is malicious anyway, so it is not
    # considered) should have more than one nomination for node D, since
    # node D is slow. The one nomination for node D that nodes A, C
    # and D might have would be because of node B
    for node in [nodeA, nodeC, nodeD]:
        assert list(node.elector.nominations[instId].values()).count(
            Replica.generateName(nodeD.name, instId)) \
               <= 1

    primaryReplicas = ensureElectionsDone(looper=looper, nodes=nodes,
                                          retryWait=1, timeout=30)

    for node in nodes:
        logging.debug(
            "{}'s nominations {}".format(node, node.elector.nominations))
    # Node D should not have any primary
    assert not nodeD.hasPrimary
    # A node other than Node D should not have any replica among the
    # primary replicas
    assert nodeD.name not in [pr.name for pr in primaryReplicas]
Example #32
def testConnectWithoutKeySharingFails(tdir_for_func):
    """
    Attempts at connecting to nodes when key sharing is disabled must fail.
    """
    nodeNames = genNodeNames(5)
    with TestNodeSet(names=nodeNames, tmpdir=tdir_for_func,
                     keyshare=False) as nodes:
        with Looper(nodes) as looper:
            try:
                looper.run(
                        checkNodesConnected(nodes,
                                            RemoteState(None, None, None)))
            except RemoteNotFound:
                pass
            except KeyError as ex:
                assert [n for n in nodeNames
                        if n == ex.args[0]]
            except Exception:
                raise
Example #33
def testNodeConnectionAfterKeysharingRestarted(allPluginsPath, tdirAndLooper):
    console = getConsole()
    console.reinit(flushy=True, verbosity=console.Wordage.verbose)
    tdir, looper = tdirAndLooper
    timeout = 60
    names = ["Alpha", "Beta"]
    logger.debug(names)
    nrg = {n: nodeReg[n] for n in names}
    A, B = [TestNode(name, nodeRegistry=nrg, basedirpath=tdir,
                     pluginPaths=allPluginsPath)
            for name in names]
    looper.add(A)
    A.startKeySharing(timeout=timeout)
    looper.runFor(timeout+1)
    logger.debug("done waiting for A's timeout")
    looper.add(B)
    B.startKeySharing(timeout=timeout)
    looper.runFor(timeout+1)
    logger.debug("done waiting for B's timeout")
    A.startKeySharing(timeout=timeout)
    B.startKeySharing(timeout=timeout)
    looper.run(checkNodesConnected([A, B]))
Example #34
def testNodeConnectionAfterKeysharingRestarted():
    console = getConsole()
    console.reinit(flushy=True, verbosity=console.Wordage.verbose)
    with TemporaryDirectory() as td:
        print("temporary directory: {}".format(td))
        with Looper() as looper:
            timeout = 60
            names = ["Alpha", "Beta"]
            print(names)
            nrg = {n: nodeReg[n] for n in names}
            A, B = [TestNode(name, nrg, basedirpath=td) for name in names]
            looper.add(A)
            A.startKeySharing(timeout=timeout)
            looper.runFor(timeout + 1)
            print("done waiting for A's timeout")
            looper.add(B)
            B.startKeySharing(timeout=timeout)
            looper.runFor(timeout + 1)
            print("done waiting for B's timeout")
            A.startKeySharing(timeout=timeout)
            B.startKeySharing(timeout=timeout)
            looper.run(checkNodesConnected([A, B]))
Example #35
def testNodeConnectionAfterKeysharingRestarted():
    console = getConsole()
    console.reinit(flushy=True, verbosity=console.Wordage.verbose)
    with TemporaryDirectory() as td:
        print("temporary directory: {}".format(td))
        with Looper() as looper:
            timeout = 60
            names = ["Alpha", "Beta"]
            print(names)
            nrg = {n: nodeReg[n] for n in names}
            A, B = [Node(name, nrg, basedirpath=td)
                    for name in names]
            looper.add(A)
            A.startKeySharing(timeout=timeout)
            looper.runFor(timeout+1)
            print("done waiting for A's timeout")
            looper.add(B)
            B.startKeySharing(timeout=timeout)
            looper.runFor(timeout+1)
            print("done waiting for B's timeout")
            A.startKeySharing(timeout=timeout)
            B.startKeySharing(timeout=timeout)
            looper.run(checkNodesConnected([A, B]))
Example #36
def testProtocolInstanceCannotBecomeActiveWithLessThanFourServers(
        tdir_for_func):
    """
    A protocol instance must have at least 4 nodes to come up.
    The status of the nodes will change from starting to started only after the
    addition of the fourth node to the system.
    """
    nodeCount = 16
    f = 5
    minimumNodesToBeUp = 16 - f

    nodeNames = genNodeNames(nodeCount)
    with TestNodeSet(names=nodeNames, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:

            for n in nodeSet:
                n.startKeySharing()

            # helpers

            def genExpectedStates(connecteds: Iterable[str]):
                return {
                    nn: CONNECTED if nn in connecteds else JOINED_NOT_ALLOWED
                    for nn in nodeNames
                }

            def checkNodeStatusRemotesAndF(expectedStatus: Status,
                                           nodeIdx: int):
                for node in nodeSet.nodes.values():
                    checkNodeRemotes(
                        node, genExpectedStates(nodeNames[:nodeIdx + 1]))
                    assert node.status == expectedStatus

            def addNodeBackAndCheck(nodeIdx: int, expectedStatus: Status):
                logger.info("Add back the {} node and see status of {}".format(
                    ordinal(nodeIdx + 1), expectedStatus))
                addNodeBack(nodeSet, looper, nodeNames[nodeIdx])
                looper.run(
                    eventually(checkNodeStatusRemotesAndF,
                               expectedStatus,
                               nodeIdx,
                               retryWait=1,
                               timeout=30))

            # tests

            logger.debug("Sharing keys")
            looper.run(checkNodesConnected(nodeSet))

            logger.debug("Remove all the nodes")
            for n in nodeNames:
                looper.removeProdable(nodeSet.nodes[n])
                nodeSet.removeNode(n, shouldClean=False)

            logger.debug("Add nodes back one at a time")
            for i in range(nodeCount):
                nodes = i + 1
                if nodes < minimumNodesToBeUp:
                    expectedStatus = Status.starting
                elif nodes < nodeCount:
                    expectedStatus = Status.started_hungry
                else:
                    expectedStatus = Status.started
                addNodeBackAndCheck(i, expectedStatus)
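The constants above follow the usual BFT sizing: a pool of n nodes tolerates f faulty nodes when n >= 3f + 1, so with 16 nodes f = 5 and at least 16 - 5 = 11 nodes must be up. A small sketch of that arithmetic (reading nodeCount, f and minimumNodesToBeUp through the standard n = 3f + 1 bound is an assumption; the test itself only hard-codes the numbers):

def max_faulty(n):
    # Largest f such that n >= 3f + 1
    return (n - 1) // 3

nodeCount = 16
f = max_faulty(nodeCount)            # 5
minimumNodesToBeUp = nodeCount - f   # 11
assert (f, minimumNodesToBeUp) == (5, 11)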
Example #37
def testCatchupDelayedNodes(txnPoolNodeSet, nodeSetWithNodeAddedAfterSomeTxns,
                            txnPoolCliNodeReg, tdirWithPoolTxns, tconf,
                            allPluginsPath):
    """
    A node sends catchup requests to other nodes only for those sequence
    numbers that the other nodes have. Start with a pool of connected nodes
    that has processed some transactions, then add two more nodes, X and Y,
    where Y starts its catchup process only after some delay. The node
    starting late, i.e. Y, should not receive any catchup requests.
    :return:
    """
    looper, _, _, _, client, wallet = nodeSetWithNodeAddedAfterSomeTxns
    stewardXName = "testClientStewardX"
    nodeXName = "Zeta"
    stewardYName = "testClientStewardY"
    nodeYName = "Eta"
    stewardZName = "testClientStewardZ"
    nodeZName = "Theta"
    stewardX, nodeX = addNewStewardAndNode(looper,
                                           client,
                                           stewardXName,
                                           nodeXName,
                                           tdirWithPoolTxns,
                                           tconf,
                                           allPluginsPath,
                                           autoStart=False)
    stewardY, nodeY = addNewStewardAndNode(looper,
                                           client,
                                           stewardYName,
                                           nodeYName,
                                           tdirWithPoolTxns,
                                           tconf,
                                           allPluginsPath,
                                           autoStart=False)
    nodeX.nodeIbStasher.delay(cpDelay(45))
    nodeY.nodeIbStasher.delay(cpDelay(2))
    looper.add(nodeX)
    looper.add(nodeY)
    txnPoolNodeSet.append(nodeX)
    txnPoolNodeSet.append(nodeY)

    looper.run(checkNodesConnected(txnPoolNodeSet, overrideTimeout=60))
    logger.debug("Stopping 2 newest nodes, {} and {}".format(
        nodeX.name, nodeY.name))
    nodeX.stop()
    nodeY.stop()
    logger.debug("Sending requests")
    sendReqsToNodesAndVerifySuffReplies(looper, wallet, client, 50)
    logger.debug("Starting the 2 stopped nodes, {} and {}".format(
        nodeX.name, nodeY.name))
    nodeX.start(looper.loop)
    nodeY.start(looper.loop)
    looper.run(
        eventually(checkNodeLedgersForEquality,
                   nodeX,
                   *txnPoolNodeSet[:5],
                   retryWait=1,
                   timeout=15))
    looper.run(
        eventually(checkNodeLedgersForEquality,
                   nodeY,
                   *txnPoolNodeSet[:5],
                   retryWait=1,
                   timeout=15))
Example #38
def testProtocolInstanceCannotBecomeActiveWithLessThanFourServers(
        tdir_for_func):
    """
    A protocol instance must have at least 4 nodes to come up.
    The status of the nodes will change from starting to started only after the
    addition of the fourth node to the system.
    """
    nodeCount = 16
    f = 5
    minimumNodesToBeUp = 16 - f

    nodeNames = genNodeNames(nodeCount)
    with TestNodeSet(names=nodeNames, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:

            for n in nodeSet:
                n.startKeySharing()

            # helpers

            def genExpectedStates(connecteds: Iterable[str]):
                return {
                    nn: CONNECTED if nn in connecteds else JOINED_NOT_ALLOWED
                    for nn in nodeNames}

            def checkNodeStatusRemotesAndF(expectedStatus: Status,
                                           nodeIdx: int):
                for node in nodeSet.nodes.values():
                    checkNodeRemotes(node,
                                     genExpectedStates(nodeNames[:nodeIdx + 1]))
                    assert node.status == expectedStatus

            def addNodeBackAndCheck(nodeIdx: int, expectedStatus: Status):
                logging.info("Add back the {} node and see status of {}".
                             format(ordinal(nodeIdx + 1), expectedStatus))
                addNodeBack(nodeSet, looper, nodeNames[nodeIdx])
                looper.run(
                        eventually(checkNodeStatusRemotesAndF, expectedStatus,
                                   nodeIdx,
                                   retryWait=1, timeout=30))

            # tests

            logging.debug("Sharing keys")
            looper.run(checkNodesConnected(nodeSet))

            logging.debug("Remove all the nodes")
            for n in nodeNames:
                looper.removeProdable(nodeSet.nodes[n])
                nodeSet.removeNode(n, shouldClean=False)

            logging.debug("Add nodes back one at a time")
            for i in range(nodeCount):
                nodes = i + 1
                if nodes < minimumNodesToBeUp:
                    expectedStatus = Status.starting
                elif nodes < nodeCount:
                    expectedStatus = Status.started_hungry
                else:
                    expectedStatus = Status.started
                addNodeBackAndCheck(i, expectedStatus)
Example #39
def pool(looper, nodeSet):
    for n in nodeSet:  # type: TestNode
        n.startKeySharing()
    looper.run(checkNodesConnected(nodeSet))
    checkProtocolInstanceSetup(looper, nodeSet, timeout=5)
    return adict(looper=looper, nodeset=nodeSet)
Example #40
def pool(looper, nodeSet):
    for n in nodeSet:  # type: TestNode
        n.startKeySharing()
    looper.run(checkNodesConnected(nodeSet))
    checkProtocolInstanceSetup(looper, nodeSet, timeout=5)
    return adict(looper=looper, nodeset=nodeSet)
Example #41
                    primaryDecider=None)


def whitelistClient(nodes, *clientNames):
    for node in nodes:
        for nm in clientNames:
            node.whitelistClient(nm)


looper = Looper(nodes, autoStart=True)
for node in nodes:
    node.startKeySharing()
    node.start(looper)
    # node.addGenesisTxns(genesisTxns(stewardSigner))

looper.run(checkNodesConnected(nodes))
ensureElectionsDone(looper=looper, nodes=nodes, retryWait=1, timeout=30)

steward, _ = genTestClient(nodes, tmpdir=tdir)
# whitelistClient(nodes, steward.name)
steward.registerObserver(stewardWallet.handleIncomingReply)
looper.add(steward)
looper.run(steward.ensureConnectedToNodes())
makePendingTxnsRequest(steward, stewardWallet)

createNym(looper, sponsorWallet.defaultId, steward, stewardWallet, SPONSOR)

sponsor, _ = genTestClient(nodes, tmpdir=tdir)
sponsor.registerObserver(sponsorWallet.handleIncomingReply)
# whitelistClient(nodes, sponsor.name)
looper.add(sponsor)
Example #42
def ready(looper, keySharedNodes):
    looper.run(checkNodesConnected(keySharedNodes))
    return keySharedNodes
Example #43
def ready(looper, keySharedNodes):
    looper.run(checkNodesConnected(keySharedNodes))
    return keySharedNodes
Example #44
def pool(looper, txnPoolNodeSet):
    # for n in nodeSet:  # type: TestNode
    #     n.startKeySharing()
    looper.run(checkNodesConnected(txnPoolNodeSet))
    checkProtocolInstanceSetup(looper, txnPoolNodeSet)
    return adict(looper=looper, nodeset=txnPoolNodeSet)