def testPropagateRecvdBeforeRequest(setup, looper, txnPoolNodeSet, sent1):
    """
    When a PROPAGATE for a request reaches a node before the client's own
    request does, the node must propagate only once, forward the request
    exactly once, and authenticate the expected number of times.
    """
    A, B, C, D = txnPoolNodeSet

    def propagate_arrived_first():
        # The client's request has not reached A yet...
        assert len(recvdRequest(A)) == 0
        # ...but exactly one PROPAGATE from a peer has...
        assert len(recvdPropagate(A)) == 1
        # ...and A itself has propagated exactly once
        assert len(sentPropagate(A)) == 1

    early = waits.expectedNodeToNodeMessageDeliveryTime() + delaySec - 2
    looper.run(eventually(propagate_arrived_first, retryWait=.5, timeout=early))

    def request_arrived_later():
        # The delayed client request has now landed on A
        assert len(recvdRequest(A)) == 1
        # A must not propagate a second time for the same request
        assert len(sentPropagate(A)) == 1

    late = waits.expectedNodeToNodeMessageDeliveryTime() + delaySec + 2
    looper.run(eventually(request_arrived_later, retryWait=.5, timeout=late))

    def forwarded_once():
        # The request was forwarded onward exactly once
        assertLength(forwardedRequest(A), 1)

    fwd_timeout = waits.expectedClientRequestPropagationTime(
        len(txnPoolNodeSet)) + delaySec
    looper.run(eventually(forwarded_once, retryWait=1, timeout=fwd_timeout))

    # Authentication must have run the expected number of times
    auth_obj = A.authNr(0).core_authenticator
    assert get_count(auth_obj, auth_obj.authenticate) == reqCount
def testPropagateRecvdBeforeRequest(setup, looper, nodeSet, up, sent1):
    """
    If PROPAGATEs beat the client's own request to a node, the node must
    propagate only once and forward the request exactly once.
    """
    A, B, C, D = nodeSet.nodes.values()

    def only_propagate_seen():
        # Request hasn't reached A directly from the client yet
        assert len(recvdRequest(A)) == 0
        # One PROPAGATE received, one PROPAGATE sent
        assert len(recvdPropagate(A)) == 1
        assert len(sentPropagate(A)) == 1

    early = waits.expectedNodeToNodeMessageDeliveryTime() + delaySec - 2
    looper.run(eventually(only_propagate_seen, retryWait=.5, timeout=early))

    def request_received():
        # The delayed client request has finally arrived
        assert len(recvdRequest(A)) == 1
        # Still exactly one PROPAGATE sent — no duplicate propagation
        assert len(sentPropagate(A)) == 1

    late = waits.expectedNodeToNodeMessageDeliveryTime() + delaySec + 2
    looper.run(eventually(request_received, retryWait=.5, timeout=late))

    def forwarded():
        # The request was forwarded exactly once
        assertLength(forwardedRequest(A), 1)

    fwd_timeout = waits.expectedClientRequestPropagationTime(
        len(nodeSet)) + delaySec
    looper.run(eventually(forwarded, retryWait=1, timeout=fwd_timeout))
def testTestNodeDelay(tdir_for_func):
    """
    Exercise the inbound stasher's delay mechanism: a delayed message does
    not arrive within the normal window, shows up once the delay elapses,
    and delivery returns to normal after the delays are reset.
    """
    nodeNames = {"testA", "testB"}
    with TestNodeSet(names=nodeNames, tmpdir=tdir_for_func) as nodes:
        nodeA = nodes.getNode("testA")
        nodeB = nodes.getNode("testB")
        with Looper(nodes) as looper:
            looper.run(checkNodesConnected(nodes))

            # Baseline: with no delay configured, delivery succeeds
            looper.run(sendMessageAndCheckDelivery(nodes, nodeA, nodeB))

            # Stash TestMsg traffic from A for several delivery windows
            delay = 5 * waits.expectedNodeToNodeMessageDeliveryTime()
            nodeB.nodeIbStasher.delay(
                delayerMsgTuple(delay, TestMsg, nodeA.name)
            )

            # Inside the delay window the message must NOT arrive
            with pytest.raises(AssertionError):
                looper.run(sendMessageAndCheckDelivery(nodes, nodeA, nodeB))

            # After the delay duration has passed, the message is delivered
            timeout = waits.expectedNodeToNodeMessageDeliveryTime() + delay
            looper.run(sendMessageAndCheckDelivery(nodes, nodeA, nodeB,
                                                   customTimeout=timeout))

            # Clearing the delays restores prompt delivery
            nodeB.nodeIbStasher.reset_delays_and_process_delayeds()
            looper.run(sendMessageAndCheckDelivery(nodes, nodeA, nodeB))
async def sendMessageAndCheckDelivery(sender: Node,
                                      reciever: Node,
                                      msg: Optional[Tuple] = None,
                                      method=None,
                                      customTimeout=None):
    """
    Send a message from one node to another and wait until it is delivered.

    :param sender: node that sends the message
    :param reciever: node that should receive the message
    :param msg: optional message — a random one is generated by default
    :param method: optional receive hook passed through to
        ``checkMessageReceived``
    :param customTimeout: overrides the default node-to-node delivery timeout
    :raises AssertionError: if the message is not received within the timeout
    """
    logger.debug("Sending msg from {} to {}".format(sender.name,
                                                    reciever.name))
    msg = msg if msg else randomMsg()
    # Resolve the recipient's remote id on the sender's stack and send
    rid = sender.nodestack.getRemote(reciever.name).uid
    sender.nodestack.send(msg, rid)
    timeout = customTimeout or waits.expectedNodeToNodeMessageDeliveryTime()
    # Poll until the exact message shows up on the receiving node
    await eventually(checkMessageReceived, msg, reciever, method,
                     retryWait=.1,
                     timeout=timeout,
                     ratchetSteps=10)
def testInstanceChangeMsgTypeChecking(txnPoolNodeSet, looper):
    """
    A node receiving an InstanceChange whose viewNo has the wrong type must
    discard it with a reason matching DISCARD_REASON.
    """
    nodeA, nodeB = txnPoolNodeSet[0], txnPoolNodeSet[1]
    ridBeta = nodeA.nodestack.getRemote(nodeB.name).uid

    def make_bad_instance_change():
        # Mutate the field after construction so that client-side
        # validation of viewNo cannot reject the bad value up front.
        icMsg = InstanceChange(viewNo=1, reason=0)
        icMsg._fields["viewNo"] = "BAD"
        return icMsg

    nodeA.send(make_bad_instance_change(), ridBeta)
    looper.runFor(0.2)
    params = nodeB.spylog.getLastParams(TestNode.discard)

    def discarded_with_reason():
        assert re.search(DISCARD_REASON, str(params['reason']))

    looper.run(eventually(
        discarded_with_reason,
        timeout=waits.expectedNodeToNodeMessageDeliveryTime()))
async def sendMessageAndCheckDelivery(sender: Node, reciever: Node,
                                      msg: Optional[Tuple] = None,
                                      method=None, customTimeout=None):
    """
    Transmit a message between two nodes and assert that it was received.

    :param sender: node that sends the message
    :param reciever: node that should receive the message
    :param msg: optional message — a random one is generated by default
    :param customTimeout: overrides the default delivery timeout
    :return:
    """
    logger.debug("Sending msg from {} to {}".format(sender.name,
                                                    reciever.name))
    if not msg:
        msg = randomMsg()
    # Look up the recipient on the sender's node stack and fire the message
    remote_id = sender.nodestack.getRemote(reciever.name).uid
    sender.nodestack.send(msg, remote_id)
    wait_for = customTimeout if customTimeout else \
        waits.expectedNodeToNodeMessageDeliveryTime()
    # Retry until the message is observed on the receiving side
    await eventually(checkMessageReceived, msg, reciever, method,
                     retryWait=.1, timeout=wait_for, ratchetSteps=10)
def testNonPrimaryRecvs3PhaseMessageOutsideWatermarks(chkFreqPatched, looper,
                                                      txnPoolNodeSet, client1,
                                                      wallet1,
                                                      client1Connected,
                                                      reqs_for_logsize):
    """
    A node is slow in processing PRE-PREPAREs and PREPAREs such that lot of
    requests happen and the slow node has started getting 3 phase messages
    outside of it watermarks. Check that it queues up requests outside
    watermarks and once it has received stable checkpoint it processes more
    requests. It sends other nodes 3 phase messages older than their stable
    checkpoint so they should discard them.
    """
    delay = 15
    instId = 1
    reqsToSend = reqs_for_logsize + 2

    npr = getNonPrimaryReplicas(txnPoolNodeSet, instId)
    slowReplica = npr[0]
    slowNode = slowReplica.node
    # Hold back both PRE-PREPAREs and PREPAREs on the slow node
    slowNode.nodeIbStasher.delay(ppDelay(delay, instId))
    slowNode.nodeIbStasher.delay(pDelay(delay, instId))

    def discardCounts(replicas, pat):
        # Snapshot per-replica discard counters matching the given pattern
        return {r.name: countDiscarded(r, pat) for r in replicas}

    fast_replicas = [n.replicas[instId] for n in txnPoolNodeSet
                     if n != slowNode]
    oldStashCount = slowReplica.spylog.count(
        TestReplica.stashOutsideWatermarks.__name__)
    oldDiscardCounts = discardCounts(fast_replicas,
                                     'achieved stable checkpoint')

    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1,
                                        reqsToSend, 1)

    # The slow node must still catch up with the rest of the pool
    ready_timeout = waits.expectedPoolGetReadyTimeout(len(txnPoolNodeSet))
    looper.run(
        eventually(checkNodeDataForEquality, slowNode,
                   *[n for n in txnPoolNodeSet if n != slowNode],
                   retryWait=1, timeout=ready_timeout))

    # Messages outside the watermarks must have been stashed meanwhile
    newStashCount = slowReplica.spylog.count(
        TestReplica.stashOutsideWatermarks.__name__)
    assert newStashCount > oldStashCount

    def chk():
        # Every fast replica discarded at least one stale 3PC message
        counts = discardCounts(fast_replicas, 'achieved stable checkpoint')
        for nm, count in counts.items():
            assert count > oldDiscardCounts[nm]

    chk_timeout = waits.expectedNodeToNodeMessageDeliveryTime() * \
        len(txnPoolNodeSet) + delay
    looper.run(eventually(chk, retryWait=1, timeout=chk_timeout))
def testTestNodeDelay(looper, txnPoolNodeSet):
    """
    Verify the inbound stasher's delay: a delayed message misses the normal
    delivery window, arrives once the delay elapses, and delivery returns to
    normal after the delays are reset.
    """
    looper.run(checkNodesConnected(txnPoolNodeSet))
    nodeA, nodeB = txnPoolNodeSet[0], txnPoolNodeSet[1]

    # Baseline: no delay configured, the message arrives promptly
    looper.run(sendMessageAndCheckDelivery(nodeA, nodeB))

    # Delay TestMsg traffic from A on B's inbound stasher
    delay = 5 * waits.expectedNodeToNodeMessageDeliveryTime()
    nodeB.nodeIbStasher.delay(delayerMsgTuple(delay, TestMsg, nodeA.name))

    # Within the normal window the message must NOT arrive
    with pytest.raises(AssertionError):
        looper.run(sendMessageAndCheckDelivery(nodeA, nodeB))

    # Once the delay duration has passed, the message is delivered
    timeout = waits.expectedNodeToNodeMessageDeliveryTime() + delay
    looper.run(sendMessageAndCheckDelivery(nodeA, nodeB,
                                           customTimeout=timeout))

    # Resetting the delays restores prompt delivery
    nodeB.nodeIbStasher.reset_delays_and_process_delayeds()
    looper.run(sendMessageAndCheckDelivery(nodeA, nodeB))
def testNodeDiscardMessageFromUnknownView(txnPoolNodeSet,
                                          nodeSetWithNodeAddedAfterSomeTxns,
                                          newNodeCaughtUp, tdirWithPoolTxns,
                                          tconf, allPluginsPath):
    """
    Node discards 3-phase or ViewChangeDone messages from view nos that it
    does not know of (view nos before it joined the pool)
    :return:
    """
    looper, nodeX, client, wallet, _, _ = nodeSetWithNodeAddedAfterSomeTxns
    viewNo = nodeX.viewNo

    # Force two view changes: node discards msgs which have viewNo
    # at least two less than node's. Current protocol implementation
    # needs to hold messages from the previous view as well as
    # from the current view.
    for _ in range(2):
        ensure_view_change(looper, txnPoolNodeSet)
        waitNodeDataEquality(looper, nodeX, *txnPoolNodeSet[:-1])
        checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)

    sender = txnPoolNodeSet[0]
    rid_x_node = sender.nodestack.getRemote(nodeX.name).uid
    messageTimeout = waits.expectedNodeToNodeMessageDeliveryTime()

    # Craft a PrePrepare carrying the stale (pre-view-change) viewNo;
    # the new node must discard it
    primaryRepl = getPrimaryReplica(txnPoolNodeSet)
    three_pc = PrePrepare(
        0,
        viewNo,
        10,
        time.time(),
        [[wallet.defaultId, wallet._getIdData().lastReqId + 1]],
        1,
        "random digest",
        DOMAIN_LEDGER_ID,
        primaryRepl.stateRootHash(DOMAIN_LEDGER_ID),
        primaryRepl.txnRootHash(DOMAIN_LEDGER_ID),
    )
    sender.send(three_pc, rid_x_node)
    looper.run(
        eventually(checkDiscardMsg, [nodeX, ], three_pc,
                   'un-acceptable viewNo',
                   retryWait=1, timeout=messageTimeout))
def testNodeDiscardMessageFromUnknownView(
        txnPoolNodeSet,
        sdk_node_set_with_node_added_after_some_txns,
        sdk_new_node_caught_up,
        allPluginsPath, wallet1):
    """
    Node discards 3-phase or ViewChangeDone messages from view nos that it
    does not know of (view nos before it joined the pool)
    :return:
    """
    looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \
        sdk_node_set_with_node_added_after_some_txns
    viewNo = new_node.viewNo

    # Force two view changes: node discards msgs which have viewNo
    # at least two less than node's. Current protocol implementation
    # needs to hold messages from the previous view as well as
    # from the current view.
    for _ in range(2):
        ensure_view_change(looper, txnPoolNodeSet)
        waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
        checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)

    sender = txnPoolNodeSet[0]
    rid_x_node = sender.nodestack.getRemote(new_node.name).uid
    messageTimeout = waits.expectedNodeToNodeMessageDeliveryTime()

    # Craft a PrePrepare carrying the stale viewNo; the new node must
    # discard it as coming from an unknown view
    primaryRepl = getPrimaryReplica(txnPoolNodeSet)
    three_pc = PrePrepare(
        0,
        viewNo,
        10,
        get_utc_epoch(),
        [[wallet1.defaultId, Request.gen_req_id()]],
        1,
        "random digest",
        DOMAIN_LEDGER_ID,
        primaryRepl.stateRootHash(DOMAIN_LEDGER_ID),
        primaryRepl.txnRootHash(DOMAIN_LEDGER_ID),
    )
    sender.send(three_pc, rid_x_node)
    looper.run(
        eventually(checkDiscardMsg, [new_node, ], three_pc,
                   'un-acceptable viewNo',
                   retryWait=1, timeout=messageTimeout))
def testNodeDiscardMessageFromUnknownView(txnPoolNodeSet,
                                          sdk_node_set_with_node_added_after_some_txns,
                                          sdk_new_node_caught_up,
                                          allPluginsPath,
                                          sdk_wallet_client):
    """
    Node discards 3-phase or ViewChangeDone messages from view nos that it
    does not know of (view nos before it joined the pool)
    :return:
    """
    looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \
        sdk_node_set_with_node_added_after_some_txns
    viewNo = new_node.viewNo

    # Force two view changes: node discards msgs which have viewNo
    # at least two less than node's. Current protocol implementation
    # needs to hold messages from the previous view as well as
    # from the current view.
    for _ in range(2):
        ensure_view_change(looper, txnPoolNodeSet)
        waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
        checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)

    sender = txnPoolNodeSet[0]
    rid_x_node = sender.nodestack.getRemote(new_node.name).uid
    messageTimeout = waits.expectedNodeToNodeMessageDeliveryTime()

    # Craft a PrePrepare carrying the stale viewNo; the new node must
    # discard it as coming from an unknown view
    _, did = sdk_wallet_client
    primaryRepl = getPrimaryReplica(txnPoolNodeSet)
    three_pc = PrePrepare(
        0,
        viewNo,
        10,
        get_utc_epoch(),
        ["random request digest"],
        init_discarded(),
        "random digest",
        DOMAIN_LEDGER_ID,
        primaryRepl.stateRootHash(DOMAIN_LEDGER_ID),
        primaryRepl.txnRootHash(DOMAIN_LEDGER_ID),
        0,
        True)
    sender.send(three_pc, rid_x_node)
    looper.run(eventually(checkDiscardMsg, [new_node, ], three_pc,
                          'un-acceptable viewNo',
                          retryWait=1, timeout=messageTimeout))
def testNodeDiscardMessageFromUnknownView(
        txnPoolNodeSet,
        sdk_node_set_with_node_added_after_some_txns,
        sdk_new_node_caught_up, allPluginsPath,
        sdk_wallet_client):
    """
    Node discards 3-phase or ViewChangeDone messages from view nos that it
    does not know of (view nos before it joined the pool)
    :return:
    """
    looper, new_node, sdk_pool_handle, new_steward_wallet_handle = \
        sdk_node_set_with_node_added_after_some_txns
    viewNo = new_node.viewNo
    pp_seq_no = get_pp_seq_no(txnPoolNodeSet)

    # Force two view changes: node discards msgs which have viewNo
    # at least two less than node's. Current protocol implementation
    # needs to hold messages from the previous view as well as
    # from the current view.
    for _ in range(1):
        ensure_view_change(looper, txnPoolNodeSet)
        waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
        checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)
        pp_seq_no += 1

    sender = txnPoolNodeSet[1]
    rid_x_node = sender.nodestack.getRemote(new_node.name).uid
    messageTimeout = waits.expectedNodeToNodeMessageDeliveryTime()

    # 3 pc msg (PrePrepare) needs to be discarded
    _, did = sdk_wallet_client
    primaryRepl = getPrimaryReplica(txnPoolNodeSet)
    inst_id = 0
    three_pc = create_pre_prepare_no_bls(
        primaryRepl.node.db_manager.get_state_root_hash(DOMAIN_LEDGER_ID),
        viewNo,
        pp_seq_no=pp_seq_no + 1,
        inst_id=inst_id)
    sender.send(three_pc, rid_x_node)
    looper.run(
        eventually(checkDiscardMsg,
                   [new_node.replicas[inst_id].stasher, ],
                   three_pc, OLD_VIEW,
                   retryWait=1, timeout=messageTimeout))