Code example #1
def test_non_trust_anchor_cannot_add_attribute_for_user(nodeSet, nonTrustAnchor, trustAnchor, addedTrustAnchor, userIdA,
                                            looper, attributeData):
    with whitelistextras('UnauthorizedClientRequest'):
        client, wallet = nonTrustAnchor

        createNym(looper,
                  wallet.defaultId,
                  trustAnchor,
                  addedTrustAnchor,
                  role=None,
                  verkey=wallet.getVerkey())

        attrib = Attribute(name='test1 attribute',
                           origin=wallet.defaultId,
                           value=attributeData,
                           dest=userIdA,
                           ledgerStore=LedgerStore.RAW)
        reqs = makeAttribRequest(client, wallet, attrib)
        timeout = waits.expectedTransactionExecutionTime(len(nodeSet))
        looper.run(eventually(checkRejects,
                              client,
                              reqs[0].reqId,
                              "UnauthorizedClientRequest('Only identity "
                              "owner/guardian can add attribute for that identity'",
                              retryWait=1, timeout=timeout))
Code example #2
File: helper.py Project: chriswinc/indy-node
def checkIdentityRequestSucceed(looper, actingClient, actingWallet, idr):
    def chk():
        assert actingWallet.getTrustAnchoredIdentity(idr).seqNo is not None
    timeout = waits.expectedTransactionExecutionTime(
        len(actingClient.nodeReg)
    )
    looper.run(eventually(chk, retryWait=1, timeout=timeout))
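Every example on this page drives its check through plenum's eventually(...) helper with a retryWait and a timeout derived from waits.expectedTransactionExecutionTime. As a rough illustration only, here is a minimal synchronous sketch of such a retry loop (not plenum's actual implementation, which is coroutine-based and driven by the looper); the eventually_sync name and the wallet/idr objects in the usage comment are hypothetical.

import time

def eventually_sync(check, retryWait=1, timeout=15):
    # Simplified stand-in for an `eventually`-style helper: call `check`
    # repeatedly until it stops raising AssertionError or the timeout expires.
    deadline = time.monotonic() + timeout
    while True:
        try:
            return check()
        except AssertionError:
            if time.monotonic() >= deadline:
                raise
            time.sleep(retryWait)

# Usage sketch mirroring checkIdentityRequestSucceed above (hypothetical objects):
# def chk():
#     assert actingWallet.getTrustAnchoredIdentity(idr).seqNo is not None
# eventually_sync(chk, retryWait=1, timeout=10)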
Code example #3
def nymsAddedInQuickSuccession(nodeSet, addedTrustAnchor, looper,
                               trustAnchor, trustAnchorWallet):
    usigner = DidSigner()
    nym = usigner.verkey
    idy = Identity(identifier=nym)
    trustAnchorWallet.addTrustAnchoredIdentity(idy)
    # Creating a NYM request with the same nym again
    req = idy.ledgerRequest()
    trustAnchorWallet._pending.appendleft((req, idy.identifier))
    reqs = trustAnchorWallet.preparePending()
    trustAnchor.submitReqs(*reqs)

    def check():
        assert trustAnchorWallet._trustAnchored[nym].seqNo

    timeout = waits.expectedTransactionExecutionTime(len(nodeSet))
    looper.run(eventually(check, timeout=timeout))

    timeout = waits.expectedReqNAckQuorumTime()
    looper.run(eventually(checkNacks,
                          trustAnchor,
                          req.reqId,
                          "is already added",
                          retryWait=1, timeout=timeout))
    count = 0
    for node in nodeSet:
        for seq, txn in node.domainLedger.getAllTxn():
            if txn[TXN_TYPE] == NYM and txn[TARGET_NYM] == usigner.identifier:
                count += 1

    assert count == len(nodeSet)
Code example #4
def testPrePrepareDigest(setup, looper, sent1):
    """
    A primary replica sends PRE-PREPARE message with incorrect digest to the
    non primary replicas but non primary replicas should raise suspicion on
    encountering the PRE-PREPARE. Also it should send no PREPARE
    """
    primaryRep, nonPrimaryReps = setup.primaryRep, setup.nonPrimaryReps

    def chkSusp():
        for r in nonPrimaryReps:
            # Every node with non primary replicas of instance 0 should raise
            # suspicion
            susp_code = Suspicions.PPR_DIGEST_WRONG.code
            # Since the node sending bad requests might become primary of
            # some backup instance after view changes, it will again send a
            # PRE-PREPARE with incorrect digest, so other nodes might raise
            # suspicion more than once
            assert len(getNodeSuspicions(r.node,
                                         susp_code)) >= 1
            # No non primary replica should send any PREPARE
            assert len(sentPrepare(r, viewNo=0, ppSeqNo=1)) == 0

    numOfNodes = len(primaryRep.node.nodeReg)
    timeout = waits.expectedTransactionExecutionTime(numOfNodes)
    looper.run(eventually(chkSusp, retryWait=1, timeout=timeout))
Code example #5
def testMultiplePrePrepareWithSameSeqNo(setup, looper, sent1):
    """
    A primary replica sends duplicate PRE-PREPARE messages to the non primary
    replicas but non primary replicas should raise suspicion on encountering
    each duplicate PRE-PREPARE. Also it should send only one PREPARE
    """

    primaryRep, nonPrimaryReps = setup.primaryRep, setup.nonPrimaryReps

    def chkSusp():
        for r in nonPrimaryReps:
            # Every node with non primary replicas of instance 0 should raise
            # suspicion twice, once for each extra PRE-PREPARE request

            suspectingNodes = \
                getNodeSuspicions(r.node,
                                  Suspicions.DUPLICATE_PPR_SENT.code)
            assert len(suspectingNodes) == 2

            # Each non primary replica should just send one PREPARE
            assert len(sentPrepare(r)) == 1

    numOfNodes = len(primaryRep.node.nodeReg)
    timeout = waits.expectedTransactionExecutionTime(numOfNodes)
    looper.run(eventually(chkSusp, retryWait=1, timeout=timeout))
Code example #6
def test_request_older_than_stable_checkpoint_removed(chkFreqPatched, looper, txnPoolNodeSet, sdk_pool_handle,
                                                      sdk_wallet_steward, reqs_for_checkpoint):
    timeout = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))
    max_batch_size = chkFreqPatched.Max3PCBatchSize

    # Send some requests (insufficient for a checkpoint),
    # wait for replies and check that the current checkpoint is not stable
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, 2 * max_batch_size)
    looper.run(eventually(chkChkpoints, txnPoolNodeSet, 1, retryWait=1, timeout=timeout))
    checkRequestCounts(txnPoolNodeSet, 2 * max_batch_size, 2)

    # From the steward send a request creating a user with None role
    sdk_wallet_user = sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_steward)
    looper.run(eventually(chkChkpoints, txnPoolNodeSet, 1, retryWait=1, timeout=timeout))
    checkRequestCounts(txnPoolNodeSet, 2 * max_batch_size + 1, 3)

    # From the created user send a request creating another user.
    # Dynamic validation of this request must fail since a user with None role cannot create users.
    # However, the 3PC-batch with the sent request must be ordered.
    with pytest.raises(RequestRejectedException):
        sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_user)
    looper.run(eventually(chkChkpoints, txnPoolNodeSet, 1, retryWait=1, timeout=timeout))
    checkRequestCounts(txnPoolNodeSet, 2 * max_batch_size + 2, 4)

    # Send more requests to cause checkpoint stabilization
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, max_batch_size)
    # Check that checkpoint is stable now
    # and verify that requests for it were removed
    looper.run(eventually(chkChkpoints, txnPoolNodeSet, 1, 0, retryWait=1, timeout=timeout))
    checkRequestCounts(txnPoolNodeSet, 0, 0)

    # Send more requests to cause new checkpoint
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_steward, reqs_for_checkpoint + 1)
    looper.run(eventually(chkChkpoints, txnPoolNodeSet, 2, 0, retryWait=1, timeout=timeout))
    checkRequestCounts(txnPoolNodeSet, 1, 1)
Code example #7
def testMultipleInstanceChangeMsgsMarkNodeAsSuspicious(looper, txnPoolNodeSet):
    maliciousNode = txnPoolNodeSet[0]
    for i in range(0, 5):
        maliciousNode.send(maliciousNode.view_changer._create_instance_change_msg(i, 0))

    def chk(instId):
        for node in txnPoolNodeSet:
            if node.name != maliciousNode.name:
                args = getAllArgs(node, ViewChanger.process_instance_change_msg)
                assert len(args) == 5
                for arg in args:
                    assert arg['frm'] == maliciousNode.name

    numOfNodes = len(txnPoolNodeSet)
    instanceChangeTimeout = waits.expectedPoolViewChangeStartedTimeout(
        numOfNodes)

    for i in range(0, 5):
        looper.run(eventually(chk, i, retryWait=1,
                              timeout=instanceChangeTimeout))

    def g():
        for node in txnPoolNodeSet:
            if node.name != maliciousNode.name:
                frm, reason, code = getAllArgs(node, Node.reportSuspiciousNode)
                assert frm == maliciousNode.name
                assert isinstance(reason, SuspiciousNode)
                suspectingNodes = \
                    getNodeSuspicions(node,
                                      Suspicions.FREQUENT_INST_CHNG.code)
                assert len(suspectingNodes) == 13

    timeout = waits.expectedTransactionExecutionTime(numOfNodes)
    looper.run(eventually(g, retryWait=1, timeout=timeout))
Code example #8
def testPrepareDigest(setup, looper, sent1):
    """
    A non primary replica sends PREPARE message with incorrect digest to all
    other replicas. Other replicas should raise suspicion the
    PREPARE seen
    """

    primaryRep, nonPrimaryReps, faultyRep = setup.primaryRep, \
                                            setup.nonPrimaryReps, \
                                            setup.faultyRep

    def chkSusp():
        for r in (primaryRep, *nonPrimaryReps):
            if r.name != faultyRep.name:
                # Every node except the one from which PREPARE with incorrect
                # digest was sent should raise suspicion for the PREPARE
                # message
                assert len(
                    getNodeSuspicions(
                        r.node,
                        Suspicions.PR_DIGEST_WRONG.code)) == 1

    numOfNodes = len(primaryRep.node.nodeReg)
    timeout = waits.expectedTransactionExecutionTime(numOfNodes)
    looper.run(eventually(chkSusp, retryWait=1, timeout=timeout))
Code example #9
def test_lagged_checkpoint_completion(chkFreqPatched, looper, txnPoolNodeSet,
                                      sdk_wallet_client, sdk_pool_handle):
    """
    One node in a pool lags to order the last 3PC-batch in a checkpoint so that
    when it eventually orders this 3PC-batch and thus completes the checkpoint
    it has already received and stashed the corresponding checkpoint messages
    from all the other nodes. The test verifies that the node successfully
    processes the stashed checkpoint messages and stabilizes the checkpoint.
    """
    slow_node = txnPoolNodeSet[-1]

    # All the nodes in the pool normally order all the 3PC-batches in a
    # checkpoint except the last one. The last 3PC-batch in the checkpoint is
    # ordered by all the nodes except one slow node, because this node is slow
    # to receive Commits.
    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 4)

    slow_node.nodeIbStasher.delay(cDelay())

    sdk_send_random_and_check(looper, txnPoolNodeSet,
                              sdk_pool_handle, sdk_wallet_client, 1)

    # All the other nodes complete the checkpoint and send Checkpoint messages
    # to others. The slow node receives and stashes these messages because it
    # has not completed the checkpoint.
    def check():
        for replica in slow_node.replicas.values():
            assert len(replica.checkpoints) == 1
            assert (1, 5) in replica.checkpoints
            assert replica.checkpoints[(1, 5)].seqNo == 4
            assert replica.checkpoints[(1, 5)].digest is None
            assert replica.checkpoints[(1, 5)].isStable is False

            assert len(replica.stashedRecvdCheckpoints) == 1
            assert 0 in replica.stashedRecvdCheckpoints
            assert len(replica.stashedRecvdCheckpoints[0]) == 1
            assert (1, 5) in replica.stashedRecvdCheckpoints[0]
            assert len(replica.stashedRecvdCheckpoints[0][(1, 5)]) == \
                len(txnPoolNodeSet) - 1

    stabilization_timeout = \
        waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))
    looper.run(eventually(check, timeout=stabilization_timeout))

    # Eventually the slow node receives Commits, orders the last 3PC-batch in
    # the checkpoint and thus completes it, processes the stashed checkpoint
    # messages and stabilizes the checkpoint.
    slow_node.nodeIbStasher.reset_delays_and_process_delayeds()

    looper.runFor(waits.expectedOrderingTime(len(txnPoolNodeSet)))

    for replica in slow_node.replicas.values():
        assert len(replica.checkpoints) == 1
        assert (1, 5) in replica.checkpoints
        assert replica.checkpoints[(1, 5)].seqNo == 5
        assert replica.checkpoints[(1, 5)].digest is not None
        assert replica.checkpoints[(1, 5)].isStable is True

        assert len(replica.stashedRecvdCheckpoints) == 0
Code example #10
def test_primary_recvs_3phase_message_outside_watermarks(perf_chk_patched, chkFreqPatched, looper, txnPoolNodeSet,
                                                         sdk_pool_handle, sdk_wallet_client, reqs_for_logsize):
    """
    One of the primary starts getting lot of requests, more than his log size
    and queues up requests since they will go beyond its watermarks. This
    happens since other nodes are slow in processing its PRE-PREPARE.
    Eventually this primary will send PRE-PREPARE for all requests and those
    requests will complete
    """
    tconf = perf_chk_patched
    delay = 2
    instId = 0
    reqs_to_send = 2 * reqs_for_logsize + 1
    logger.debug('Will send {} requests'.format(reqs_to_send))

    npr = getNonPrimaryReplicas(txnPoolNodeSet, instId)
    pr = getPrimaryReplica(txnPoolNodeSet, instId)
    orderedCount = pr.stats.get(TPCStat.OrderSent)

    for r in npr:
        r.node.nodeIbStasher.delay(ppDelay(delay, instId))
        r.node.nodeIbStasher.delay(pDelay(delay, instId))

    tm_exec_1_batch = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))
    batch_count = math.ceil(reqs_to_send / tconf.Max3PCBatchSize)
    total_timeout = (tm_exec_1_batch + delay) * batch_count

    def chk():
        assert orderedCount + batch_count == pr.stats.get(TPCStat.OrderSent)

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, reqs_to_send)
    looper.run(eventually(chk, retryWait=1, timeout=total_timeout))
Code example #11
File: helper.py Project: michaeldboyd/indy-plenum
def wait_for_requests_ordered(looper, nodes, requests):
    node_count = len(nodes)
    timeout_per_request = waits.expectedTransactionExecutionTime(node_count)
    total_timeout = (1 + len(requests) / 10) * timeout_per_request
    coros = [partial(check_request_ordered,
                     node,
                     request)
             for (node, request) in list(itertools.product(nodes, requests))]
    looper.run(eventuallyAll(*coros, retryWait=1, totalTimeout=total_timeout))
Code example #12
File: test_nym_attrib.py Project: dougives/indy-node
def testClientGetsResponseWithoutConsensusForUsedReqId(
        nodeSet,
        looper,
        steward,
        addedTrustAnchor,
        trustAnchor,
        userWalletA,
        attributeName,
        attributeData,
        addedRawAttribute):
    lastReqId = None
    replies = {}
    for msg, sender in reversed(trustAnchor.inBox):
        if msg[OP_FIELD_NAME] == REPLY:
            if not lastReqId:
                lastReqId = get_req_id(msg[f.RESULT.nm])
            if get_req_id(msg[f.RESULT.nm]) == lastReqId:
                replies[sender] = msg
            if len(replies) == len(nodeSet):
                break

    trustAnchorWallet = addedTrustAnchor
    attrib = Attribute(name=attributeName,
                       origin=trustAnchorWallet.defaultId,
                       value=attributeData,
                       dest=userWalletA.defaultId,
                       ledgerStore=LedgerStore.RAW)
    trustAnchorWallet.addAttribute(attrib)
    req = trustAnchorWallet.preparePending()[0]
    _, key = trustAnchorWallet._prepared.pop((req.identifier, req.reqId))
    req.reqId = lastReqId

    req.signature = trustAnchorWallet.signMsg(
        msg=req.signingState(identifier=req.identifier),
        identifier=req.identifier)
    trustAnchorWallet._prepared[req.identifier, req.reqId] = req, key
    trustAnchor.submitReqs(req)

    def chk():
        nonlocal trustAnchor, lastReqId, replies
        for node in nodeSet:
            last = node.spylog.getLast(TestNode.getReplyFromLedger.__name__)
            assert last
            result = last.result
            assert result is not None

            replies[node.clientstack.name][f.RESULT.nm].pop(TXN_TIME, None)
            result.result.pop(TXN_TIME, None)

            assert {k: v for k, v in result.result.items() if v is not None}.items() <= \
                   replies[node.clientstack.name][f.RESULT.nm].items()

    timeout = waits.expectedTransactionExecutionTime(len(nodeSet))
    looper.run(eventually(chk, retryWait=1, timeout=timeout))
Code example #13
File: helper.py Project: dhh1128/indy-client
def ensureNymAdded(cli, nym, role=None):
    ensureConnectedToTestEnv(cli)
    cmd = "send NYM {dest}={nym}".format(dest=TARGET_NYM, nym=nym)
    if role:
        cmd += " {ROLE}={role}".format(ROLE=ROLE, role=role)
    cli.enterCmd(cmd)
    timeout = waits.expectedTransactionExecutionTime(len(cli.nodeReg))
    cli.looper.run(
        eventually(chkNymAddedOutput, cli, nym, retryWait=1, timeout=timeout))

    timeout = waits.expectedTransactionExecutionTime(len(cli.nodeReg))
    cli.enterCmd("send GET_NYM {dest}={nym}".format(dest=TARGET_NYM, nym=nym))
    cli.looper.run(eventually(checkGetNym, cli, nym, retryWait=1, timeout=timeout))

    cli.enterCmd('send ATTRIB {dest}={nym} raw={raw}'.
                 format(dest=TARGET_NYM, nym=nym,
                        # raw='{\"attrName\":\"attrValue\"}'))
                        raw=json.dumps({"attrName": "attrValue"})))
    timeout = waits.expectedTransactionExecutionTime(len(cli.nodeReg))
    cli.looper.run(eventually(checkAddAttr, cli, retryWait=1, timeout=timeout))
Code example #14
File: helper.py Project: michaeldboyd/indy-plenum
def sdk_eval_timeout(req_count: int, node_count: int,
                     customTimeoutPerReq: float = None, add_delay_to_timeout: float = 0):
    timeout_per_request = customTimeoutPerReq or waits.expectedTransactionExecutionTime(node_count)
    timeout_per_request += add_delay_to_timeout
    # Here we try to take into account the timeout for executing
    # N requests: total_timeout should be in the range
    # timeout_per_request < total_timeout < timeout_per_request * N
    # We cannot just take (timeout_per_request * N) because it is too huge
    # (for timeout_per_request=5 and N=10, total_timeout=50 sec),
    # so let's start with a simple formula:
    return (1 + req_count / 10) * timeout_per_request
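For a quick sense of the numbers produced by the formula above, here is a small standalone illustration with an assumed per-request timeout of 5 seconds (in the helper the real value comes from waits.expectedTransactionExecutionTime):

# Illustration of the total-timeout formula above, with assumed values.
timeout_per_request = 5.0  # assumed; normally waits.expectedTransactionExecutionTime(node_count)
for req_count in (1, 10, 50):
    total_timeout = (1 + req_count / 10) * timeout_per_request
    print(req_count, total_timeout)  # -> 5.5, 10.0, 30.0 seconds
# For larger request counts this stays well below the naive
# req_count * timeout_per_request (e.g. 250 sec for 50 requests).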
Code example #15
def test_old_checkpoint_deleted(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, reqs_for_checkpoint):
    """
    Send requests more than twice of `CHK_FREQ`, there should be one new stable
    checkpoint on each replica. The old stable checkpoint should be removed
    """
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 2 * reqs_for_checkpoint)

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 1)

    timeout = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))
    looper.run(eventually(chkChkpoints, txnPoolNodeSet, 2, 0, retryWait=1, timeout=timeout))
Code example #16
def test_stable_checkpoint_when_one_instance_slow(chkFreqPatched, looper, txnPoolNodeSet, sdk_pool_handle,
                                                  sdk_wallet_client, reqs_for_checkpoint):
    delay = 5
    pr = getPrimaryReplica(txnPoolNodeSet, 1)
    slowNode = pr.node
    otherNodes = [n for n in txnPoolNodeSet if n != slowNode]
    for n in otherNodes:
        n.nodeIbStasher.delay(ppDelay(delay, 1))

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, reqs_for_checkpoint)
    timeout = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)) + delay
    looper.run(eventually(chkChkpoints, txnPoolNodeSet, 1, 0, retryWait=1, timeout=timeout))
Code example #17
File: conftest.py Project: Artemkaaas/sovrin-node
def nodeThetaAdded(looper, nodeSet, tdirWithPoolTxns, tconf, steward,
                   stewardWallet, allPluginsPath, testNodeClass,
                   testClientClass, tdir):
    newStewardName = "testClientSteward" + randomString(3)
    newNodeName = "Theta"
    newSteward, newStewardWallet = getClientAddedWithRole(nodeSet, tdir,
                                                          looper, steward,
                                                          stewardWallet,
                                                          newStewardName,
                                                          role=STEWARD)

    sigseed = randomString(32).encode()
    nodeSigner = SimpleSigner(seed=sigseed)

    (nodeIp, nodePort), (clientIp, clientPort) = genHa(2)

    data = {
        NODE_IP: nodeIp,
        NODE_PORT: nodePort,
        CLIENT_IP: clientIp,
        CLIENT_PORT: clientPort,
        ALIAS: newNodeName,
        SERVICES: [VALIDATOR, ]
    }

    node = Node(nodeSigner.identifier, data, newStewardWallet.defaultId)

    newStewardWallet.addNode(node)
    reqs = newStewardWallet.preparePending()
    req, = newSteward.submitReqs(*reqs)

    waitForSufficientRepliesForRequests(looper, newSteward, requests=[req])

    def chk():
        assert newStewardWallet.getNode(node.id).seqNo is not None

    timeout = plenumWaits.expectedTransactionExecutionTime(len(nodeSet))
    looper.run(eventually(chk, retryWait=1, timeout=timeout))

    initNodeKeysForBothStacks(newNodeName, tdirWithPoolTxns, sigseed, override=True)

    newNode = testNodeClass(newNodeName, basedirpath=tdir, config=tconf,
                            ha=(nodeIp, nodePort), cliha=(clientIp, clientPort),
                            pluginPaths=allPluginsPath)

    nodeSet.append(newNode)
    looper.add(newNode)
    looper.run(checkNodesConnected(nodeSet))
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward,
                                                  *nodeSet)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward,
                                                  *nodeSet)
    return newSteward, newStewardWallet, newNode
Code example #18
File: helper.py Project: Artemkaaas/sovrin-node
async def send(self, op, org=None):
    org = org if org else self.actor
    req = org.client.submit(op)[0]
    timeout = plenumWaits.expectedTransactionExecutionTime(
        len(self.nodes))
    for node in self.nodes:
        await eventually(checkLastClientReqForNode,
                         node,
                         req,
                         retryWait=1,
                         timeout=timeout)
    return req
Code example #19
def testClientGetsResponseWithoutConsensusForUsedReqId(nodeSet, looper, steward,
                                                       addedTrustAnchor, trustAnchor,
                                                       userWalletA,
                                                       attributeName,
                                                       attributeData,
                                                       addedRawAttribute):
    lastReqId = None
    replies = {}
    for msg, sender in reversed(trustAnchor.inBox):
        if msg[OP_FIELD_NAME] == REPLY:
            if not lastReqId:
                lastReqId = msg[f.RESULT.nm][f.REQ_ID.nm]
            if msg.get(f.RESULT.nm, {}).get(f.REQ_ID.nm) == lastReqId:
                replies[sender] = msg
            if len(replies) == len(nodeSet):
                break

    trustAnchorWallet = addedTrustAnchor
    attrib = Attribute(name=attributeName,
                       origin=trustAnchorWallet.defaultId,
                       value=attributeData,
                       dest=userWalletA.defaultId,
                       ledgerStore=LedgerStore.RAW)
    trustAnchorWallet.addAttribute(attrib)
    req = trustAnchorWallet.preparePending()[0]
    _, key = trustAnchorWallet._prepared.pop((req.identifier, req.reqId))
    req.reqId = lastReqId

    req.signature = trustAnchorWallet.signMsg(msg=req.signingState,
                                              identifier=req.identifier)
    trustAnchorWallet._prepared[req.identifier, req.reqId] = req, key
    trustAnchor.submitReqs(req)

    def chk():
        nonlocal trustAnchor, lastReqId, replies
        for node in nodeSet:
            last = node.spylog.getLast(TestNode.getReplyFromLedger.__name__)
            assert last
            result = last.result
            assert result is not None

            # TODO: Time is not equal as some precision is lost while storing
            # in OrientDB; using seconds may be an option, need to think of
            # use cases where time in milliseconds is required
            replies[node.clientstack.name][f.RESULT.nm].pop(TXN_TIME, None)
            result.result.pop(TXN_TIME, None)

            assert replies[node.clientstack.name][f.RESULT.nm] == \
                   {k:v for k, v in result.result.items() if v is not None}

    timeout = waits.expectedTransactionExecutionTime(len(nodeSet))
    looper.run(eventually(chk, retryWait=1, timeout=timeout))
Code example #20
File: helper.py Project: Artemkaaas/sovrin-node
def addAttributeAndCheck(looper, client, wallet, attrib):
    old = wallet.pendingCount
    pending = wallet.addAttribute(attrib)
    assert pending == old + 1
    reqs = wallet.preparePending()
    client.submitReqs(*reqs)

    def chk():
        assert wallet.getAttribute(attrib).seqNo is not None

    timeout = plenumWaits.expectedTransactionExecutionTime(client.totalNodes)
    looper.run(eventually(chk, retryWait=1, timeout=timeout))
    return wallet.getAttribute(attrib).seqNo
Code example #21
def testNonTrustAnchoredNymCanDoGetNym(nodeSet, addedTrustAnchor,
                                   trustAnchorWallet, tdir, looper):
    signer = DidSigner()
    someClient, _ = genTestClient(nodeSet, tmpdir=tdir, usePoolLedger=True)
    wallet = Wallet(someClient.name)
    wallet.addIdentifier(signer=signer)
    someClient.registerObserver(wallet.handleIncomingReply)
    looper.add(someClient)
    looper.run(someClient.ensureConnectedToNodes())
    needle = trustAnchorWallet.defaultId
    makeGetNymRequest(someClient, wallet, needle)
    timeout = waits.expectedTransactionExecutionTime(len(nodeSet))
    looper.run(eventually(someClient.hasNym, needle, retryWait=1, timeout=timeout))
Code example #22
def test_repeated_request_not_processed_if_already_ordered(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    delta = txnPoolNodeSet[3]
    initial_ledger_size = delta.domainLedger.size

    one_req = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    sdk_send_and_check(one_req, looper, txnPoolNodeSet, sdk_pool_handle)

    sdk_send_signed_requests(sdk_pool_handle, one_req)
    looper.runFor(waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)))

    for node in txnPoolNodeSet:
        assert node.domainLedger.size - initial_ledger_size == 1
Code example #23
File: helper.py Project: michaeldboyd/indy-plenum
def sdk_check_request_is_not_returned_to_nodes(looper, nodeSet, request):
    instances = range(getNoInstances(len(nodeSet)))
    coros = []
    for node, inst_id in itertools.product(nodeSet, instances):
        c = partial(checkRequestNotReturnedToNode,
                    node=node,
                    identifier=request['identifier'],
                    reqId=request['reqId'],
                    instId=inst_id
                    )
        coros.append(c)
    timeout = waits.expectedTransactionExecutionTime(len(nodeSet))
    looper.run(eventuallyAll(*coros, retryWait=1, totalTimeout=timeout))
Code example #24
def test_node_erases_last_sent_pp_key_on_view_change(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf):

    # Get a node with a backup primary replica
    replica = getPrimaryReplica(txnPoolNodeSet, instId=backup_inst_id)
    node = replica.node

    # Send some 3PC-batches and wait until the replica orders the 3PC-batches
    sdk_send_batches_of_random(looper, txnPoolNodeSet,
                               sdk_pool_handle, sdk_wallet_client,
                               num_reqs=3, num_batches=3,
                               timeout=tconf.Max3PCBatchWait)

    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc == (0, 3)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Ensure that there is a stored last sent PrePrepare key on the node
    assert LAST_SENT_PRE_PREPARE in node.nodeStatusDB

    # Make the pool perform view change
    ensure_view_change(looper, txnPoolNodeSet)
    ensureElectionsDone(looper, txnPoolNodeSet)

    # Verify that the node has erased the stored last sent PrePrepare key
    assert LAST_SENT_PRE_PREPARE not in node.nodeStatusDB

    # Send a 3PC-batch and ensure that the replica orders it
    sdk_send_batches_of_random(looper, txnPoolNodeSet,
                               sdk_pool_handle, sdk_wallet_client,
                               num_reqs=1, num_batches=1,
                               timeout=tconf.Max3PCBatchWait)

    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc == (1, 1)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))
Code example #25
File: helper.py Project: michaeldboyd/indy-plenum
def sdk_get_reply(looper, sdk_req_resp, timeout=None):
    req_json, resp_task = sdk_req_resp
    # TODO: change the timeout evaluation logic once the SDK allows tuning the timeout from outside
    if timeout is None:
        timeout = waits.expectedTransactionExecutionTime(7)
    try:
        resp = looper.run(asyncio.wait_for(resp_task, timeout=timeout))
        resp = json.loads(resp)
    except IndyError as e:
        resp = e.error_code
    except TimeoutError as e:
        resp = ErrorCode.PoolLedgerTimeout

    return req_json, resp
Code example #26
File: helper.py Project: dhh1128/indy-client
def submitPoolUpgrade(looper, senderClient, senderWallet, name, action, version,
                      schedule, timeout, sha256):
    upgrade = Upgrade(name, action, schedule, version, sha256, timeout,
                      senderWallet.defaultId)
    senderWallet.doPoolUpgrade(upgrade)
    reqs = senderWallet.preparePending()
    senderClient.submitReqs(*reqs)

    def check():
        assert senderWallet._upgrades[upgrade.key].seqNo
    timeout = waits.expectedTransactionExecutionTime(
        len(senderClient.nodeReg)
    )
    looper.run(eventually(check, timeout=timeout))
Code example #27
File: helper.py Project: Artemkaaas/sovrin-node
def getAttribute(looper, trustAnchor, trustAnchorWallet, userIdA, attributeName,
                 attributeValue):
    # Should be renamed to get_attribute_and_check
    attrib = Attribute(name=attributeName,
                       value=None,
                       dest=userIdA,
                       ledgerStore=LedgerStore.RAW)
    req = trustAnchorWallet.requestAttribute(attrib,
                                             sender=trustAnchorWallet.defaultId)
    trustAnchor.submitReqs(req)
    timeout = waits.expectedTransactionExecutionTime(len(trustAnchor.nodeReg))
    return looper.run(eventually(checkGetAttr, req.key, trustAnchor,
                                 attributeName, attributeValue, retryWait=1,
                                 timeout=timeout))
Code example #28
def test_belated_propagate_not_processed_if_already_ordered(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    delta = txnPoolNodeSet[3]
    initial_ledger_size = delta.domainLedger.size
    delta.nodeIbStasher.delay(ppgDelay(300, 'Gamma'))

    one_req = sdk_signed_random_requests(looper, sdk_wallet_client, 1)
    sdk_send_and_check(one_req, looper, txnPoolNodeSet, sdk_pool_handle)

    delta.nodeIbStasher.reset_delays_and_process_delayeds()
    looper.runFor(waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)))

    for node in txnPoolNodeSet:
        assert node.domainLedger.size - initial_ledger_size == 1
Code example #29
File: helper.py Project: dougives/indy-node
def createNym(looper, nym, creatorClient, creatorWallet: Wallet, role=None,
              verkey=None):
    idy = Identity(identifier=nym,
                   verkey=verkey,
                   role=role)
    creatorWallet.addTrustAnchoredIdentity(idy)
    reqs = creatorWallet.preparePending()
    creatorClient.submitReqs(*reqs)

    def check():
        assert creatorWallet._trustAnchored[nym].seqNo

    timeout = waits.expectedTransactionExecutionTime(
        len(creatorClient.nodeReg)
    )
    looper.run(eventually(check, retryWait=1, timeout=timeout))
Code example #30
def testTrustyCancelsUpgrade(validUpgradeSent, looper, nodeSet, trustee,
                             trusteeWallet, validUpgrade):
    validUpgradeCopy = deepcopy(validUpgrade)
    validUpgradeCopy[ACTION] = CANCEL
    validUpgradeCopy[JUSTIFICATION] = '"not gonna give you one"'

    validUpgradeCopy.pop(SCHEDULE, None)
    upgrade, req = sendUpgrade(trustee, trusteeWallet, validUpgradeCopy)

    def check():
        assert trusteeWallet.getPoolUpgrade(upgrade.key).seqNo

    timeout = plenumWaits.expectedTransactionExecutionTime(len(nodeSet))
    looper.run(eventually(check, retryWait=1, timeout=timeout))

    looper.run(eventually(checkNoUpgradeScheduled, nodeSet, retryWait=1,
                          timeout=waits.expectedNoUpgradeScheduled()))
Code example #31
def testLoggingTxnStateWhenCommitFails(looper, txnPoolNodeSet, sdk_pool_handle,
                                       sdk_wallet_steward, logsearch):
    logsPropagate, _ = logsearch(levels=['INFO'],
                                 files=['propagator.py'],
                                 funcs=['propagate'],
                                 msgs=['propagating.*request.*from client'])

    logsOrdered, _ = logsearch(levels=['INFO'],
                               files=['replica.py'],
                               funcs=['order_3pc_key'],
                               msgs=['ordered batch request'])

    logsCommitFail, _ = logsearch(levels=['WARNING'],
                                  files=['node.py'],
                                  funcs=['executeBatch'],
                                  msgs=['commit failed for batch request'])

    seed = randomString(32)
    wh, _ = sdk_wallet_steward

    nym_request, _ = looper.loop.run_until_complete(
        prepare_nym_request(sdk_wallet_steward, seed, "name", None))

    sdk_sign_and_send_prepared_request(looper, sdk_wallet_steward,
                                       sdk_pool_handle, nym_request)

    class SomeError(Exception):
        pass

    def commitPatched(node, commitOrig, *args, **kwargs):
        req_handler = node.get_req_handler(ledger_id=DOMAIN_LEDGER_ID)
        req_handler.commit = commitOrig
        raise SomeError(ERORR_MSG)

    excCounter = 0

    def executeBatchPatched(node, executeBatchOrig, *args, **kwargs):
        nonlocal excCounter
        try:
            executeBatchOrig(*args, **kwargs)
        except SomeError:
            excCounter += 1
            node.executeBatch = executeBatchOrig
            pass

    def checkSufficientExceptionsHappend():
        assert excCounter == len(txnPoolNodeSet)
        return

    for node in txnPoolNodeSet:
        req_handler = node.get_req_handler(ledger_id=DOMAIN_LEDGER_ID)
        req_handler.commit = functools.partial(commitPatched, node,
                                               req_handler.commit)
        node.executeBatch = functools.partial(executeBatchPatched, node,
                                              node.executeBatch)

    timeout = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))
    looper.run(
        eventually(checkSufficientExceptionsHappend,
                   retryWait=1,
                   timeout=timeout))

    reqId = str(json.loads(nym_request)['reqId'])
    assert any(reqId in record.getMessage() for record in logsPropagate)
    assert any(reqId in record.getMessage() for record in logsOrdered)
    assert any(reqId in record.getMessage() for record in logsCommitFail)
    assert any(ERORR_MSG in record.getMessage() for record in logsCommitFail)
Code example #32
def testReplicasRejectSamePrePrepareMsg(looper, txnPoolNodeSet,
                                        sdk_pool_handle, sdk_wallet_client):
    """
    Replicas should not accept PRE-PREPARE for view "v" and prepare sequence
    number "n" if it has already accepted a request with view number "v" and
    sequence number "n"

    """
    numOfNodes = 4
    fValue = getMaxFailures(numOfNodes)
    primaryRepl = getPrimaryReplica(txnPoolNodeSet, 1)
    logger.debug("Primary Replica: {}".format(primaryRepl))
    nonPrimaryReplicas = getNonPrimaryReplicas(txnPoolNodeSet, 1)
    logger.debug("Non Primary Replicas: " + str(nonPrimaryReplicas))

    # Delay COMMITs so request is not ordered and checks can be made
    c_delay = 10
    for node in txnPoolNodeSet:
        node.nodeIbStasher.delay(cDelay(delay=c_delay, instId=1))

    req1 = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                                    1)[0]
    request1 = sdk_json_to_request_object(req1[0])
    for npr in nonPrimaryReplicas:
        looper.run(
            eventually(checkPrepareReqSent,
                       npr,
                       request1.key,
                       primaryRepl.viewNo,
                       retryWait=1))
    prePrepareReq = primaryRepl._ordering_service.sent_preprepares[
        primaryRepl.viewNo, primaryRepl.lastPrePrepareSeqNo]
    looper.run(
        eventually(checkPrePrepareReqRecvd,
                   nonPrimaryReplicas,
                   prePrepareReq,
                   retryWait=1))

    # logger.debug("Patching the primary replica's pre-prepare sending method ")
    # orig_method = primaryRepl.sendPrePrepare

    # def patched(self, ppReq):
    #     self._ordering_service.sent_preprepares[ppReq.viewNo, ppReq.ppSeqNo] = ppReq
    #     ppReq = updateNamedTuple(ppReq, **{f.PP_SEQ_NO.nm: 1})
    #     self.send(ppReq, TPCStat.PrePrepareSent)
    #
    # primaryRepl.sendPrePrepare = types.MethodType(patched, primaryRepl)
    logger.debug(
        "Decrementing the primary replica's pre-prepare sequence number by "
        "one...")
    primaryRepl._ordering_service._lastPrePrepareSeqNo -= 1
    view_no = primaryRepl.viewNo
    request2 = sdk_json_to_request_object(
        sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                                 1)[0][0])
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet))
    looper.run(
        eventually(checkPrePrepareReqSent,
                   primaryRepl,
                   request2,
                   retryWait=1,
                   timeout=timeout))

    # Since the node is malicious, it will not be able to process requests due
    # to conflicts in PRE-PREPARE
    primaryRepl.node.stop()
    looper.removeProdable(primaryRepl.node)

    reqIdr = [request2.digest]
    prePrepareReq = PrePrepare(
        primaryRepl.instId, view_no, primaryRepl.lastPrePrepareSeqNo,
        get_utc_epoch(), reqIdr, init_discarded(),
        primaryRepl.batchDigest([request2]), DOMAIN_LEDGER_ID,
        primaryRepl._ordering_service.get_state_root_hash(DOMAIN_LEDGER_ID),
        primaryRepl._ordering_service.get_txn_root_hash(DOMAIN_LEDGER_ID), 0,
        True, [])

    logger.debug("""Checking whether all the non primary replicas have received
                the pre-prepare request with same sequence number""")
    timeout = waits.expectedPrePrepareTime(len(txnPoolNodeSet))
    looper.run(
        eventually(checkPrePrepareReqRecvd,
                   nonPrimaryReplicas,
                   prePrepareReq,
                   retryWait=1,
                   timeout=timeout))
    logger.debug("""Check that none of the non primary replicas didn't send
    any prepare message "
                             in response to the pre-prepare message""")
    timeout = waits.expectedPrepareTime(len(txnPoolNodeSet))
    looper.runFor(timeout)  # expect prepare processing timeout

    # check if prepares have not been sent
    for npr in nonPrimaryReplicas:
        with pytest.raises(AssertionError):
            looper.run(
                eventually(checkPrepareReqSent,
                           npr,
                           request2.key,
                           view_no,
                           retryWait=1,
                           timeout=timeout))

    timeout = waits.expectedTransactionExecutionTime(
        len(txnPoolNodeSet)) + c_delay
    result1 = sdk_get_replies(looper, [req1])[0][1]
    logger.debug("request {} gives result {}".format(request1, result1))
Code example #33
def test_stashed_messages_processed_on_backup_replica_ordering_resumption(
        looper, chkFreqPatched, reqs_for_checkpoint,
        one_replica_and_others_in_backup_instance,
        sdk_pool_handle, sdk_wallet_client, view_change_done,
        txnPoolNodeSet):
    """
    Verifies resumption of ordering 3PC-batches on a backup replica
    on detection of a lag in checkpoints in case it is detected after
    some 3PC-messages related to the next checkpoint have already been stashed
    as laying outside of the watermarks.
    Please note that to verify this case the config is set up so that
    LOG_SIZE == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ
    """
    global first_run
    batches_count = get_pp_seq_no(txnPoolNodeSet)

    slow_replica, other_replicas = one_replica_and_others_in_backup_instance
    view_no = slow_replica.viewNo
    low_watermark = slow_replica.h

    # Send a request and ensure that the replica orders the batch for it
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
    batches_count += 1

    looper.run(
        eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc == (view_no, batches_count)),
                   slow_replica,
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Don't receive Commits from two replicas
    slow_replica.node.nodeIbStasher.delay(
        cDelay(instId=1, sender_filter=other_replicas[0].node.name))
    slow_replica.node.nodeIbStasher.delay(
        cDelay(instId=1, sender_filter=other_replicas[1].node.name))
    slow_replica.node.nodeIbStasher.delay(
        msg_rep_delay(types_to_delay=[COMMIT])
    )

    # Send a request for which the replica will not be able to order the batch
    # due to an insufficient count of Commits
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Receive further Commits from now on
    slow_replica.node.nodeIbStasher.drop_delayeds()
    slow_replica.node.nodeIbStasher.resetDelays()

    # Send requests but in a quantity insufficient
    # for catch-up number of checkpoints
    reqs_until_checkpoints = reqs_for_checkpoint - get_pp_seq_no([r.node for r in other_replicas]) % reqs_for_checkpoint
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                             Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP *
                             reqs_until_checkpoints)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Don't receive Checkpoints
    slow_replica.node.nodeIbStasher.delay(chk_delay(instId=1))

    # Send more requests to reach catch-up number of checkpoints
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                             reqs_for_checkpoint)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Ensure that there are no 3PC-messages stashed
    # as lying outside of the watermarks
    assert slow_replica.stasher.stash_size(STASH_WATERMARKS) == 0

    # Send a request for which the batch will be outside of the watermarks
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Ensure that the replica has not ordered any batches
    # after the very first one
    assert slow_replica.last_ordered_3pc == (view_no, batches_count)

    # Ensure that the watermarks have not been shifted since the view start
    assert slow_replica.h == low_watermark
    assert slow_replica.H == (sys.maxsize if first_run else low_watermark + LOG_SIZE)

    # Ensure that there are some quorumed stashed checkpoints
    check_num_quorumed_received_checkpoints(slow_replica, 1)

    # Ensure that now there are 3PC-messages stashed
    # as lying outside of the watermarks
    if not first_run:
        assert slow_replica.stasher.stash_size(STASH_WATERMARKS) == incoming_3pc_msgs_count(len(txnPoolNodeSet))

    # Receive belated Checkpoints
    slow_replica.node.nodeIbStasher.reset_delays_and_process_delayeds()
    batches_count = get_pp_seq_no([r.node for r in other_replicas])

    # Ensure that the replica has ordered the batch for the last sent request
    looper.run(
        eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc ==
                                     (view_no, batches_count)),
                   slow_replica,
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Ensure that the watermarks have been shifted so that the lower watermark
    # now equals to the end of the last stable checkpoint in the instance
    assert slow_replica.h == low_watermark + (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ
    assert slow_replica.H == low_watermark + (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + LOG_SIZE

    # Ensure that now there are no quorumed stashed checkpoints
    check_num_quorumed_received_checkpoints(slow_replica, 0)

    # Ensure that now there are no 3PC-messages stashed
    # as lying outside of the watermarks
    assert slow_replica.stasher.stash_size(STASH_WATERMARKS) == 0

    # Send a request and ensure that the replica orders the batch for it
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
    batches_count += 1

    looper.run(
        eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc ==
                                           (view_no, batches_count)),
                   slow_replica,
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))
    first_run = False
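The watermark assertions at the end of the test above follow directly from the configured checkpoint parameters. Below is a standalone sketch of that arithmetic, with assumed values for CHK_FREQ and Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP that satisfy the relationship stated in the docstring:

# Assumed configuration values consistent with
# LOG_SIZE == (STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ
CHK_FREQ = 5
STASHED_CHECKPOINTS_BEFORE_CATCHUP = 1
LOG_SIZE = (STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ  # 10

low_watermark = 0
# After the lagging backup replica stabilizes the checkpoint, the watermarks
# are expected to shift to the end of the last stable checkpoint:
new_h = low_watermark + (STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ  # 10
new_H = new_h + LOG_SIZE                                                     # 20
print(new_h, new_H)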
Code example #34
def test_backup_primary_restores_pp_seq_no_if_view_is_same(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, tconf,
        tdir, allPluginsPath, chkFreqPatched, view_no):
    # Get a node with a backup primary replica
    replica = getPrimaryReplica(txnPoolNodeSet, instId=backup_inst_id)
    batches_count = 0 if view_no == 0 else 1
    node = replica.node
    # Send some 3PC-batches and wait until the replica orders the 3PC-batches
    sdk_send_batches_of_random(looper,
                               txnPoolNodeSet,
                               sdk_pool_handle,
                               sdk_wallet_client,
                               num_reqs=7,
                               num_batches=num_batches,
                               timeout=tconf.Max3PCBatchWait)
    batches_count += num_batches

    looper.run(
        eventually(lambda r: assertExp(r.last_ordered_3pc ==
                                       (view_no, batches_count)),
                   replica,
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Check view no of the node and lastPrePrepareSeqNo of the replica
    assert node.viewNo == view_no
    assert replica.lastPrePrepareSeqNo == batches_count

    # Ensure that the node has stored the last sent PrePrepare key
    assert LAST_SENT_PRE_PREPARE in node.nodeStatusDB
    last_sent_pre_prepare_key = \
        node_status_db_serializer.deserialize(
            node.nodeStatusDB.get(LAST_SENT_PRE_PREPARE))
    assert last_sent_pre_prepare_key == {
        str(backup_inst_id): [view_no, batches_count]
    }

    # Restart the node containing the replica
    disconnect_node_and_ensure_disconnected(looper,
                                            txnPoolNodeSet,
                                            node.name,
                                            stopNode=True)
    looper.removeProdable(node)
    txnPoolNodeSet.remove(node)

    node = start_stopped_node(node, looper, tconf, tdir, allPluginsPath)
    txnPoolNodeSet.append(node)

    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper, txnPoolNodeSet)

    replica = node.replicas[backup_inst_id]

    # Verify that after the successful propagate primary procedure the replica
    # (which must still be the primary in its instance) has restored
    # lastPrePrepareSeqNo and adjusted last_ordered_3pc and shifted
    # the watermarks correspondingly
    assert node.viewNo == view_no
    assert replica.isPrimary
    assert replica.lastPrePrepareSeqNo == batches_count
    assert replica.last_ordered_3pc == (view_no, batches_count)
    assert replica.h == batches_count
    assert replica.H == batches_count + LOG_SIZE

    # Verify also that the stored last sent PrePrepare key has not been erased
    assert LAST_SENT_PRE_PREPARE in node.nodeStatusDB

    # Send a 3PC-batch and ensure that the replica orders it
    sdk_send_batches_of_random(looper,
                               txnPoolNodeSet,
                               sdk_pool_handle,
                               sdk_wallet_client,
                               num_reqs=1,
                               num_batches=1,
                               timeout=tconf.Max3PCBatchWait)
    batches_count += 1
    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc ==
                                     (view_no, batches_count)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))
Code example #35
def test_second_checkpoint_after_catchup_can_be_stabilized(
        chkFreqPatched, looper, txnPoolNodeSet, sdk_wallet_steward,
        sdk_wallet_client, sdk_pool_handle, tdir, tconf, allPluginsPath):
    _, new_node = sdk_add_new_steward_and_node(looper,
                                               sdk_pool_handle,
                                               sdk_wallet_steward,
                                               'EpsilonSteward',
                                               'Epsilon',
                                               tdir,
                                               tconf,
                                               allPluginsPath=allPluginsPath)
    txnPoolNodeSet.append(new_node)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
    # Epsilon did not participate in ordering of the batch with EpsilonSteward
    # NYM transaction and the batch with Epsilon NODE transaction.
    # Epsilon got these transactions via catch-up.

    master_replica = new_node.replicas._master_replica

    assert len(master_replica.checkpoints) == 0

    assert len(master_replica.stashedRecvdCheckpoints) == 0

    assert master_replica.h == 2
    assert master_replica.H == 17

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    for replica in new_node.replicas.values():
        assert len(replica.checkpoints) == 1

        assert len(replica.stashedRecvdCheckpoints) == 0

        assert replica.h == 2
        assert replica.H == 17

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 6)
    stabilization_timeout = \
        waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))
    looper.runFor(stabilization_timeout)

    for replica in new_node.replicas.values():
        assert len(replica.checkpoints) == 2
        keys_iter = iter(replica.checkpoints)

        assert next(keys_iter) == (3, 5)
        assert replica.checkpoints[3, 5].seqNo == 5
        assert replica.checkpoints[3, 5].digest is None
        assert replica.checkpoints[3, 5].isStable is False

        assert next(keys_iter) == (6, 10)
        assert replica.checkpoints[6, 10].seqNo == 9
        assert replica.checkpoints[6, 10].digest is None
        assert replica.checkpoints[6, 10].isStable is False

        # nothing is stashed since it's ordered during catch-up
        assert len(replica.stashedRecvdCheckpoints) == 0

        assert replica.h == 2
        assert replica.H == 17

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)
    looper.runFor(stabilization_timeout)

    for replica in new_node.replicas.values():
        assert len(replica.checkpoints) == 1
        keys_iter = iter(replica.checkpoints)

        assert next(keys_iter) == (6, 10)
        assert replica.checkpoints[6, 10].seqNo == 10
        assert replica.checkpoints[6, 10].digest is not None
        assert replica.checkpoints[6, 10].isStable is True

        assert len(replica.stashedRecvdCheckpoints) == 0

        assert replica.h == 10
        assert replica.H == 25
Code example #36
def waitForReply(cli, nodeCount, replyChecker, customTimeout=None):
    timeout = customTimeout or \
        waits.expectedTransactionExecutionTime(nodeCount)
    cli.looper.run(
        eventually(checkReply, cli, nodeCount, replyChecker, timeout=timeout))
Code example #37
def test_backup_replica_resumes_ordering_on_lag_in_checkpoints(
        looper, chkFreqPatched, reqs_for_checkpoint,
        one_replica_and_others_in_backup_instance,
        sdk_pool_handle, sdk_wallet_client, view_change_done, txnPoolNodeSet):
    """
    Verifies resumption of ordering 3PC-batches on a backup replica
    on detection of a lag in checkpoints
    """

    slow_replica, other_replicas = one_replica_and_others_in_backup_instance
    view_no = slow_replica.viewNo

    # Send a request and ensure that the replica orders the batch for it
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)

    looper.run(
        eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc == (view_no, 2)),
                   slow_replica,
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Don't receive Commits from two replicas
    slow_replica.node.nodeIbStasher.delay(
        cDelay(instId=1, sender_filter=other_replicas[0].node.name))
    slow_replica.node.nodeIbStasher.delay(
        cDelay(instId=1, sender_filter=other_replicas[1].node.name))

    # Send a request for which the replica will not be able to order the batch
    # due to an insufficient count of Commits
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Recover reception of Commits
    slow_replica.node.nodeIbStasher.drop_delayeds()
    slow_replica.node.nodeIbStasher.resetDelays()

    # Send requests but in a quantity insufficient
    # for catch-up number of checkpoints
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                             Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP *
                             reqs_for_checkpoint - 3)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Ensure that the replica has not ordered any batches
    # after the very first one
    assert slow_replica.last_ordered_3pc == (view_no, 2)

    # Ensure that the watermarks have not been shifted since the view start
    assert slow_replica.h == 0
    assert slow_replica.H == LOG_SIZE

    # Ensure that the collections related to requests, batches and
    # own checkpoints are not empty.
    # (Note that a primary replica removes requests from requestQueues
    # when creating a batch with them.)
    if slow_replica.isPrimary:
        assert slow_replica.sentPrePrepares
    else:
        assert slow_replica.requestQueues[DOMAIN_LEDGER_ID]
        assert slow_replica.prePrepares
    assert slow_replica.prepares
    assert slow_replica.commits
    assert slow_replica.batches
    assert slow_replica._checkpointer._checkpoint_state
    print(slow_replica._checkpointer._stashed_recvd_checkpoints)

    # Ensure that there are some quorumed stashed checkpoints
    assert slow_replica._checkpointer._stashed_checkpoints_with_quorum()

    # Send more requests to reach catch-up number of checkpoints
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, reqs_for_checkpoint)

    # Ensure that the replica has adjusted last_ordered_3pc to the end
    # of the last checkpoint
    def chk(r):
        print(r)
        assert r.last_ordered_3pc == (view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ)
    looper.run(
        eventually(chk,
                   slow_replica,
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Ensure that the watermarks have been shifted so that the lower watermark
    # has the same value as last_ordered_3pc
    assert slow_replica.h == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ
    assert slow_replica.H == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + LOG_SIZE

    # Ensure that the collections related to requests, batches and
    # own checkpoints have been cleared
    assert not slow_replica.requestQueues[DOMAIN_LEDGER_ID]
    assert not slow_replica.sentPrePrepares
    assert not slow_replica.prePrepares
    assert not slow_replica.prepares
    assert not slow_replica.commits
    assert not slow_replica.batches
    assert not slow_replica._checkpointer._checkpoint_state

    # Ensure that now there are no quorumed stashed checkpoints
    assert not slow_replica._checkpointer._stashed_checkpoints_with_quorum()

    # Send a request and ensure that the replica orders the batch for it
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)

    looper.run(
        eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc ==
                                     (view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 1)),
                   slow_replica,
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))
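
The expected watermark values asserted above follow directly from the checkpoint parameters. Below is a minimal arithmetic sketch, assuming illustrative values for CHK_FREQ, LOG_SIZE and STASHED_CHECKPOINTS_BEFORE_CATCHUP (the test takes the real ones from chkFreqPatched and the Replica class); the relationships, not the concrete numbers, are what the test checks:

CHK_FREQ = 5                               # assumed 3PC-batches per checkpoint
LOG_SIZE = 3 * CHK_FREQ                    # assumed watermark window size
STASHED_CHECKPOINTS_BEFORE_CATCHUP = 4     # assumed lag tolerated before catch-up

# Before the lag is resolved, the window still starts at the view start.
h_before, H_before = 0, LOG_SIZE

# Once the replica processes the quorumed stashed checkpoints, last_ordered_3pc
# jumps to the end of the last stable checkpoint and the watermarks follow it.
last_stable_end = (STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ
h_after, H_after = last_stable_end, last_stable_end + LOG_SIZE

assert (h_before, H_before) == (0, 15)
assert last_stable_end == 25
assert (h_after, H_after) == (25, 40)
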
Code example #38
File: test_client.py  Project: lovesh/indy-plenum
def testReplyMatchesRequest(looper, nodeSet, tdir, up):
    '''
    This test checks the following things:
      - the wallet works correctly when shared by multiple clients
      - each client receives responses for exactly the request it sent
    '''
    def makeClient(id):
        client, wallet = genTestClient(nodeSet,
                                       tmpdir=tdir,
                                       name="client-{}".format(id))
        looper.add(client)
        looper.run(client.ensureConnectedToNodes())
        return client, wallet

    # creating clients
    numOfClients = 3
    numOfRequests = 1

    clients = set()
    sharedWallet = None
    for i in range(numOfClients):
        client, wallet = makeClient(i)
        if sharedWallet is None:
            sharedWallet = wallet
        clients.add(client)

    for i in range(1, numOfRequests + 1):
        # sending requests
        requests = {}
        for client in clients:
            op = randomOperation()
            req = sharedWallet.signOp(op)

            request = client.submitReqs(req)[0][0]
            requests[client] = (request.reqId, request.operation['amount'])

        # checking results
        responseTimeout = waits.expectedTransactionExecutionTime(nodeCount)
        for client, (reqId, sentAmount) in requests.items():
            looper.run(
                eventually(checkResponseRecvdFromNodes,
                           client,
                           nodeCount,
                           reqId,
                           retryWait=1,
                           timeout=responseTimeout))

            print("Expected amount for request {} is {}".format(
                reqId, sentAmount))

            # This looks like it fails on some python versions
            # replies = [r[0]['result']['amount']
            #            for r in client.inBox
            #            if r[0]['op'] == 'REPLY'
            #            and r[0]['result']['reqId'] == reqId]

            replies = []
            for r in client.inBox:
                if r[0]['op'] == 'REPLY' and r[0]['result']['reqId'] == reqId:
                    if 'amount' not in r[0]['result']:
                        logger.debug('{} cannot find amount in {}'.format(
                            client, r[0]['result']))
                    replies.append(r[0]['result']['amount'])

            assert all(replies[0] == r for r in replies)
            assert replies[0] == sentAmount
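
The loop above filters REPLY messages by reqId and then asserts that all of them carry the amount that was originally sent. A compact, self-contained sketch of that consistency check; the inBox entries here are simplified to (message, sender) tuples with hard-coded contents for illustration:

def amounts_for_request(in_box, req_id):
    # Collect the 'amount' field from every REPLY that matches req_id.
    return [msg['result']['amount']
            for msg, _frm in in_box
            if msg['op'] == 'REPLY'
            and msg['result']['reqId'] == req_id
            and 'amount' in msg['result']]

# Hypothetical inBox contents for one request sent with amount=42
in_box = [({'op': 'REPLY', 'result': {'reqId': 1, 'amount': 42}}, 'Alpha'),
          ({'op': 'REPLY', 'result': {'reqId': 1, 'amount': 42}}, 'Beta'),
          ({'op': 'REPLY', 'result': {'reqId': 2, 'amount': 7}}, 'Alpha')]

replies = amounts_for_request(in_box, req_id=1)
assert replies and all(r == replies[0] == 42 for r in replies)
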
Code example #39
def test_request_older_than_stable_checkpoint_removed(chkFreqPatched, looper,
                                                      txnPoolNodeSet,
                                                      sdk_pool_handle,
                                                      sdk_wallet_steward,
                                                      reqs_for_checkpoint):
    timeout = waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))
    max_batch_size = chkFreqPatched.Max3PCBatchSize

    # Send some requests (insufficient for a checkpoint),
    # wait for replies and check that the current checkpoint is not stable
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, 2 * max_batch_size)
    looper.run(
        eventually(check_for_nodes,
                   txnPoolNodeSet,
                   check_stable_checkpoint,
                   0,
                   retryWait=1,
                   timeout=timeout))
    checkRequestCounts(txnPoolNodeSet, 2 * max_batch_size, 2)

    # From the steward send a request creating a user with None role
    sdk_wallet_user = sdk_add_new_nym(looper, sdk_pool_handle,
                                      sdk_wallet_steward)
    looper.run(
        eventually(check_for_nodes,
                   txnPoolNodeSet,
                   check_stable_checkpoint,
                   0,
                   retryWait=1,
                   timeout=timeout))
    checkRequestCounts(txnPoolNodeSet, 2 * max_batch_size + 1, 3)

    # From the created user send a request creating another user.
    # Dynamic validation of this request must fail since a user with None role cannot create users.
    # However, the 3PC-batch with the sent request must be ordered.
    with pytest.raises(RequestRejectedException):
        sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_user)
    looper.run(
        eventually(check_for_nodes,
                   txnPoolNodeSet,
                   check_stable_checkpoint,
                   0,
                   retryWait=1,
                   timeout=timeout))
    checkRequestCounts(txnPoolNodeSet, 2 * max_batch_size + 2, 4)

    # Send more requests to cause checkpoint stabilization
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, max_batch_size)
    # Check that checkpoint is stable now
    # and verify that requests for it were removed
    looper.run(
        eventually(check_for_nodes,
                   txnPoolNodeSet,
                   check_stable_checkpoint,
                   5,
                   retryWait=1,
                   timeout=timeout))
    checkRequestCounts(txnPoolNodeSet, 0, 0)

    # Send more requests to cause new checkpoint
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_steward, reqs_for_checkpoint + 1)
    looper.run(
        eventually(check_for_nodes,
                   txnPoolNodeSet,
                   check_stable_checkpoint,
                   10,
                   retryWait=1,
                   timeout=timeout))
    checkRequestCounts(txnPoolNodeSet, 1, 1)
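
The stable checkpoint values 0, 5 and 10 asserted above come from counting ordered 3PC-batches against the checkpoint frequency. A small worked sketch, assuming CHK_FREQ = 5 for illustration (stable_checkpoint_after is an illustrative helper, not a plenum function):

CHK_FREQ = 5          # assumed checkpoint frequency, for illustration only

def stable_checkpoint_after(ordered_batches):
    # A checkpoint stabilizes each time a multiple of CHK_FREQ 3PC-batches
    # has been ordered; requests at or below that seqNo can then be freed.
    return (ordered_batches // CHK_FREQ) * CHK_FREQ

# Two batches of writes, the NYM, and the rejected-but-ordered NYM give
# only 4 ordered batches, so the stable checkpoint stays at 0.
assert stable_checkpoint_after(4) == 0
# One more batch makes 5 ordered batches -> stable checkpoint at 5.
assert stable_checkpoint_after(5) == 5
# Five further batches -> stable checkpoint at 10, one batch still pending.
assert stable_checkpoint_after(10) == 10
assert stable_checkpoint_after(11) == 10
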
Code example #40
def test_stashed_checkpoint_processing(chkFreqPatched, looper, txnPoolNodeSet,
                                       sdk_wallet_client, sdk_pool_handle):
    """
    One node in a pool of 5 nodes lags behind in ordering the last 3PC-batch
    of a checkpoint. By the time it eventually orders that 3PC-batch, it has
    already received and stashed Checkpoint messages from two nodes, so it
    processes these stashed messages on completing the checkpoint. After that
    it receives Checkpoint messages from the two other nodes, processes them
    and stabilizes the checkpoint.
    """
    epsilon = txnPoolNodeSet[-1]

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 4)

    epsilon.nodeIbStasher.delay(cDelay())
    epsilon.nodeIbStasher.delay(chk_delay(sender_filter='Gamma'))
    epsilon.nodeIbStasher.delay(chk_delay(sender_filter='Delta'))

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 1)

    stabilization_timeout = \
        waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))
    looper.runFor(stabilization_timeout)

    for inst_id, replica in epsilon.replicas:
        assert len(replica.checkpoints) == 1
        assert (1, 5) in replica.checkpoints
        assert replica.checkpoints[(1, 5)].seqNo == 4
        assert replica.checkpoints[(1, 5)].digest is None
        assert replica.checkpoints[(1, 5)].isStable is False

        assert len(replica.stashedRecvdCheckpoints) == 1
        assert 0 in replica.stashedRecvdCheckpoints
        assert len(replica.stashedRecvdCheckpoints[0]) == 1
        assert (1, 5) in replica.stashedRecvdCheckpoints[0]
        assert len(replica.stashedRecvdCheckpoints[0][(1, 5)]) == 2

    epsilon.nodeIbStasher.reset_delays_and_process_delayeds(COMMIT)

    def check():
        for inst_id, replica in epsilon.replicas:
            assert len(replica.checkpoints) == 1
            assert (1, 5) in replica.checkpoints
            assert replica.checkpoints[(1, 5)].seqNo == 5
            assert replica.checkpoints[(1, 5)].digest is not None
            assert replica.checkpoints[(1, 5)].isStable is False

            assert len(replica.stashedRecvdCheckpoints) == 0

    looper.run(
        eventually(check,
                   timeout=waits.expectedOrderingTime(len(txnPoolNodeSet))))

    epsilon.nodeIbStasher.reset_delays_and_process_delayeds(CHECKPOINT)

    stabilization_timeout = \
        waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))
    looper.runFor(stabilization_timeout)

    for inst_id, replica in epsilon.replicas:
        assert len(replica.checkpoints) == 1
        assert (1, 5) in replica.checkpoints
        assert replica.checkpoints[(1, 5)].seqNo == 5
        assert replica.checkpoints[(1, 5)].digest is not None
        assert replica.checkpoints[(1, 5)].isStable is True

        assert len(replica.stashedRecvdCheckpoints) == 0
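
The assertions above inspect a two-level map: Checkpoint messages received before the replica completes its own checkpoint for an interval are stashed per view number and per (start, end) interval, together with their senders. A minimal sketch of that bookkeeping, simplified from the replica's real data structures and using illustrative names:

from collections import defaultdict

# stashed[view_no][(start, end)] -> set of senders whose Checkpoint arrived early
stashed = defaultdict(lambda: defaultdict(set))

def receive_checkpoint(view_no, interval, sender, own_checkpoint_complete):
    if not own_checkpoint_complete:
        stashed[view_no][interval].add(sender)   # stash until we catch up
        return 'stashed'
    return 'processed'

# Epsilon has ordered only 4 of the 5 batches, so Checkpoints from Gamma and
# Delta for interval (1, 5) are stashed, exactly as the test asserts.
receive_checkpoint(0, (1, 5), 'Gamma', own_checkpoint_complete=False)
receive_checkpoint(0, (1, 5), 'Delta', own_checkpoint_complete=False)
assert len(stashed[0][(1, 5)]) == 2
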
Code example #41
def test_non_primary_recvs_3phase_message_outside_watermarks(
        chkFreqPatched, reqs_for_logsize, looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client):
    """
    A node is slow in receiving PRE-PREPAREs and PREPAREs. A lot of requests
    are sent, so the slow node starts receiving COMMITs outside of its
    watermarks and stashes them. This node is also slow in receiving
    CHECKPOINTs, so no catch-up occurs on it.

    Then the slow node eventually receives the sent PRE-PREPAREs and PREPAREs
    and so orders the 3PC-batches between its watermarks. The other nodes
    discard the COMMITs from the slow node since they have already achieved
    stable checkpoints for these COMMITs.

    After that the slow node eventually receives the sent CHECKPOINTs from
    the other nodes and so stabilizes its own completed checkpoints and updates its
    watermarks. A catch-up is not triggered because no received checkpoints are
    stashed. Since now the watermarks have been updated, the slow node
    processes 3PC-messages stashed earlier and its ledger becomes equal to the
    ledgers of the other nodes.
    """
    backupInstId = 1
    npr = getNonPrimaryReplicas(txnPoolNodeSet, backupInstId)

    slowReplica = npr[0]
    slowNode = slowReplica.node

    slowNode.nodeIbStasher.delay(ppDelay(300, backupInstId))
    slowNode.nodeIbStasher.delay(pDelay(300, backupInstId))
    slowNode.nodeIbStasher.delay(chk_delay(300))

    initialDomainLedgerSize = slowNode.domainLedger.size
    oldStashCount = slowReplica.stasher.num_stashed_watermarks
    slowReplica.H = LOG_SIZE
    # 1. Send more requests than fit between the watermarks on the slow node
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, reqs_for_logsize + 2)

    # Verify that the slow node stashes the batches outside of its watermarks
    newStashCount = slowReplica.stasher.num_stashed_watermarks
    assert newStashCount > oldStashCount

    oldDiscardCounts = discardCounts([n.replicas[backupInstId] for n in txnPoolNodeSet if n != slowNode],
                                     'achieved stable checkpoint')

    # 2. Deliver the sent PREPREPAREs and PREPAREs to the slow node
    slowNode.nodeIbStasher.reset_delays_and_process_delayeds(PREPREPARE, PREPARE)

    # Verify that the slow node orders the 3PC-batches between its watermarks
    # but no more.
    looper.runFor(waits.expectedTransactionExecutionTime(len(txnPoolNodeSet)))

    checkNodeDataForInequality(slowNode, *[n for n in txnPoolNodeSet if n != slowNode])
    assert slowNode.domainLedger.size - initialDomainLedgerSize == reqs_for_logsize

    # Also verify that the other nodes discard the COMMITs from the slow node
    # since they have already achieved stable checkpoints for these COMMITs.
    counts = discardCounts(
        [n.replicas[backupInstId] for n in txnPoolNodeSet if n != slowNode],
        'achieved stable checkpoint')
    for nm, count in counts.items():
        assert count > oldDiscardCounts[nm]

    oldCatchupTimes = slowNode.spylog.count(Node.start_catchup)

    # 3. Deliver the sent CHECKPOINTs to the slow node
    slowNode.nodeIbStasher.reset_delays_and_process_delayeds(CHECKPOINT)

    # Verify that the slow node processes 3PC-messages stashed earlier and its
    # ledger becomes equal to the ledgers of the other nodes while a catch-up
    # is not made.
    waitNodeDataEquality(looper, slowNode, *[n for n in txnPoolNodeSet if n != slowNode])
    assert slowNode.domainLedger.size - initialDomainLedgerSize == reqs_for_logsize + 2
    newCatchupTimes = slowNode.spylog.count(Node.start_catchup)
    assert newCatchupTimes == oldCatchupTimes
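
The behaviour exercised above hinges on a simple range check: a replica only processes a 3PC message whose ppSeqNo lies within its watermark window (h, H]; anything above H is stashed until the watermarks move, and anything at or below h belongs to an already stable checkpoint. A minimal sketch of that decision, with illustrative names (classify_3pc is not a plenum API):

def classify_3pc(pp_seq_no, h, H):
    # Messages at or below h belong to an already stable checkpoint;
    # messages above H are ahead of the window and must be stashed.
    if pp_seq_no <= h:
        return 'discard'
    if pp_seq_no > H:
        return 'stash'
    return 'process'

LOG_SIZE = 15        # assumed window size, for illustration only
assert classify_3pc(1, h=0, H=LOG_SIZE) == 'process'
assert classify_3pc(LOG_SIZE + 2, h=0, H=LOG_SIZE) == 'stash'
assert classify_3pc(3, h=5, H=5 + LOG_SIZE) == 'discard'
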
Code example #42
File: helper.py  Project: AshutoshJha007/indy-plenum
def wait_for_replies(looper, client, idr, reqId, count, custom_timeout=None):
    timeout = custom_timeout or waits.expectedTransactionExecutionTime(
        len(client.nodeReg))
    looper.run(
        eventually(checkReplyCount, client, idr, reqId, count,
                   timeout=timeout))
Code example #43
File: conftest.py  Project: whaleyu/indy-node
def nodeThetaAdded(looper,
                   nodeSet,
                   tdirWithClientPoolTxns,
                   tconf,
                   steward,
                   stewardWallet,
                   allPluginsPath,
                   testNodeClass,
                   testClientClass,
                   node_config_helper_class,
                   tdir,
                   node_name='Theta'):
    newStewardName = "testClientSteward" + randomString(3)
    newNodeName = node_name
    newSteward, newStewardWallet = getClientAddedWithRole(
        nodeSet,
        tdirWithClientPoolTxns,
        looper,
        steward,
        stewardWallet,
        newStewardName,
        role=STEWARD)

    sigseed = randomString(32).encode()
    nodeSigner = SimpleSigner(seed=sigseed)

    (nodeIp, nodePort), (clientIp, clientPort) = genHa(2)

    config_helper = node_config_helper_class(newNodeName, tconf, chroot=tdir)

    _, _, bls_key = initNodeKeysForBothStacks(newNodeName,
                                              config_helper.keys_dir,
                                              sigseed,
                                              override=True)

    data = {
        NODE_IP: nodeIp,
        NODE_PORT: nodePort,
        CLIENT_IP: clientIp,
        CLIENT_PORT: clientPort,
        ALIAS: newNodeName,
        SERVICES: [
            VALIDATOR,
        ],
        BLS_KEY: bls_key
    }

    node = Node(nodeSigner.identifier, data, newStewardWallet.defaultId)

    newStewardWallet.addNode(node)
    reqs = newStewardWallet.preparePending()
    req = newSteward.submitReqs(*reqs)[0][0]

    waitForSufficientRepliesForRequests(looper, newSteward, requests=[req])

    def chk():
        assert newStewardWallet.getNode(node.id).seqNo is not None

    timeout = plenumWaits.expectedTransactionExecutionTime(len(nodeSet))
    looper.run(eventually(chk, retryWait=1, timeout=timeout))

    newNode = testNodeClass(newNodeName,
                            config_helper=config_helper,
                            config=tconf,
                            ha=(nodeIp, nodePort),
                            cliha=(clientIp, clientPort),
                            pluginPaths=allPluginsPath)

    nodeSet.append(newNode)
    looper.add(newNode)
    looper.run(checkNodesConnected(nodeSet))
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward, *nodeSet)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward, *nodeSet)
    return newSteward, newStewardWallet, newNode
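
nodeThetaAdded returns the new steward's client and wallet together with the started node, so a test can simply build on the enlarged pool. A hypothetical usage sketch (the test name and assertions below are illustrative, not taken from the repository):

def test_pool_grows_after_theta_added(looper, nodeSet, nodeThetaAdded):
    # The fixture has already sent the NODE txn, started Theta and verified
    # connectivity, so a dependent test only consumes its results.
    newSteward, newStewardWallet, newNode = nodeThetaAdded
    assert newNode.name == 'Theta'
    assert newNode in nodeSet
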
def test_backup_replica_resumes_ordering_on_lag_if_checkpoints_belate(
        looper, chkFreqPatched, reqs_for_checkpoint,
        one_replica_and_others_in_backup_instance,
        sdk_pool_handle, sdk_wallet_client, view_change_done):
    """
    Verifies that a backup replica resumes ordering 3PC-batches on detecting
    a lag in checkpoints when the lag is detected only after some batch in
    the next checkpoint has already been committed but cannot be ordered
    out of turn
    """

    slow_replica, other_replicas = one_replica_and_others_in_backup_instance
    view_no = slow_replica.viewNo

    # Send a request and ensure that the replica orders the batch for it
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)

    looper.run(
        eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc == (view_no, 2)),
                   slow_replica,
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Don't receive Commits from two replicas
    slow_replica.node.nodeIbStasher.delay(
        cDelay(instId=1, sender_filter=other_replicas[0].node.name))
    slow_replica.node.nodeIbStasher.delay(
        cDelay(instId=1, sender_filter=other_replicas[1].node.name))

    # Send a request for which the replica will not be able to order the batch
    # due to an insufficient count of Commits
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Receive further Commits from now on
    slow_replica.node.nodeIbStasher.drop_delayeds()
    slow_replica.node.nodeIbStasher.resetDelays()
    looper.run(
        eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc == (view_no, 2)),
                   slow_replica,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Send requests, but in a quantity insufficient to reach
    # the catch-up number of checkpoints
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                             Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP *
                             reqs_for_checkpoint - 2)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Don't receive Checkpoints
    slow_replica.node.nodeIbStasher.delay(chk_delay(instId=1))

    # Send more requests to reach catch-up number of checkpoints
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                             reqs_for_checkpoint)
    # Send a request that starts a new checkpoint
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)
    looper.runFor(waits.expectedTransactionExecutionTime(nodeCount))

    # Ensure that the replica has not ordered any batches
    # after the very first one
    assert slow_replica.last_ordered_3pc == (view_no, 2)

    # Ensure that the watermarks have not been shifted since the view start
    assert slow_replica.h == 0
    assert slow_replica.H == LOG_SIZE

    # Ensure that there are some quorumed stashed checkpoints
    assert slow_replica._checkpointer._stashed_checkpoints_with_quorum()

    # Receive belated Checkpoints
    slow_replica.node.nodeIbStasher.reset_delays_and_process_delayeds()

    # Ensure that the replica has ordered the batch for the last sent request
    looper.run(
        eventually(lambda *args: assertExp(slow_replica.last_ordered_3pc == \
                                           (view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 2)),
                   slow_replica,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Ensure that the watermarks have been shifted so that the lower watermark
    # now equals the end of the last stable checkpoint in the instance
    assert slow_replica.h == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ
    assert slow_replica.H == (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + LOG_SIZE

    # Ensure that now there are no quorumed stashed checkpoints
    assert not slow_replica._checkpointer._stashed_checkpoints_with_quorum()

    # Send a request and ensure that the replica orders the batch for it
    sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client, 1)

    looper.run(
        eventually(lambda: assertExp(slow_replica.last_ordered_3pc ==
                                     (view_no, (Replica.STASHED_CHECKPOINTS_BEFORE_CATCHUP + 1) * CHK_FREQ + 3)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))
Code example #45
def testTransactions(cli, loadAuctionReqPlugin,
                     createAllNodes, validNodeNames):
    nodeCount = len(validNodeNames)
    auctionId = str(uuid4())
    names = ["Jason", "John", "Les", "Timothy", "Tyler"]
    for name in names:
        createClientAndConnect(cli, validNodeNames, name)
    jason, tyler, les, john, timothy = names

    timeout = waits.expectedTransactionExecutionTime(nodeCount)
    cli.enterCmd("client {} start auction {}".format(jason, auctionId))
    waitRequestSuccess(cli, nodeCount * 1, customTimeout=timeout)

    cli.enterCmd("client {} place bid 20 on {}".format(tyler, auctionId))
    waitRequestSuccess(cli, nodeCount * 2, customTimeout=timeout)

    cli.enterCmd("client {} balance".format(tyler))
    waitRequestSuccess(cli, nodeCount * 3, customTimeout=timeout)

    waitBalanceChange(cli, nodeCount, 980, customTimeout=timeout)

    cli.enterCmd("client {} place bid 40 on {}".format(les, auctionId))
    waitRequestSuccess(cli, nodeCount * 4, customTimeout=timeout)

    cli.enterCmd("client {} balance".format(tyler))
    waitRequestSuccess(cli, nodeCount * 5, customTimeout=timeout)
    waitBalanceChange(cli, nodeCount, 1000, customTimeout=timeout)

    cli.enterCmd("client {} balance".format(les))
    waitRequestSuccess(cli, nodeCount * 6, customTimeout=timeout)
    waitBalanceChange(cli, nodeCount, 960, customTimeout=timeout)

    # This bid will fail, so `success` will be false and thus the success count
    # won't increase
    cli.enterCmd("client {} place bid 30 on {}".format(john, auctionId))
    waitRequestSuccess(cli, nodeCount * 6, customTimeout=timeout)

    cli.enterCmd("client {} balance".format(john))
    waitRequestSuccess(cli, nodeCount * 7, customTimeout=timeout)
    waitBalanceChange(cli, nodeCount * 2, 1000, customTimeout=timeout)

    cli.enterCmd("client {} balance".format(les))
    waitRequestSuccess(cli, nodeCount * 8, customTimeout=timeout)
    waitBalanceChange(cli, nodeCount * 2, 960, customTimeout=timeout)

    cli.enterCmd("client {} place bid 200 on {}".format(timothy, auctionId))
    waitRequestSuccess(cli, nodeCount * 9, customTimeout=timeout)

    cli.enterCmd("client {} balance".format(timothy))
    waitRequestSuccess(cli, nodeCount * 10, customTimeout=timeout)
    waitBalanceChange(cli, nodeCount, 800, customTimeout=timeout)

    cli.enterCmd("client {} balance".format(les))
    waitRequestSuccess(cli, nodeCount * 11, customTimeout=timeout)
    waitBalanceChange(cli, nodeCount * 3, 1000, customTimeout=timeout)

    cli.enterCmd("client {} end auction {}".format(jason, auctionId))
    waitRequestSuccess(cli, nodeCount * 12, customTimeout=timeout)

    cli.enterCmd("client {} place bid 300 on {}".format(john, auctionId))
    waitRequestSuccess(cli, nodeCount * 12, customTimeout=timeout)

    cli.enterCmd("client {} balance".format(john))
    waitRequestSuccess(cli, nodeCount * 13, customTimeout=timeout)
    waitBalanceChange(cli, nodeCount * 4, 1000, customTimeout=timeout)

    cli.enterCmd("client {} balance".format(tyler))
    waitRequestSuccess(cli, nodeCount * 14, customTimeout=timeout)
    waitBalanceChange(cli, nodeCount * 5, 1000, customTimeout=timeout)

    cli.enterCmd("client {} balance".format(john))
    waitRequestSuccess(cli, nodeCount * 15, customTimeout=timeout)
    waitBalanceChange(cli, nodeCount * 6, 1000, customTimeout=timeout)

    cli.enterCmd("client {} balance".format(les))
    waitRequestSuccess(cli, nodeCount * 16, customTimeout=timeout)
    waitBalanceChange(cli, nodeCount * 7, 1000, customTimeout=timeout)

    cli.enterCmd("client {} balance".format(timothy))
    waitRequestSuccess(cli, nodeCount * 17, customTimeout=timeout)
    waitBalanceChange(cli, nodeCount * 2, 800, customTimeout=timeout)
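
The balances asserted above follow the accounting that this auction scenario implies: every client starts with 1000, a strictly higher bid deducts its amount from the bidder, the previously leading bidder is refunded, and a lower bid changes nothing. A small worked sketch of that accounting with illustrative names (place_bid is not the plugin's API; the real logic lives behind the loadAuctionReqPlugin fixture):

balances = {name: 1000 for name in ("Jason", "John", "Les", "Timothy", "Tyler")}
highest_bid = None   # (bidder, amount) of the currently winning bid

def place_bid(bidder, amount):
    # A strictly higher bid wins: the previous leader is refunded,
    # the new bidder pays; a lower bid is rejected and changes nothing.
    global highest_bid
    if highest_bid is not None and amount <= highest_bid[1]:
        return False
    if highest_bid is not None:
        balances[highest_bid[0]] += highest_bid[1]
    balances[bidder] -= amount
    highest_bid = (bidder, amount)
    return True

assert place_bid("Tyler", 20) and balances["Tyler"] == 980
assert place_bid("Les", 40) and balances["Tyler"] == 1000 and balances["Les"] == 960
assert not place_bid("John", 30) and balances["John"] == 1000
assert place_bid("Timothy", 200) and balances["Les"] == 1000 and balances["Timothy"] == 800
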
Code example #46
def checkIdentityRequestSucceed(looper, actingClient, actingWallet, idr):
    def chk():
        assert actingWallet.getTrustAnchoredIdentity(idr).seqNo is not None

    timeout = waits.expectedTransactionExecutionTime(len(actingClient.nodeReg))
    looper.run(eventually(chk, retryWait=1, timeout=timeout))
def test_node_erases_last_sent_pp_key_on_pool_restart(looper, txnPoolNodeSet,
                                                      sdk_pool_handle,
                                                      sdk_wallet_client, tconf,
                                                      tdir, allPluginsPath,
                                                      chkFreqPatched):

    # Get the primary replica of the backup instance and its node
    replica = getPrimaryReplica(txnPoolNodeSet, instId=backup_inst_id)
    node = replica.node

    # Send some 3PC-batches and wait until the replica orders the 3PC-batches
    sdk_send_batches_of_random(looper,
                               txnPoolNodeSet,
                               sdk_pool_handle,
                               sdk_wallet_client,
                               num_reqs=7,
                               num_batches=7,
                               timeout=tconf.Max3PCBatchWait)

    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc == (0, 7)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))

    # Check view no of the node and lastPrePrepareSeqNo of the replica
    assert node.viewNo == 0
    assert replica.lastPrePrepareSeqNo == 7
    assert replica.h == 6
    assert replica.H == 6 + LOG_SIZE

    # Ensure that there is a stored last sent PrePrepare key on the node
    assert LAST_SENT_PRE_PREPARE in node.nodeStatusDB

    # Restart all the nodes in the pool and wait for primary elections done
    all_nodes = copy(txnPoolNodeSet)
    for n in all_nodes:
        disconnect_node_and_ensure_disconnected(looper,
                                                txnPoolNodeSet,
                                                n.name,
                                                timeout=nodeCount,
                                                stopNode=True)
        looper.removeProdable(n)
        txnPoolNodeSet.remove(n)
    for n in all_nodes:
        txnPoolNodeSet.append(
            start_stopped_node(n, looper, tconf, tdir, allPluginsPath))
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper, txnPoolNodeSet)

    node = nodeByName(txnPoolNodeSet, node.name)
    replica = node.replicas[backup_inst_id]

    # Verify that the node has erased the stored last sent PrePrepare key
    assert LAST_SENT_PRE_PREPARE not in node.nodeStatusDB

    # Correspondingly, verify that after the pool restart the replica
    # (which must again be the primary in its instance) has not restored
    # lastPrePrepareSeqNo, has not adjusted last_ordered_3pc and has not
    # shifted the watermarks
    assert node.viewNo == 0
    assert replica.isPrimary
    assert replica.lastPrePrepareSeqNo == 0
    assert replica.last_ordered_3pc == (0, 0)
    assert replica.h == 0
    assert replica.H == 0 + LOG_SIZE

    # Send a 3PC-batch and ensure that the replica orders it
    sdk_send_batches_of_random(looper,
                               txnPoolNodeSet,
                               sdk_pool_handle,
                               sdk_wallet_client,
                               num_reqs=1,
                               num_batches=1,
                               timeout=tconf.Max3PCBatchWait)

    looper.run(
        eventually(lambda: assertExp(replica.last_ordered_3pc == (0, 1)),
                   retryWait=1,
                   timeout=waits.expectedTransactionExecutionTime(nodeCount)))
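
The test above boils down to one persistence rule: a backup primary stores a "last sent PrePrepare" key in its node status DB while it is ordering, and a clean pool restart erases that key, so the restarted replica does not restore lastPrePrepareSeqNo and starts again from (viewNo, 0). A minimal sketch of that store/erase behaviour, with a plain dict standing in for nodeStatusDB and illustrative function names:

LAST_SENT_PRE_PREPARE = 'lastSentPrePrepare'   # key name assumed for illustration

node_status_db = {}

def record_last_sent_pp(inst_id, view_no, pp_seq_no):
    # Called whenever a backup primary sends a PrePrepare.
    node_status_db[LAST_SENT_PRE_PREPARE] = (inst_id, view_no, pp_seq_no)

def erase_on_pool_restart():
    # On a full pool restart the stored key is dropped, so the replica
    # does not restore lastPrePrepareSeqNo and starts ordering from 0.
    node_status_db.pop(LAST_SENT_PRE_PREPARE, None)

record_last_sent_pp(1, 0, 7)
assert LAST_SENT_PRE_PREPARE in node_status_db
erase_on_pool_restart()
assert LAST_SENT_PRE_PREPARE not in node_status_db
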