Example #1
def changeNodeKeys(looper, stewardClient, stewardWallet, node, verkey):
    nodeNym = hexToFriendly(node.nodestack.verhex)

    op = {
        TXN_TYPE: NODE,
        TARGET_NYM: nodeNym,
        VERKEY: verkey,
        DATA: {
            ALIAS: node.name
        }
    }
    req = stewardWallet.signOp(op)
    stewardClient.submitReqs(req)

    waitForSufficientRepliesForRequests(looper,
                                        stewardClient,
                                        requests=[req],
                                        fVal=1)

    node.nodestack.clearLocalRoleKeep()
    node.nodestack.clearRemoteRoleKeeps()
    node.nodestack.clearAllDir()
    node.clientstack.clearLocalRoleKeep()
    node.clientstack.clearRemoteRoleKeeps()
    node.clientstack.clearAllDir()
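
All of these examples share the same core flow: sign an operation, submit it, then block on waitForSufficientRepliesForRequests. A minimal sketch of that flow, assuming the same plenum test helpers used above are in scope (sign_submit_and_wait is a hypothetical name, not part of the library):

def sign_submit_and_wait(looper, client, wallet, op, f_val=1):
    # Hypothetical helper distilling the pattern above: sign the raw
    # operation dict, broadcast it to the pool, then block until the
    # client has collected sufficient matching replies. f_val is the
    # assumed fault-tolerance parameter, forwarded as fVal.
    req = wallet.signOp(op)
    client.submitReqs(req)
    waitForSufficientRepliesForRequests(looper, client,
                                        requests=[req], fVal=f_val)
    return req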
Example #2
def addNewClient(role, looper, creatorClient: Client, creatorWallet: Wallet,
                 name: str):
    req, wallet = sendAddNewClient(role, name, creatorClient, creatorWallet)
    waitForSufficientRepliesForRequests(looper, creatorClient,
                                        requests=[req])

    return wallet
Example #3
def test_client_resends_not_confirmed_request(looper, client1, wallet1,
                                              txnPoolNodeSet):
    """
    Check that the client resends the request to all nodes if it was
    previously sent to one node but the reply cannot be verified
    """
    client = client1
    wallet = wallet1

    initial_submit_count = client.spylog.count(client.submitReqs)
    initial_resent_count = client.spylog.count(client.resendRequests)

    def sign_and_send(op):
        signed = wallet.signOp(op)
        return send_signed_requests(client, [signed])

    buy = {'type': 'buy', 'amount': random.randint(10, 100)}
    requests = sign_and_send(buy)
    waitForSufficientRepliesForRequests(looper, client, requests=requests)

    buy = {'type': 'get_buy'}
    client._read_only_requests.add('get_buy')
    requests = sign_and_send(buy)
    waitForSufficientRepliesForRequests(looper, client, requests=requests)

    # submitReqs should be called twice: first for buy and then for get_buy
    assert initial_submit_count + 2 == \
           client.spylog.count(client.submitReqs)

    assert initial_resent_count + 1 == \
           client.spylog.count(client.resendRequests)
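
The spylog counters used above are plenum test instrumentation. Outside that harness, the same before/after call-count assertion can be approximated with the standard library's unittest.mock (shown as a substitute for illustration; this is not the plenum API):

from unittest import mock

class Client:
    def submitReqs(self, *reqs):
        return list(reqs)

client = Client()
with mock.patch.object(client, "submitReqs",
                       wraps=client.submitReqs) as spy:
    client.submitReqs("buy")
    client.submitReqs("get_buy")
    assert spy.call_count == 2  # same idea as the spylog count assertions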
Example #4
def test_dirty_read(looper, nodeSet, client1, wallet1):
    """
    Tests the case where a read request arrives while the corresponding
    write request has not yet been executed on some nodes
    """

    slow_nodes = list(nodeSet)[2:4]
    for node in slow_nodes:
        logger.debug("Making node {} slow".format(node))
        make_node_slow(node)

    set_request = sendReqsToNodesAndVerifySuffReplies(looper,
                                                      wallet1,
                                                      client1,
                                                      numReqs=1)[0]

    received_replies = getRepliesFromClientInbox(inbox=client1.inBox,
                                                 reqId=set_request.reqId)

    seq_no = received_replies[0]["result"]["seqNo"]
    get_request = [wallet1.signOp({"type": GET_TXN, DATA: seq_no})]
    send_signed_requests(client1, get_request)
    waitForSufficientRepliesForRequests(looper, client1, requests=get_request)
    received_replies = getRepliesFromClientInbox(inbox=client1.inBox,
                                                 reqId=get_request[0].reqId)
    results = [str(reply['result'][DATA]) for reply in received_replies]

    assert len(set(results)) == 1
Example #5
def submit_operation_and_get_replies(looper, wallet, client, operation):
    request = wallet.signOp(operation)
    wallet.pendRequest(request)
    pending = wallet.preparePending()
    client.submitReqs(*pending)
    waitForSufficientRepliesForRequests(looper, client, requests=pending)
    return getRepliesFromClientInbox(client.inBox, request.reqId)
Example #6
def test_state_proof_returned_for_get_attr(looper,
                                           addedRawAttribute,
                                           attributeName,
                                           attributeData,
                                           trustAnchor,
                                           trustAnchorWallet):
    """
    Tests that state proof is returned in the reply for GET_ATTR transactions
    """
    client = trustAnchor
    get_attr_operation = {
        TARGET_NYM: addedRawAttribute.dest,
        TXN_TYPE: GET_ATTR,
        RAW: attributeName
    }
    get_attr_request = trustAnchorWallet.signOp(get_attr_operation)
    trustAnchorWallet.pendRequest(get_attr_request)
    pending = trustAnchorWallet.preparePending()
    client.submitReqs(*pending)
    waitForSufficientRepliesForRequests(looper, trustAnchor, requests=pending)
    replies = getRepliesFromClientInbox(client.inBox, get_attr_request.reqId)
    expected_data = attrib_raw_data_serializer.deserialize(attributeData)
    for reply in replies:
        result = reply['result']
        assert DATA in result
        data = attrib_raw_data_serializer.deserialize(result[DATA])
        assert data == expected_data
        assert result[TXN_TIME]
        check_valid_proof(reply, client)
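
The pend/prepare/submit/wait sequence in this test matches the body of submit_operation_and_get_replies from Example #5 line for line; assuming that helper is importable here, the same test could be shortened to:

# Hypothetical refactor using the Example #5 helper (same semantics assumed):
replies = submit_operation_and_get_replies(looper, trustAnchorWallet,
                                           trustAnchor, get_attr_operation)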
Example #7
def sendMoney(self, to: str, amount: int, nodes, expected: bool = True):
    req = self.submit({
        TXN_TYPE: CREDIT,
        TARGET_NYM: to,
        DATA: {
            AMOUNT: amount
        }
    })
    if expected:
        waitForSufficientRepliesForRequests(self.looper,
                                            self.client,
                                            requests=[req])
    else:
        timeout = waits.expectedReqNAckQuorumTime()
        for node in nodes:
            self.looper.run(
                eventually(checkReqNack,
                           self.client,
                           node,
                           req.identifier,
                           req.reqId,
                           None,
                           retryWait=1,
                           timeout=timeout))
    return req
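
A hypothetical invocation of the method above; helper, receiverNym, and txnPoolNodeSet are assumed names, and a negative amount is only a guess at an input the pool would NACK:

# Happy path: blocks until sufficient replies arrive.
req = helper.sendMoney(to=receiverNym, amount=10, nodes=txnPoolNodeSet)
# Rejection path: waits until every node has NACKed the request.
req = helper.sendMoney(to=receiverNym, amount=-1, nodes=txnPoolNodeSet,
                       expected=False)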
Example #8
def test_msg_max_length_check_node_to_node(looper, txnPoolNodeSet, client1,
                                           wallet1, client1Connected,
                                           clientAndWallet2):
    """
    Two clients send 2*N requests each at the same time.
    N < MSG_LEN_LIMIT but 2*N > MSG_LEN_LIMIT so the requests pass the max
    length check for client-node requests but do not pass the check
    for node-node requests.
    """
    N = 10
    # this is an empirical value for N random requests;
    # it has to be adjusted if conditions change (see pydoc)
    max_len_limit = 3000

    patch_msg_len_validators(max_len_limit, txnPoolNodeSet)

    client2, wallet2 = clientAndWallet2

    reqs1 = sendRandomRequests(wallet1, client1, N)
    reqs2 = sendRandomRequests(wallet2, client2, N)

    check_reqacks(client1, looper, reqs1, txnPoolNodeSet)
    check_reqacks(client2, looper, reqs2, txnPoolNodeSet)

    waitForSufficientRepliesForRequests(looper, client1, requests=reqs1)
    waitForSufficientRepliesForRequests(looper, client2, requests=reqs2)
Example #9
def test_proof_in_reply(looper, txnPoolNodeSet,
                        client1, client1Connected, wallet1):
    reqs = sendRandomRequests(wallet1, client1, 1)
    waitForSufficientRepliesForRequests(looper, client1, requests=reqs)

    req = reqs[0]
    result = client1.getReply(req.identifier, req.reqId)[0]

    assert result
    assert result[TXN_TYPE] == "buy"
    assert result[f.IDENTIFIER.nm] == req.identifier
    assert result[f.REQ_ID.nm] == req.reqId
    assert result[f.SEQ_NO.nm]
    assert result[TXN_TIME]
    assert STATE_PROOF in result

    state_proof = result[STATE_PROOF]
    assert ROOT_HASH in state_proof
    assert MULTI_SIGNATURE in state_proof
    assert PROOF_NODES in state_proof

    multi_sig = state_proof[MULTI_SIGNATURE]
    assert MULTI_SIGNATURE_SIGNATURE in multi_sig
    assert MULTI_SIGNATURE_PARTICIPANTS in multi_sig
    assert MULTI_SIGNATURE_VALUE in multi_sig

    multi_sig_value = multi_sig[MULTI_SIGNATURE_VALUE]
    assert MULTI_SIGNATURE_VALUE_LEDGER_ID in multi_sig_value
    assert MULTI_SIGNATURE_VALUE_STATE_ROOT in multi_sig_value
    assert MULTI_SIGNATURE_VALUE_TXN_ROOT in multi_sig_value
    assert MULTI_SIGNATURE_VALUE_POOL_STATE_ROOT in multi_sig_value
    assert MULTI_SIGNATURE_VALUE_TIMESTAMP in multi_sig_value

    assert client1.validate_multi_signature(state_proof)
    assert client1.validate_proof(result)
Example #10
def test_state_proof_returned_for_missing_schema(looper, trustAnchor,
                                                 trustAnchorWallet):
    """
    Tests that state proof is returned in the reply for GET_SCHEMA transactions
    """
    client = trustAnchor
    dest = trustAnchorWallet.defaultId
    schema_name = "test_schema"
    schema_version = "1.0"
    get_schema_operation = {
        TARGET_NYM: dest,
        TXN_TYPE: GET_SCHEMA,
        DATA: {
            NAME: schema_name,
            VERSION: schema_version,
        }
    }
    get_schema_request = trustAnchorWallet.signOp(get_schema_operation)
    trustAnchorWallet.pendRequest(get_schema_request)
    pending = trustAnchorWallet.preparePending()
    client.submitReqs(*pending)
    waitForSufficientRepliesForRequests(looper, trustAnchor, requests=pending)
    replies = getRepliesFromClientInbox(client.inBox, get_schema_request.reqId)
    for reply in replies:
        result = reply['result']
        assert ATTR_NAMES not in result[DATA]
        check_valid_proof(reply, client)
Example #11
def put_load():
    port = genHa()[1]
    ha = HA('0.0.0.0', port)
    name = "hello"
    wallet = Wallet(name)
    wallet.addIdentifier(signer=DidSigner(
        seed=b'000000000000000000000000Steward1'))
    client = Client(name, ha=ha)
    with Looper(debug=True) as looper:
        looper.add(client)
        print('Will send {} reqs in all'.format(numReqs))
        requests = sendRandomRequests(wallet, client, numReqs)
        start = perf_counter()
        for i in range(0, numReqs, numReqs // splits):
            print('Will wait for {} now'.format(numReqs // splits))
            s = perf_counter()
            reqs = requests[i:i + numReqs // splits]
            waitForSufficientRepliesForRequests(looper,
                                                client,
                                                requests=reqs,
                                                customTimeoutPerReq=100,
                                                override_timeout_limit=True)
            print('>>> Got replies for {} requests << in {}'.format(
                numReqs // splits,
                perf_counter() - s))
        end = perf_counter()
        print('>>>Total {} in {}<<<'.format(numReqs, end - start))
        exit(0)
Example #12
def test_different_ledger_request_interleave(tconf, looper, txnPoolNodeSet,
                                             client1, wallet1, one_node_added,
                                             client1Connected, tdir,
                                             client_tdir, tdirWithPoolTxns,
                                             steward1, stewardWallet,
                                             allPluginsPath):
    """
    Send pool and domain ledger requests such that they interleave, and do
    view change in between and verify the pool is functional
    """
    new_node = one_node_added
    sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, 2)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    # Send domain ledger requests but don't wait for replies
    requests = sendRandomRequests(wallet1, client1, 2)
    # Add another node by sending pool ledger request
    _, _, new_theta = nodeThetaAdded(looper,
                                     txnPoolNodeSet,
                                     tdir,
                                     client_tdir,
                                     tconf,
                                     steward1,
                                     stewardWallet,
                                     allPluginsPath,
                                     name='new_theta')

    # Send more domain ledger requests but don't wait for replies
    requests.extend(sendRandomRequests(wallet1, client1, 3))

    # Do view change without waiting for replies
    ensure_view_change(looper, nodes=txnPoolNodeSet)
    checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)

    # Make sure all requests are completed
    waitForSufficientRepliesForRequests(looper, client1, requests=requests)

    ensure_pool_functional(looper, txnPoolNodeSet, wallet1, client1)

    new_steward, new_steward_wallet = addNewSteward(looper, client_tdir,
                                                    steward1, stewardWallet,
                                                    'another_ste')

    # Send another pool ledger request (NODE) but don't wait for completion of
    # request
    next_node_name = 'next_node'
    r = sendAddNewNode(tdir, tconf, next_node_name, new_steward,
                       new_steward_wallet)
    node_req = r[0]

    # Send more domain ledger requests but don't wait for replies
    requests = [
        node_req, *sendRandomRequests(new_steward_wallet, new_steward, 5)
    ]

    # Make sure all requests are completed
    waitForSufficientRepliesForRequests(looper, new_steward, requests=requests)

    # Make sure pool is functional
    ensure_pool_functional(looper, txnPoolNodeSet, wallet1, client1)
Example #13
def addNewNode(looper,
               stewardClient,
               stewardWallet,
               newNodeName,
               tdir,
               tconf,
               allPluginsPath=None,
               autoStart=True,
               nodeClass=TestNode,
               transformOpFunc=None,
               do_post_node_creation: Callable = None):
    nodeClass = nodeClass or TestNode
    req, nodeIp, nodePort, clientIp, clientPort, sigseed \
        = sendAddNewNode(tdir, tconf, newNodeName, stewardClient, stewardWallet,
                         transformOpFunc)
    waitForSufficientRepliesForRequests(looper, stewardClient, requests=[req])

    return create_and_start_new_node(
        looper,
        newNodeName,
        tdir,
        sigseed, (nodeIp, nodePort), (clientIp, clientPort),
        tconf,
        autoStart,
        allPluginsPath,
        nodeClass,
        do_post_node_creation=do_post_node_creation)
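
A hypothetical call of the helper above, mirroring fixture names used elsewhere in these examples:

# Add a node named 'Epsilon' and wait for the NODE txn to be ordered
# (steward1, stewardWallet, tdir, tconf, allPluginsPath are assumed fixtures):
new_node = addNewNode(looper, steward1, stewardWallet, 'Epsilon',
                      tdir, tconf, allPluginsPath=allPluginsPath)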
Example #14
def load():
    port = genHa()[1]
    ha = HA('0.0.0.0', port)
    name = "hello"
    wallet = Wallet(name)
    wallet.addIdentifier(signer=SimpleSigner(
        seed=b'000000000000000000000000Steward1'))
    client = Client(name, ha=ha)
    with Looper(debug=getConfig().LOOPER_DEBUG) as looper:
        looper.add(client)
        print('Will send {} reqs in all'.format(numReqs))
        requests = sendRandomRequests(wallet, client, numReqs)
        start = perf_counter()
        for i in range(0, numReqs, numReqs // splits):
            print('Will wait for {} now'.format(numReqs // splits))
            s = perf_counter()
            reqs = requests[i:i + numReqs // splits]
            waitForSufficientRepliesForRequests(looper,
                                                client,
                                                requests=reqs,
                                                fVal=2,
                                                customTimeoutPerReq=3)
            print('>>> Got replies for {} requests << in {}'.format(
                numReqs // splits,
                perf_counter() - s))
        end = perf_counter()
        print('>>>{}<<<'.format(end - start))
        exit(0)
Example #15
def put_load():
    port = genHa()[1]
    ha = HA('0.0.0.0', port)
    name = "hello"
    wallet = Wallet(name)
    wallet.addIdentifier(
        signer=DidSigner(seed=b'000000000000000000000000Steward1'))
    client = Client(name, ha=ha)
    with Looper(debug=getConfig().LOOPER_DEBUG) as looper:
        looper.add(client)
        print('Will send {} reqs in all'.format(numReqs))
        requests = sendRandomRequests(wallet, client, numReqs)
        start = perf_counter()
        for i in range(0, numReqs, numReqs // splits):
            print('Will wait for {} now'.format(numReqs // splits))
            s = perf_counter()
            reqs = requests[i:i + numReqs // splits]
            waitForSufficientRepliesForRequests(looper, client, requests=reqs,
                                                customTimeoutPerReq=100,
                                                override_timeout_limit=True)
            print('>>> Got replies for {} requests << in {}'.
                  format(numReqs // splits, perf_counter() - s))
        end = perf_counter()
        print('>>>Total {} in {}<<<'.format(numReqs, end - start))
        exit(0)
Example #16
def testReqExecWhenReturnedByMaster(tdir_for_func, tconf_for_func):
    with TestNodeSet(tconf_for_func, count=4, tmpdir=tdir_for_func) as nodeSet:
        with Looper(nodeSet) as looper:
            client1, wallet1 = setupNodesAndClient(looper,
                                                   nodeSet,
                                                   tmpdir=tdir_for_func)
            req = sendRandomRequest(wallet1, client1)
            waitForSufficientRepliesForRequests(looper,
                                                client1,
                                                requests=[req])

            async def chk():
                for node in nodeSet:
                    entries = node.spylog.getAll(node.processOrdered.__name__)
                    for entry in entries:
                        arg = entry.params['ordered']
                        result = entry.result
                        if arg.instId == node.instances.masterId:
                            assert result
                        else:
                            assert result is False

            timeout = waits.expectedOrderingTime(
                nodeSet.nodes['Alpha'].instances.count)
            looper.run(eventually(chk, timeout=timeout))
Example #17
def requests(looper, wallet1, client1):
    requests = []
    for i in range(5):
        req = sendRandomRequest(wallet1, client1)
        waitForSufficientRepliesForRequests(looper, client1, requests=[req])
        requests.append(req)
    return requests
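
This fixture waits for each request before sending the next, which tends to order the five transactions in separate batches. Contrast with the batched style used in other examples, where all requests go out first and the wait happens once:

# Batched variant (same helpers as above): all five requests are in
# flight simultaneously, so several may be ordered in one 3PC batch.
reqs = sendRandomRequests(wallet1, client1, 5)
waitForSufficientRepliesForRequests(looper, client1, requests=reqs)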
Example #18
def addNewNode(looper,
               stewardClient,
               stewardWallet,
               newNodeName,
               tdir,
               tconf,
               allPluginsPath=None,
               autoStart=True,
               nodeClass=TestNode,
               transformOpFunc=None):
    nodeClass = nodeClass or TestNode
    req, nodeIp, nodePort, clientIp, clientPort, sigseed \
        = sendAddNewNode(tdir, tconf, newNodeName, stewardClient, stewardWallet,
                         transformOpFunc)
    waitForSufficientRepliesForRequests(looper, stewardClient, requests=[req])

    # initNodeKeysForBothStacks(newNodeName, tdir, sigseed, override=True)
    # node = nodeClass(newNodeName, basedirpath=tdir, config=tconf,
    #                  ha=(nodeIp, nodePort), cliha=(clientIp, clientPort),
    #                  pluginPaths=allPluginsPath)
    # if autoStart:
    #     looper.add(node)
    # return node
    return start_newly_added_node(looper, newNodeName, tdir, sigseed,
                                  (nodeIp, nodePort), (clientIp, clientPort),
                                  tconf, autoStart, allPluginsPath, nodeClass)
Example #19
def write_wrapped():
    req = sendRandomRequest(wallet, client)
    waitForSufficientRepliesForRequests(txnPoolNodesLooper,
                                        client,
                                        requests=[req])
    txnPoolNodesLooper.runFor(patched_dump_info_period)
    return load_info(info_path)
Example #20
def testReplyWhenRequestAlreadyExecuted(looper, txnPoolNodeSet, client1,
                                        sent1):
    """
    When a request has already been executed, the previously executed reply
    will be sent again to the client. An acknowledgement will not be sent
    for a repeated request.
    """
    waitForSufficientRepliesForRequests(looper, client1, requests=[sent1])

    originalRequestResponsesLen = nodeCount * 2
    duplicateRequestRepliesLen = nodeCount  # for a duplicate request each node sends only a REPLY, no REQACK

    message_parts, err_msg = \
        client1.nodestack.prepare_for_sending(sent1, None)

    for part in message_parts:
        client1.nodestack._enqueueIntoAllRemotes(part, None)

    def chk():
        assertLength([
            response for response in client1.inBox
            if (response[0].get(f.RESULT.nm)
                and response[0][f.RESULT.nm][f.REQ_ID.nm] == sent1.reqId) or (
                    response[0].get(OP_FIELD_NAME) == REQACK
                    and response[0].get(f.REQ_ID.nm) == sent1.reqId)
        ], originalRequestResponsesLen + duplicateRequestRepliesLen)

    responseTimeout = waits.expectedTransactionExecutionTime(nodeCount)
    looper.run(eventually(chk, retryWait=1, timeout=responseTimeout))
Example #21
def setup(tconf, looper, txnPoolNodeSet, client, wallet1):
    # Patch the 3-phase batch creation method to send an incorrect digest once
    pr, otherR = getPrimaryReplica(txnPoolNodeSet, instId=0), \
        getNonPrimaryReplicas(txnPoolNodeSet, instId=0)

    reqs = sendRandomRequests(wallet1, client, tconf.Max3PCBatchSize)
    waitForSufficientRepliesForRequests(
        looper,
        client,
        requests=reqs,
        customTimeoutPerReq=tconf.Max3PCBatchWait)
    stateRoot = pr.stateRootHash(DOMAIN_LEDGER_ID, to_str=False)

    origMethod = pr.create3PCBatch
    malignedOnce = None

    def badMethod(self, ledgerId):
        nonlocal malignedOnce
        pp = origMethod(ledgerId)
        if not malignedOnce:
            pp = updateNamedTuple(pp, digest=pp.digest + '123')
            malignedOnce = True
        return pp

    pr.create3PCBatch = types.MethodType(badMethod, pr)
    sendRandomRequests(wallet1, client, tconf.Max3PCBatchSize)
    return pr, otherR, stateRoot
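
The digest corruption above relies on a generic Python technique: replace a bound method via types.MethodType so the patched function still receives the instance. A self-contained sketch of just that mechanism (plain Python, independent of plenum):

import types

class Replica:
    def create_batch(self, ledger_id):
        return 'digest-for-{}'.format(ledger_id)

replica = Replica()
orig_method = replica.create_batch  # bound original, like origMethod above
maligned_once = False

def bad_method(self, ledger_id):
    # Corrupt the result exactly once, then behave normally,
    # mirroring badMethod in the setup fixture above.
    global maligned_once
    result = orig_method(ledger_id)
    if not maligned_once:
        maligned_once = True
        return result + '123'
    return result

replica.create_batch = types.MethodType(bad_method, replica)
assert replica.create_batch(0) == 'digest-for-0123'  # first call corrupted
assert replica.create_batch(0) == 'digest-for-0'     # later calls clean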
Example #22
def test_slow_node_has_warn_unordered_log_msg(looper, nodeSet, wallet1,
                                              client1, patch_monitors):
    npr = getNonPrimaryReplicas(nodeSet, 0)[0]
    slow_node = npr.node

    monitor = nodeSet[0].monitor
    delay = monitor.WARN_NOT_PARTICIPATING_MIN_DIFF_SEC * \
        monitor.WARN_NOT_PARTICIPATING_UNORDERED_NUM + 10
    delaysCommitProcessing(slow_node, delay=delay)

    assert no_any_warn(*nodeSet), \
        'no node should have a warning before the test'

    for i in range(monitor.WARN_NOT_PARTICIPATING_UNORDERED_NUM):
        req = sendRandomRequest(wallet1, client1)
        waitForSufficientRepliesForRequests(looper, client1, requests=[req])
        looper.runFor(monitor.WARN_NOT_PARTICIPATING_MIN_DIFF_SEC)

    others = [node for node in nodeSet if node.name != slow_node.name]
    assert no_any_warn(*others), \
        'other nodes should have no warnings after the test'
    assert has_some_warn(slow_node), \
        'the slow node should have the warning'

    ordered_requests_keys_len_before = len(monitor.ordered_requests_keys)
    # wait at least the window time
    looper.runFor(monitor.WARN_NOT_PARTICIPATING_WINDOW_MINS * 60)
    req = sendRandomRequest(wallet1, client1)
    waitForSufficientRepliesForRequests(looper, client1, requests=[req])
    assert no_any_warn(*others), 'other nodes should have no warnings'
    assert no_last_warn(slow_node), \
        'the last call of warn_has_lot_unordered_requests returned False, ' \
        'so the slow node has no warning for now'
    assert len(monitor.ordered_requests_keys) < ordered_requests_keys_len_before, \
        "ordered_requests_keys was cleaned up"
Example #23
def testQueueingReqFromFutureView(delayed_perf_chk, looper, nodeSet, up,
                                  wallet1, client1):
    """
    Test that every node queues 3-phase messages (PRE-PREPARE, PREPARE and
    COMMIT) that come from a view greater than the current view.
    - Delay reception and processing of view change messages by a non-primary
      for the master instance => it starts receiving 3-phase commit messages
      for the next view
    """

    lagging_node = get_last_master_non_primary_node(nodeSet)
    old_view_no = lagging_node.viewNo

    # Delay processing of InstanceChange and ViewChangeDone so node stashes
    # 3PC messages
    delay_ic = 60
    lagging_node.nodeIbStasher.delay(icDelay(delay_ic))
    lagging_node.nodeIbStasher.delay(vcd_delay(delay_ic))
    logger.debug('{} will delay its view change'.format(lagging_node))

    def chk_fut_view(view_no, is_empty):
        length = len(lagging_node.msgsForFutureViews.get(view_no, ()))
        if is_empty:
            assert length == 0
        else:
            assert length > 0
        return length

    # No messages queued for future view
    chk_fut_view(old_view_no + 1, is_empty=True)
    logger.debug(
        '{} does not have any messages for future views'.format(lagging_node))

    # Every node except Node A should do a view change
    ensure_view_change(looper, [n for n in nodeSet if n != lagging_node],
                       [lagging_node])

    # send more requests that will be queued for the lagged node
    # sendReqsToNodesAndVerifySuffReplies(looper, wallet1, client1, 3)
    reqs = sendRandomRequests(wallet1, client1, 5)
    queued = looper.run(
        eventually(chk_fut_view, old_view_no + 1, False, retryWait=1))
    logger.debug('{} has {} messages for future views'.format(lagging_node,
                                                              queued))

    waitForSufficientRepliesForRequests(looper, client1, requests=reqs)
    # reset delays for the lagging node so that it finally makes the view
    # change
    lagging_node.reset_delays_and_process_delayeds()

    # Eventually no messages queued for future view
    looper.run(
        eventually(chk_fut_view,
                   old_view_no + 1,
                   True,
                   retryWait=1,
                   timeout=delay_ic + 10))
    logger.debug(
        '{} exhausted pending messages for future views'.format(lagging_node))

    send_reqs_to_nodes_and_verify_all_replies(looper, wallet1, client1, 2)
Example #24
def changeNodeHa(looper, txnPoolNodeSet, tdirWithClientPoolTxns,
                 poolTxnData, poolTxnStewardNames, tconf, shouldBePrimary, tdir):
    # prepare new ha for node and client stack
    subjectedNode = None
    stewardName = None
    stewardsSeed = None

    for nodeIndex, n in enumerate(txnPoolNodeSet):
        if shouldBePrimary == n.has_master_primary:
            subjectedNode = n
            stewardName = poolTxnStewardNames[nodeIndex]
            stewardsSeed = poolTxnData["seeds"][stewardName].encode()
            break

    nodeStackNewHA, clientStackNewHA = genHa(2)
    logger.debug("change HA for node: {} to {}".format(
        subjectedNode.name, (nodeStackNewHA, clientStackNewHA)))

    nodeSeed = poolTxnData["seeds"][subjectedNode.name].encode()

    # change HA
    stewardClient, req = changeHA(looper, tconf, subjectedNode.name, nodeSeed,
                                  nodeStackNewHA, stewardName, stewardsSeed,
                                  basedir=tdirWithClientPoolTxns)

    waitForSufficientRepliesForRequests(looper, stewardClient,
                                        requests=[req])

    # stop node for which HA will be changed
    subjectedNode.stop()
    looper.removeProdable(subjectedNode)

    # start node with new HA
    config_helper = PNodeConfigHelper(subjectedNode.name, tconf, chroot=tdir)
    restartedNode = TestNode(subjectedNode.name,
                             config_helper=config_helper,
                             config=tconf, ha=nodeStackNewHA,
                             cliha=clientStackNewHA)
    looper.add(restartedNode)
    txnPoolNodeSet[nodeIndex] = restartedNode
    looper.run(checkNodesConnected(txnPoolNodeSet, customTimeout=70))

    electionTimeout = waits.expectedPoolElectionTimeout(
        nodeCount=len(txnPoolNodeSet),
        numOfReelections=3)
    ensureElectionsDone(looper,
                        txnPoolNodeSet,
                        retryWait=1,
                        customTimeout=electionTimeout)

    # start client and check the node HA
    anotherClient, _ = genTestClient(tmpdir=tdirWithClientPoolTxns,
                                     usePoolLedger=True)
    looper.add(anotherClient)
    looper.run(eventually(anotherClient.ensureConnectedToNodes))
    stewardWallet = Wallet(stewardName)
    stewardWallet.addIdentifier(signer=DidSigner(seed=stewardsSeed))
    sendReqsToNodesAndVerifySuffReplies(
        looper, stewardWallet, stewardClient, 8)
Example #25
def testClientSendingSameRequestAgainBeforeFirstIsProcessed(
        looper, nodeSet, up, wallet1, client1):
    size = len(client1.inBox)
    req = sendRandomRequest(wallet1, client1)
    client1.submitReqs(req)
    waitForSufficientRepliesForRequests(looper, client1, requests=[req])
    # Only REQACK will be sent twice by each node, but not REPLY
    assert len(client1.inBox) == size + 12
Example #26
def updateNodeData(looper, stewardClient, stewardWallet, node, node_data):
    req = sendUpdateNode(stewardClient, stewardWallet, node, node_data)
    waitForSufficientRepliesForRequests(looper, stewardClient, requests=[req])
    # TODO: Not needed in ZStack, remove once raet is removed
    node.nodestack.clearLocalKeep()
    node.nodestack.clearRemoteKeeps()
    node.clientstack.clearLocalKeep()
    node.clientstack.clearRemoteKeeps()
Example #27
def getBalance(self) -> int:
    req = self.submit({
        TXN_TYPE: GET_BAL,
        TARGET_NYM: self.wallet.defaultId
    })
    waitForSufficientRepliesForRequests(self.looper, self.client,
                                        requests=[req])
    return self.client.hasConsensus(*req.key)[BALANCE]
Example #28
def test3PCOverBatchWithThresholdReqs(tconf, looper, txnPoolNodeSet, client,
                                      wallet1):
    """
    Check that 3-phase commit happens when a threshold number of requests
    is received and propagated.
    :return:
    """
    reqs = sendRandomRequests(wallet1, client, tconf.Max3PCBatchSize)
    waitForSufficientRepliesForRequests(looper, client, requests=reqs)
Example #29
def checkTxns(self):
    req = self.submit({
        TXN_TYPE: GET_ALL_TXNS,
        TARGET_NYM: self.wallet.defaultId
    })
    waitForSufficientRepliesForRequests(self.looper, self.client,
                                        requests=[req])

    return req
Example #30
def test_request_no_protocol_version(looper, txnPoolNodeSet, client1, wallet1,
                                     request_num):
    reqs = random_request_objects(request_num, protocol_version=None)
    reqs = sign_request_objects(wallet1, reqs)
    for req in reqs:
        assert req.protocolVersion is None

    send_signed_requests(client1, reqs)
    waitForSufficientRepliesForRequests(looper, client1, requests=reqs)
Example #31
def test3PCOverBatchWithLessThanThresholdReqs(tconf, looper, txnPoolNodeSet,
                                              client, wallet1):
    """
    Check that 3-phase commit happens when fewer than the threshold number
    of requests have been received but the threshold time has passed
    :return:
    """
    reqs = sendRandomRequests(wallet1, client, tconf.Max3PCBatchSize - 1)
    waitForSufficientRepliesForRequests(looper, client, requests=reqs)
Example #32
def ensureUpgradeSent(looper, trustee, trusteeWallet, upgradeData):
    upgrade, req = sendUpgrade(trustee, trusteeWallet, upgradeData)
    waitForSufficientRepliesForRequests(looper, trustee, requests=[req])

    def check():
        assert trusteeWallet.getPoolUpgrade(upgrade.key).seqNo

    timeout = plenumWaits.expectedReqAckQuorumTime()
    looper.run(eventually(check, retryWait=1, timeout=timeout))
    return upgrade
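
eventually, used here and throughout these examples, retries a check until it passes or a timeout expires. A self-contained synchronous toy version for readers outside the plenum/stp environment (the real helper is an async coroutine driven by the looper):

import time

def eventually_sync(check, retryWait=1.0, timeout=15.0):
    # Keep calling `check` until it stops raising or the timeout expires.
    deadline = time.monotonic() + timeout
    while True:
        try:
            return check()
        except AssertionError:
            if time.monotonic() >= deadline:
                raise
            time.sleep(retryWait)

# Usage mirroring the pattern above: the check fails twice, then passes.
attempts = {'n': 0}

def check():
    attempts['n'] += 1
    assert attempts['n'] >= 3

eventually_sync(check, retryWait=0.01, timeout=1.0)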
Example #33
def ensurePoolConfigSent(looper, trustee, trusteeWallet, sendPoolCfg):
    poolCfg, req = sendPoolConfig(trustee, trusteeWallet, sendPoolCfg)
    waitForSufficientRepliesForRequests(looper, trustee, requests=[req])

    def check():
        assert trusteeWallet.getPoolConfig(poolCfg.key).seqNo

    timeout = plenumWaits.expectedReqAckQuorumTime()
    looper.run(eventually(check, retryWait=1, timeout=timeout))
    return poolCfg
Example #34
def nodeThetaAdded(looper, nodeSet, tdirWithPoolTxns, tconf, steward,
                   stewardWallet, allPluginsPath, testNodeClass,
                   testClientClass, tdir):
    newStewardName = "testClientSteward" + randomString(3)
    newNodeName = "Theta"
    newSteward, newStewardWallet = getClientAddedWithRole(nodeSet, tdir,
                                                          looper, steward,
                                                          stewardWallet,
                                                          newStewardName,
                                                          role=STEWARD)

    sigseed = randomString(32).encode()
    nodeSigner = SimpleSigner(seed=sigseed)

    (nodeIp, nodePort), (clientIp, clientPort) = genHa(2)

    data = {
        NODE_IP: nodeIp,
        NODE_PORT: nodePort,
        CLIENT_IP: clientIp,
        CLIENT_PORT: clientPort,
        ALIAS: newNodeName,
        SERVICES: [VALIDATOR, ]
    }

    node = Node(nodeSigner.identifier, data, newStewardWallet.defaultId)

    newStewardWallet.addNode(node)
    reqs = newStewardWallet.preparePending()
    req, = newSteward.submitReqs(*reqs)

    waitForSufficientRepliesForRequests(looper, newSteward, requests=[req])

    def chk():
        assert newStewardWallet.getNode(node.id).seqNo is not None

    timeout = plenumWaits.expectedTransactionExecutionTime(len(nodeSet))
    looper.run(eventually(chk, retryWait=1, timeout=timeout))

    initNodeKeysForBothStacks(newNodeName, tdirWithPoolTxns, sigseed, override=True)

    newNode = testNodeClass(newNodeName, basedirpath=tdir, config=tconf,
                            ha=(nodeIp, nodePort), cliha=(clientIp, clientPort),
                            pluginPaths=allPluginsPath)

    nodeSet.append(newNode)
    looper.add(newNode)
    looper.run(checkNodesConnected(nodeSet))
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward,
                                                  *nodeSet)
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, newSteward,
                                                  *nodeSet)
    return newSteward, newStewardWallet, newNode
Example #35
def test_successive_batch_do_no_change_state(looper, tdirWithPoolTxns,
                                             tdirWithDomainTxnsUpdated,
                                             tconf, nodeSet,
                                             trustee, trusteeWallet):
    """
    Send 2 NYM txns in different batches such that the second batch does not
    change the state, so the state root remains the same, but keep the
    identifier and reqId different. Make sure the first request is not
    ordered by the primary before the PRE-PREPARE for the second is sent.
    Also check reject and commit
    :return:
    """
    prim_node = getPrimaryReplica(nodeSet, 0).node
    other_nodes = [n for n in nodeSet if n != prim_node]
    # Delay only first PRE-PREPARE
    pp_seq_no_to_delay = 1

    def specific_pre_prepare(wrappedMsg):
        nonlocal pp_seq_no_to_delay
        msg, sender = wrappedMsg
        if isinstance(msg, PrePrepare) and \
                        msg.instId == 0 and \
                        msg.ppSeqNo == pp_seq_no_to_delay:
            return 5

    def delay_commits(wrappedMsg):
        msg, sender = wrappedMsg
        if isinstance(msg, Commit) and msg.instId == 0:
            return 10

    def new_identity():
        wallet = Wallet(randomString(5))
        signer = DidSigner()
        new_idr, _ = wallet.addIdentifier(signer=signer)
        verkey = wallet.getVerkey(new_idr)
        idy = Identity(identifier=new_idr,
                       verkey=verkey,
                       role=None)
        return idy

    def submit_id_req(idy):
        nonlocal all_reqs
        trusteeWallet.updateTrustAnchoredIdentity(idy)
        reqs = trusteeWallet.preparePending()
        all_reqs.extend(reqs)
        trustee.submitReqs(*reqs)

    def check_verkey(i, vk):
        for node in nodeSet:
            data = node.reqHandler.idrCache.getNym(i, isCommitted=True)
            assert data[VERKEY] == vk

    def check_uncommitted(count):
        for node in nodeSet:
            assert len(node.reqHandler.idrCache.unCommitted) == count

    for node in other_nodes:
        node.nodeIbStasher.delay(specific_pre_prepare)

    idy = new_identity()
    new_idr = idy.identifier
    verkey = idy.verkey

    all_reqs = []

    # Setting the same verkey three times but in different batches with
    # different request ids
    for _ in range(3):
        submit_id_req(idy)
        looper.runFor(.2)

    waitForSufficientRepliesForRequests(looper, trustee, requests=all_reqs,
                                        add_delay_to_timeout=5)

    # Number of uncommitted entries is 0
    looper.run(eventually(check_uncommitted, 0))

    check_verkey(new_idr, verkey)

    pp_seq_no_to_delay = 4
    for node in other_nodes:
        node.nodeIbStasher.delay(specific_pre_prepare)

    # Setting the verkey to `x`, then `y` and then back to `x` but in different
    # batches with different request ids. The idea is to change the
    # state root to `t`, then `t'`, and then back to `t` and observe that no
    # errors are encountered

    idy = new_identity()
    new_idr = idy.identifier
    verkey = idy.verkey
    submit_id_req(idy)
    looper.runFor(.2)

    new_verkey = SimpleSigner().verkey
    idy.verkey = new_verkey
    submit_id_req(idy)
    looper.runFor(.2)

    idy.verkey = verkey
    submit_id_req(idy)
    looper.runFor(.2)

    waitForSufficientRepliesForRequests(looper, trustee, requests=all_reqs,
                                        add_delay_to_timeout=5)

    # Number of uncommitted entries is 0
    looper.run(eventually(check_uncommitted, 0))

    check_verkey(new_idr, verkey)

    # Delay COMMITs so that IdrCache can be checked for the correct
    # number of entries

    uncommitteds = {}
    methods = {}
    for node in nodeSet:
        node.nodeIbStasher.delay(delay_commits)

        cache = node.reqHandler.idrCache
        uncommitteds[cache._name] = []

        cre = cache.currentBatchCreated
        com = cache.onBatchCommitted
        methods[cache._name] = (cre, com)

        # Patch methods to record and check roots after commit

        def patched_cre(self, stateRoot):
            uncommitteds[self._name].append(stateRoot)
            return methods[self._name][0](stateRoot)

        def patched_com(self, stateRoot):
            assert uncommitteds[self._name][0] == stateRoot
            rv = methods[self._name][1](stateRoot)
            uncommitteds[self._name] = uncommitteds[self._name][1:]
            return rv

        cache.currentBatchCreated = types.MethodType(patched_cre, cache)
        cache.onBatchCommitted = types.MethodType(patched_com, cache)

    # Set verkey of multiple identities
    more = 5
    keys = {}
    for _ in range(more):
        idy = new_identity()
        keys[idy.identifier] = idy.verkey
        submit_id_req(idy)
        looper.runFor(.01)

    # Correct number of uncommitted entries
    looper.run(eventually(check_uncommitted, more, retryWait=1))

    waitForSufficientRepliesForRequests(looper, trustee, requests=all_reqs,
                                        add_delay_to_timeout=10)

    # Number of uncommitted entries is 0
    looper.run(eventually(check_uncommitted, 0))

    # The verkeys are correct
    for i, v in keys.items():
        check_verkey(i, v)

    keys = {}
    for _ in range(3):
        idy = new_identity()
        keys[idy.identifier] = idy.verkey
        submit_id_req(idy)
        looper.runFor(.01)

    # Correct number of uncommitted entries
    looper.run(eventually(check_uncommitted, 3, retryWait=1))

    # Check batch reject
    for node in nodeSet:
        cache = node.reqHandler.idrCache
        initial = cache.unCommitted
        cache.batchRejected()
        # After reject, last entry is removed
        assert cache.unCommitted == initial[:-1]
        root = cache.unCommitted[0][0]
        cache.onBatchCommitted(root)
        # Calling commit with the same root results in an AssertionError
        with pytest.raises(AssertionError):
            cache.onBatchCommitted(root)
Example #36
def test_successive_batch_do_no_change_state(looper,
                                             tdirWithDomainTxnsUpdated,
                                             tdirWithClientPoolTxns,
                                             tconf, nodeSet, trustee,
                                             trusteeWallet, monkeypatch):
    """
    Send 2 NYM txns in different batches such that the second batch does not
    change the state, so the state root remains the same, but keep the
    identifier and reqId different. Make sure the first request is not
    ordered by the primary before the PRE-PREPARE for the second is sent.
    Also check reject and commit
    :return:
    """
    all_reqs = []

    # Delay only first PRE-PREPARE
    pp_seq_no_to_delay = 1
    delay_pp_duration = 5
    delay_cm_duration = 10

    def delay_commits(wrappedMsg):
        msg, sender = wrappedMsg
        if isinstance(msg, Commit) and msg.instId == 0:
            return delay_cm_duration

    def new_identity():
        wallet = Wallet(randomString(5))
        signer = DidSigner()
        new_idr, _ = wallet.addIdentifier(signer=signer)
        verkey = wallet.getVerkey(new_idr)
        idy = Identity(identifier=new_idr,
                       verkey=verkey,
                       role=None)
        return idy, wallet

    def submit_id_req(idy, wallet=None, client=None):
        nonlocal all_reqs
        wallet = wallet if wallet is not None else trusteeWallet
        client = client if client is not None else trustee
        wallet.updateTrustAnchoredIdentity(idy)
        reqs = wallet.preparePending()
        all_reqs.extend(reqs)
        client.submitReqs(*reqs)
        return reqs

    def submit_id_req_and_wait(idy, wallet=None, client=None):
        reqs = submit_id_req(idy, wallet=wallet, client=client)
        looper.runFor(.2)
        return reqs

    def check_verkey(i, vk):
        for node in nodeSet:
            data = node.idrCache.getNym(i, isCommitted=True)
            assert data[VERKEY] == vk

    def check_uncommitted(count):
        for node in nodeSet:
            assert len(node.idrCache.un_committed) == count

    for node in nodeSet:
        for rpl in node.replicas:
            monkeypatch.setattr(rpl, '_request_missing_three_phase_messages',
                                lambda *x, **y: None)

    idy, new_wallet = new_identity()
    new_idr = idy.identifier
    verkey = idy.verkey

    submit_id_req(idy)
    waitForSufficientRepliesForRequests(looper, trustee, requests=all_reqs[-1:],
                                        add_delay_to_timeout=delay_cm_duration)

    for node in nodeSet:
        node.nodeIbStasher.delay(delay_commits)

    new_client, _ = genTestClient(nodeSet, tmpdir=tdirWithClientPoolTxns,
                                  usePoolLedger=True)
    looper.add(new_client)
    looper.run(new_client.ensureConnectedToNodes(count=len(nodeSet)))
    new_client.registerObserver(new_wallet.handleIncomingReply, name='temp')
    idy.seqNo = None

    # Setting the same verkey thrice but in different batches with different
    #  request ids
    for _ in range(3):
        req, = submit_id_req_and_wait(idy, wallet=new_wallet, client=new_client)
        logger.debug('{} sent request {} to change verkey'.
                     format(new_client, req))

    waitForSufficientRepliesForRequests(looper, new_client,
                                        requests=all_reqs[-3:],
                                        add_delay_to_timeout=delay_cm_duration)

    # Number of uncommitted entries is 0
    looper.run(eventually(check_uncommitted, 0))

    check_verkey(new_idr, verkey)

    new_client.deregisterObserver(name='temp')

    # Setting the verkey to `x`, then `y` and then back to `x` but in different
    # batches with different request ids. The idea is to change
    # state root to `t` then `t'` and then back to `t` and observe that no
    # errors are encountered

    idy, new_wallet = new_identity()
    submit_id_req(idy)
    waitForSufficientRepliesForRequests(looper, trustee, requests=all_reqs[-1:],
                                        add_delay_to_timeout=delay_cm_duration)

    new_client.registerObserver(new_wallet.handleIncomingReply)

    idy.seqNo = None
    x_signer = SimpleSigner(identifier=idy.identifier)
    idy.verkey = x_signer.verkey
    req, = submit_id_req_and_wait(idy, wallet=new_wallet, client=new_client)
    new_wallet.updateSigner(idy.identifier, x_signer)
    logger.debug('{} sent request {} to change verkey'.
                 format(new_client, req))

    y_signer = SimpleSigner(identifier=idy.identifier)
    idy.verkey = y_signer.verkey
    req, = submit_id_req_and_wait(idy, wallet=new_wallet, client=new_client)
    new_wallet.updateSigner(idy.identifier, y_signer)
    logger.debug('{} sent request {} to change verkey'.
                 format(new_client, req))

    idy.verkey = x_signer.verkey
    req, = submit_id_req_and_wait(idy, wallet=new_wallet, client=new_client)
    new_wallet.updateSigner(idy.identifier, x_signer)
    logger.debug('{} sent request {} to change verkey'.
                 format(new_client, req))

    waitForSufficientRepliesForRequests(looper, new_client,
                                        requests=all_reqs[-3:],
                                        add_delay_to_timeout=delay_cm_duration)

    # Number of uncommitted entries is 0
    looper.run(eventually(check_uncommitted, 0))

    check_verkey(new_idr, verkey)
    monkeypatch.undo()

    # Delay COMMITs so that IdrCache can be checked for correct
    # number of entries

    uncommitteds = {}
    methods = {}
    for node in nodeSet:
        cache = node.idrCache
        uncommitteds[cache._name] = []

        cre = cache.currentBatchCreated
        com = cache.onBatchCommitted
        methods[cache._name] = (cre, com)

        # Patch methods to record and check roots after commit

        def patched_cre(self, stateRoot):
            uncommitteds[self._name].append(stateRoot)
            return methods[self._name][0](stateRoot)

        def patched_com(self, stateRoot):
            assert uncommitteds[self._name][0] == stateRoot
            rv = methods[self._name][1](stateRoot)
            uncommitteds[self._name] = uncommitteds[self._name][1:]
            return rv

        cache.currentBatchCreated = types.MethodType(patched_cre, cache)
        cache.onBatchCommitted = types.MethodType(patched_com, cache)

    # Set verkey of multiple identities
    more = 5
    keys = {}
    for _ in range(more):
        idy, _ = new_identity()
        keys[idy.identifier] = idy.verkey
        submit_id_req(idy)
        looper.runFor(.01)

    # Correct number of uncommitted entries
    looper.run(eventually(check_uncommitted, more, retryWait=1))

    waitForSufficientRepliesForRequests(looper, trustee,
                                        requests=all_reqs[-more:],
                                        add_delay_to_timeout=delay_cm_duration)

    # Number of uncommitted entries is 0
    looper.run(eventually(check_uncommitted, 0))

    # The verkeys are correct
    for i, v in keys.items():
        check_verkey(i, v)

    waitNodeDataEquality(looper, nodeSet[0], *nodeSet[1:])

    keys = {}
    for _ in range(3):
        idy, _ = new_identity()
        keys[idy.identifier] = idy.verkey
        submit_id_req(idy)
        looper.runFor(.01)

    # Correct number of uncommitted entries
    looper.run(eventually(check_uncommitted, 3, retryWait=1))

    # Check batch reject
    for node in nodeSet:
        cache = node.idrCache
        initial = cache.un_committed
        cache.batchRejected()
        # After reject, last entry is removed
        assert cache.un_committed == initial[:-1]
        root = cache.un_committed[0][0]
        cache.onBatchCommitted(root)
        # Calling commit with the same root results in an AssertionError
        with pytest.raises(AssertionError):
            cache.onBatchCommitted(root)
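
Taken together, the examples above exercise a handful of keyword arguments on waitForSufficientRepliesForRequests. A summary sketch of the call shapes observed (semantics inferred from usage in these examples, not from library documentation; the import path is an assumption):

# Assumed import path, as commonly seen in indy-plenum test code:
# from plenum.test.helper import waitForSufficientRepliesForRequests

def wait_call_shapes(looper, client, reqs, injected_delay):
    # Default: wait for sufficient matching replies for every request.
    waitForSufficientRepliesForRequests(looper, client, requests=reqs)
    # Explicit fault tolerance (Examples #1 and #14).
    waitForSufficientRepliesForRequests(looper, client, requests=reqs,
                                        fVal=1)
    # Per-request timeout override, allowed to exceed the global cap
    # (Examples #11 and #15).
    waitForSufficientRepliesForRequests(looper, client, requests=reqs,
                                        customTimeoutPerReq=100,
                                        override_timeout_limit=True)
    # Pad the timeout to compensate for deliberately delayed messages
    # (Examples #35 and #36).
    waitForSufficientRepliesForRequests(looper, client, requests=reqs,
                                        add_delay_to_timeout=injected_delay)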