# Common imports for the examples below. Module paths are assumed from the
# plenum test suite and may differ between the projects these examples were
# taken from; treat them as a best guess, not an authoritative list.
import json

from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.messages.node_messages import PrePrepare
from plenum.test import waits
from plenum.test.batching_3pc.helper import (add_txns_to_ledger_before_order,
                                             checkNodesHaveSameRoots)
from plenum.test.delayers import cDelay, cpDelay
from plenum.test.helper import (sdk_get_replies, sdk_send_and_check,
                                sdk_send_random_and_check,
                                sdk_send_random_requests,
                                sdk_send_signed_requests,
                                sdk_signed_random_requests,
                                sendRandomRequests,
                                waitForSufficientRepliesForRequests)
from plenum.test.spy_helpers import getAllArgs
from plenum.test.test_node import getNonPrimaryReplicas, getPrimaryReplica
from stp_core.common.log import getlogger

logger = getlogger()


def testTreeRootsCorrectAfterEachBatch(tconf, looper, txnPoolNodeSet,
                                       sdk_pool_handle, sdk_wallet_client):
    """
    Check if both state root and txn tree root are correct and same on each
    node after each batch
    :return:
    """
    # Send 1 batch
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, tconf.Max3PCBatchSize)
    checkNodesHaveSameRoots(txnPoolNodeSet)

    # Send 2 batches
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 2 * tconf.Max3PCBatchSize)
    checkNodesHaveSameRoots(txnPoolNodeSet)
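
# What checkNodesHaveSameRoots verifies, as a minimal sketch (an assumption,
# not plenum's actual helper): every node's committed txn tree root and state
# trie root match the first node's. The attribute names are taken from the
# examples below; using ledger id 1 (DOMAIN_LEDGER_ID) as the default is
# an assumption.
def check_nodes_have_same_roots_sketch(nodes, ledger_id=1):
    reference = nodes[0]
    ref_txn_root = reference.getLedger(ledger_id).tree.root_hash
    ref_state_root = reference.getState(ledger_id).committedHeadHash
    for node in nodes[1:]:
        assert node.getLedger(ledger_id).tree.root_hash == ref_txn_root
        assert node.getState(ledger_id).committedHeadHash == ref_state_root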
Example #2
def test_catchup_during_3pc(tconf, looper, txnPoolNodeSet, sdk_wallet_client,
                            sdk_pool_handle):
    reqs = sdk_signed_random_requests(looper, sdk_wallet_client,
                                      tconf.Max3PCBatchSize)
    non_primary_replica = getNonPrimaryReplicas(txnPoolNodeSet, instId=0)[0]

    # Simulate catch-up (add txns to the ledger): add the txns corresponding
    # to the requests after enough COMMITs have been received to order, but
    # before ordering happens.
    add_txns_to_ledger_before_order(
        non_primary_replica,
        [json.loads(req) for req in reqs[:tconf.Max3PCBatchSize]])
    sdk_send_and_check(reqs, looper, txnPoolNodeSet, sdk_pool_handle)
    checkNodesHaveSameRoots(txnPoolNodeSet)
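
# A minimal, self-contained sketch of the race this example sets up (names
# are illustrative, not plenum's API): txns land on the ledger via catch-up
# after enough COMMITs were collected but before ordering, so ordering must
# not append them a second time.
def ordering_after_catchup_sketch():
    ledger = []

    def append_if_absent(txns):
        for txn in txns:
            if txn not in ledger:   # appending must be idempotent here
                ledger.append(txn)

    caught_up = ['txn1', 'txn2']
    append_if_absent(caught_up)     # simulated catch-up writes first
    append_if_absent(caught_up)     # ordering then finds them already present
    assert ledger == ['txn1', 'txn2']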
Example #4
def send_and_check(signed_reqs, looper, txnPoolNodeSet, pool_h, timeout=None):
    if not timeout:
        timeout_per_request = waits.expectedTransactionExecutionTime(
            len(txnPoolNodeSet))
        # Take the time to execute N requests into account: total_timeout
        # should satisfy
        #   timeout_per_request < total_timeout < timeout_per_request * N.
        # We cannot simply take (timeout_per_request * N) because it is far
        # too large (for timeout_per_request=5 and N=10 it would be 50 sec),
        # so start with a simple formula:
        timeout = (1 + len(signed_reqs) / 10) * timeout_per_request

    results = sdk_send_signed_requests(pool_h, signed_reqs)
    sdk_get_replies(looper, results, timeout=timeout)
    checkNodesHaveSameRoots(txnPoolNodeSet)
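
# A worked instance of the timeout formula above (values are illustrative):
timeout_per_request_example = 5
n_requests_example = 10
timeout_example = (1 + n_requests_example / 10) * timeout_per_request_example
# 5 < 10.0 < 50: the result stays inside the intended window, well below the
# naive timeout_per_request * N = 50 sec.
assert (timeout_per_request_example < timeout_example <
        timeout_per_request_example * n_requests_example)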
Example #5
def testTreeRootsCorrectAfterEachBatch(tconf, looper, txnPoolNodeSet, client,
                                       wallet1):
    """
    Check if both state root and txn tree root are correct and same on each
    node after each batch
    :return:
    """
    # Send 1 batch
    reqs = sendRandomRequests(wallet1, client, tconf.Max3PCBatchSize)
    waitForSufficientRepliesForRequests(looper, client, requests=reqs)
    checkNodesHaveSameRoots(txnPoolNodeSet)

    # Send 2 batches
    reqs = sendRandomRequests(wallet1, client, 2 * tconf.Max3PCBatchSize)
    waitForSufficientRepliesForRequests(looper, client, requests=reqs)
    checkNodesHaveSameRoots(txnPoolNodeSet)
Example #6
def testPrePrepareProcessedInOrder(tconf, looper, txnPoolNodeSet, wallet1,
                                   client):
    """
    A non-primary receives PRE-PREPARE out of order, it receives with ppSeqNo 2
     earlier than it receives the one with ppSeqNo 1 but it stashes the one
     with ppSeqNo 2 and only unstashes it for processing once it has
     processed PRE-PREPARE with ppSeqNo 1
    :return:
    """
    pr, otherR = getPrimaryReplica(txnPoolNodeSet, instId=0), \
                 getNonPrimaryReplicas(txnPoolNodeSet, instId=0)
    otherNodes = [r.node for r in otherR]
    ppsToDelay = 2
    ppDelay = 3
    delayeds = 0
    expectedDelayeds = (len(txnPoolNodeSet) - 1) * ppsToDelay
    delayedPpSeqNos = set()

    def specificPrePrepares(wrappedMsg):
        nonlocal delayeds
        msg, sender = wrappedMsg
        if isinstance(msg, PrePrepare) and delayeds < expectedDelayeds:
            delayeds += 1
            delayedPpSeqNos.add(msg.ppSeqNo)
            logger.debug('ppSeqNo {} would be delayed'.format(msg.ppSeqNo))
            return ppDelay

    for node in otherNodes:
        logger.debug(
            '{} would be delaying reception of some pre-prepares'.format(node))
        node.nodeIbStasher.delay(specificPrePrepares)

    reqs = sendRandomRequests(wallet1, client,
                              (ppsToDelay + 1) * tconf.Max3PCBatchSize)

    waitForSufficientRepliesForRequests(looper, client, requests=reqs)
    checkNodesHaveSameRoots(txnPoolNodeSet)

    for r in otherR:
        seqNos = [a['pp'].ppSeqNo for a in getAllArgs(r, r.addToPrePrepares)]
        # getAllArgs yields the most recent call first (hence the reverse);
        # after reversing, the sequence must be ascending, i.e. the replica
        # processed the PRE-PREPAREs in ppSeqNo order.
        seqNos.reverse()
        assert sorted(seqNos) == seqNos
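
# A minimal, self-contained sketch (not plenum's implementation) of the
# stash-and-unstash behaviour the test above describes: PRE-PREPAREs must be
# processed in ppSeqNo order, so any that arrive early are stashed and only
# unstashed once their predecessors have been processed.
def process_pre_prepares_in_order_sketch():
    stashed = set()
    next_expected = 1
    processed = []

    def on_pre_prepare(pp_seq_no):
        nonlocal next_expected
        if pp_seq_no != next_expected:
            stashed.add(pp_seq_no)       # arrived early: stash it
            return
        processed.append(pp_seq_no)
        next_expected += 1
        while next_expected in stashed:  # unstash anything now processable
            stashed.discard(next_expected)
            processed.append(next_expected)
            next_expected += 1

    on_pre_prepare(2)   # out of order: stashed
    on_pre_prepare(1)   # processed; 2 is then unstashed and processed
    assert processed == [1, 2]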
def testPrePrepareProcessedInOrder(perf_chk_patched, looper, txnPoolNodeSet,
                                   sdk_pool_handle, sdk_wallet_client):
    """
    A non-primary receives PRE-PREPARE out of order, it receives with ppSeqNo 2
     earlier than it receives the one with ppSeqNo 1 but it stashes the one
     with ppSeqNo 2 and only unstashes it for processing once it has
     processed PRE-PREPARE with ppSeqNo 1
    :return:
    """
    tconf = perf_chk_patched
    pr, otherR = getPrimaryReplica(txnPoolNodeSet, instId=0), \
                 getNonPrimaryReplicas(txnPoolNodeSet, instId=0)
    otherNodes = [r.node for r in otherR]
    ppsToDelay = 2
    pp_delay = 3
    delayeds = 0
    expectedDelayeds = (len(txnPoolNodeSet) - 1) * ppsToDelay
    delayedPpSeqNos = set()

    def specificPrePrepares(wrappedMsg):
        nonlocal delayeds
        msg, sender = wrappedMsg
        if isinstance(msg, PrePrepare) and delayeds < expectedDelayeds:
            delayeds += 1
            delayedPpSeqNos.add(msg.ppSeqNo)
            logger.debug('ppSeqNo {} would be delayed'.format(msg.ppSeqNo))
            return pp_delay

    for node in otherNodes:
        logger.debug('{} would be delaying reception of some pre-prepares'.
                     format(node))
        node.nodeIbStasher.delay(specificPrePrepares)

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client,
                              (ppsToDelay + 1) * tconf.Max3PCBatchSize)

    checkNodesHaveSameRoots(txnPoolNodeSet)

    for r in otherR:
        seqNos = [a['pp'].ppSeqNo for a in getAllArgs(r, r._ordering_service._add_to_pre_prepares)]
        seqNos.reverse()
        assert sorted(seqNos) == seqNos
def testPrePrepareProcessedInOrder(perf_chk_patched, looper, txnPoolNodeSet,
                                   sdk_pool_handle, sdk_wallet_client):
    """
    A non-primary receives PRE-PREPARE out of order, it receives with ppSeqNo 2
     earlier than it receives the one with ppSeqNo 1 but it stashes the one
     with ppSeqNo 2 and only unstashes it for processing once it has
     processed PRE-PREPARE with ppSeqNo 1
    :return:
    """
    tconf = perf_chk_patched
    pr, otherR = getPrimaryReplica(txnPoolNodeSet, instId=0), \
                 getNonPrimaryReplicas(txnPoolNodeSet, instId=0)
    otherNodes = [r.node for r in otherR]
    ppsToDelay = 2
    pp_delay = 3
    delayeds = 0
    expectedDelayeds = (len(txnPoolNodeSet) - 1) * ppsToDelay
    delayedPpSeqNos = set()

    def specificPrePrepares(wrappedMsg):
        nonlocal delayeds
        msg, sender = wrappedMsg
        if isinstance(msg, PrePrepare) and delayeds < expectedDelayeds:
            delayeds += 1
            delayedPpSeqNos.add(msg.ppSeqNo)
            logger.debug('ppSeqNo {} would be delayed'.format(msg.ppSeqNo))
            return pp_delay

    for node in otherNodes:
        logger.debug('{} would be delaying reception of some pre-prepares'.
                     format(node))
        node.nodeIbStasher.delay(specificPrePrepares)

    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client,
                              (ppsToDelay + 1) * tconf.Max3PCBatchSize)

    checkNodesHaveSameRoots(txnPoolNodeSet)

    for r in otherR:
        seqNos = [a['pp'].ppSeqNo for a in getAllArgs(r, r.addToPrePrepares)]
        seqNos.reverse()
        assert sorted(seqNos) == seqNos
Example #9
def test_unordered_state_reverted_before_catchup(tconf, looper, txnPoolNodeSet,
                                                 sdk_wallet_client,
                                                 sdk_pool_handle):
    """
    Check that unordered state is reverted before starting catchup:
    - save the initial state on a node
    - slow down processing of COMMITs
    - send requests
    - wait until other nodes come to consensus
    - call start of catch-up
    - check that the state of the slow node is reverted and equal to the initial one.
    """
    # CONFIG

    ledger_id = DOMAIN_LEDGER_ID
    non_primary_node = getNonPrimaryReplicas(txnPoolNodeSet, instId=0)[0].node
    non_primary_ledger = non_primary_node.getLedger(ledger_id)
    non_primary_state = non_primary_node.getState(ledger_id)

    # send reqs and make sure we are at the same state

    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 10)
    sdk_send_and_check(reqs, looper, txnPoolNodeSet, sdk_pool_handle)
    checkNodesHaveSameRoots(txnPoolNodeSet)

    # the state of the node before
    committed_ledger_before = non_primary_ledger.tree.root_hash
    uncommitted_ledger_before = non_primary_ledger.uncommittedRootHash
    committed_state_before = non_primary_state.committedHeadHash
    uncommitted_state_before = non_primary_state.headHash

    # EXECUTE

    # Delay COMMIT messages on the node
    non_primary_node.nodeIbStasher.delay(cDelay())
    # Delay consistency proofs so that catch-up does not finish
    non_primary_node.nodeIbStasher.delay(cpDelay())

    # send requests
    reqs = sdk_send_random_requests(looper, sdk_pool_handle, sdk_wallet_client,
                                    tconf.Max3PCBatchSize)
    sdk_get_replies(looper, reqs, timeout=40)

    committed_ledger_during_3pc = non_primary_node.getLedger(
        ledger_id).tree.root_hash
    uncommitted_ledger_during_3pc = non_primary_node.getLedger(
        ledger_id).uncommittedRootHash
    committed_state_during_3pc = non_primary_node.getState(
        ledger_id).committedHeadHash
    uncommitted_state_during_3pc = non_primary_node.getState(
        ledger_id).headHash

    # start catchup
    non_primary_node.start_catchup()

    committed_ledger_reverted = non_primary_ledger.tree.root_hash
    uncommitted_ledger_reverted = non_primary_ledger.uncommittedRootHash
    committed_state_reverted = non_primary_state.committedHeadHash
    uncommitted_state_reverted = non_primary_state.headHash

    # CHECK

    # check that initial uncommitted state differs from the state during 3PC
    #  but committed does not
    assert committed_ledger_before == committed_ledger_during_3pc
    assert uncommitted_ledger_before != uncommitted_ledger_during_3pc
    assert committed_state_before == committed_state_during_3pc
    assert uncommitted_state_before != uncommitted_state_during_3pc

    assert committed_ledger_before == committed_ledger_reverted
    assert uncommitted_ledger_before == uncommitted_ledger_reverted
    assert committed_state_before == committed_state_reverted
    assert uncommitted_state_before == uncommitted_state_reverted
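
# The invariants this test checks, gathered into one hedged helper. The field
# names follow the snapshots taken in the code above; 'Snapshot' and
# 'check_reverted' themselves are illustrative, not plenum helpers.
from collections import namedtuple

Snapshot = namedtuple('Snapshot', ['committed_ledger', 'uncommitted_ledger',
                                   'committed_state', 'uncommitted_state'])

def check_reverted(before, during_3pc, reverted):
    # During 3PC only the uncommitted roots move...
    assert before.committed_ledger == during_3pc.committed_ledger
    assert before.uncommitted_ledger != during_3pc.uncommitted_ledger
    assert before.committed_state == during_3pc.committed_state
    assert before.uncommitted_state != during_3pc.uncommitted_state
    # ...and the revert restores everything to the initial snapshot.
    assert before == reverted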
def test_unordered_state_reverted_before_catchup(
        tconf, looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle):
    """
    Check that unordered state is reverted before starting catchup:
    - save the initial state on a node
    - slow down processing of COMMITs
    - send requests
    - wait until other nodes come to consensus
    - call start of catch-up
    - check that the state of the slow node is reverted and equal to the initial one.
    """
    # CONFIG

    ledger_id = DOMAIN_LEDGER_ID
    non_primary_node = getNonPrimaryReplicas(txnPoolNodeSet, instId=0)[0].node
    non_primary_ledger = non_primary_node.getLedger(ledger_id)
    non_primary_state = non_primary_node.getState(ledger_id)

    # send reqs and make sure we are at the same state

    reqs = sdk_signed_random_requests(looper, sdk_wallet_client, 10)
    sdk_send_and_check(reqs, looper, txnPoolNodeSet, sdk_pool_handle)
    checkNodesHaveSameRoots(txnPoolNodeSet)

    # the state of the node before
    committed_ledger_before = non_primary_ledger.tree.root_hash
    uncommitted_ledger_before = non_primary_ledger.uncommittedRootHash
    committed_state_before = non_primary_state.committedHeadHash
    uncommitted_state_before = non_primary_state.headHash

    # EXECUTE

    # Delay COMMIT messages on the node
    delay_c = 60
    non_primary_node.nodeIbStasher.delay(cDelay(delay_c))

    # send requests
    reqs = sdk_send_random_requests(looper, sdk_pool_handle,
                                    sdk_wallet_client, tconf.Max3PCBatchSize)
    sdk_get_replies(looper, reqs, timeout=40)

    committed_ledger_during_3pc = non_primary_node.getLedger(
        ledger_id).tree.root_hash
    uncommitted_ledger_during_3pc = non_primary_node.getLedger(
        ledger_id).uncommittedRootHash
    committed_state_during_3pc = non_primary_node.getState(
        ledger_id).committedHeadHash
    uncommitted_state_during_3pc = non_primary_node.getState(
        ledger_id).headHash

    # start catch-up: trigger the pre-catch-up callback, which reverts the
    # unordered state
    non_primary_node.ledgerManager.preCatchupClbk(ledger_id)

    committed_ledger_reverted = non_primary_ledger.tree.root_hash
    uncommitted_ledger_reverted = non_primary_ledger.uncommittedRootHash
    committed_state_reverted = non_primary_state.committedHeadHash
    uncommitted_state_reverted = non_primary_state.headHash

    # CHECK

    # check that initial uncommitted state differs from the state during 3PC
    #  but committed does not
    assert committed_ledger_before == committed_ledger_during_3pc
    assert uncommitted_ledger_before != uncommitted_ledger_during_3pc
    assert committed_state_before == committed_state_during_3pc
    assert uncommitted_state_before != uncommitted_state_during_3pc

    assert committed_ledger_before == committed_ledger_reverted
    assert uncommitted_ledger_before == uncommitted_ledger_reverted
    assert committed_state_before == committed_state_reverted
    assert uncommitted_state_before == uncommitted_state_reverted