def test_process_ordered(checkpoint_service, ordered, pre_prepare, tconf):
    with pytest.raises(
            LogicError,
            match="CheckpointService | Can't process Ordered msg because "
            "ppSeqNo {} not in preprepared".format(ordered.ppSeqNo)):
        checkpoint_service.process_ordered(ordered)

    checkpoint_service._data.preprepared.append(
        preprepare_to_batch_id(pre_prepare))
    checkpoint_service.process_ordered(ordered)
    _check_checkpoint(checkpoint_service, tconf.CHK_FREQ, pre_prepare)

    pre_prepare.ppSeqNo = tconf.CHK_FREQ
    ordered.ppSeqNo = pre_prepare.ppSeqNo
    checkpoint_service._data.preprepared.append(
        preprepare_to_batch_id(pre_prepare))
    checkpoint_service.process_ordered(ordered)
    _check_checkpoint(checkpoint_service,
                      tconf.CHK_FREQ,
                      pre_prepare,
                      check_shared_data=True)

    pre_prepare.ppSeqNo += 1
    ordered.ppSeqNo = pre_prepare.ppSeqNo
    checkpoint_service._data.preprepared.append(
        preprepare_to_batch_id(pre_prepare))
    checkpoint_service.process_ordered(ordered)
    _check_checkpoint(checkpoint_service, tconf.CHK_FREQ * 2, pre_prepare)
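
# Every example in this file feeds PrePrepares through preprepare_to_batch_id.
# A minimal sketch of such a helper, assuming BatchID is the
# (view_no, pp_view_no, pp_seq_no, pp_digest) named tuple used in the tests
# below and that re-sent PrePrepares keep their original view in an optional
# originalViewNo field (both are assumptions, not the production definition):
from collections import namedtuple

SketchBatchID = namedtuple('SketchBatchID',
                           ['view_no', 'pp_view_no', 'pp_seq_no', 'pp_digest'])


def sketch_preprepare_to_batch_id(pre_prepare):
    # fall back to viewNo for PrePrepares created in the current view
    pp_view_no = getattr(pre_prepare, 'originalViewNo', None)
    if pp_view_no is None:
        pp_view_no = pre_prepare.viewNo
    return SketchBatchID(view_no=pre_prepare.viewNo,
                         pp_view_no=pp_view_no,
                         pp_seq_no=pre_prepare.ppSeqNo,
                         pp_digest=pre_prepare.digest)
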
def test_send_reply_on_old_view_pre_prepares_request(
        external_bus, orderer, initial_view_no, stored_old_view_pre_prepares,
        requested_old_view_pre_prepares):
    # Setup
    orderer._data.view_no = initial_view_no + 2

    orderer._update_old_view_preprepares(stored_old_view_pre_prepares)

    # Receive OldViewPrePrepareRequest req
    batches = [
        preprepare_to_batch_id(pp) for pp in requested_old_view_pre_prepares
    ]
    req = OldViewPrePrepareRequest(0, batches)
    frm = "node1"
    orderer._network.process_incoming(req,
                                      generateName(frm, orderer._data.inst_id))

    # Check that OldViewPrePrepareReply is sent for all requested PrePrepares
    if not orderer.is_master:
        assert len(external_bus.sent_messages) == 0
        return
    # equivalent to the set intersection of stored and requested PrePrepares
    expected_pps = [
        i for i in stored_old_view_pre_prepares
        if i in requested_old_view_pre_prepares
    ]
    expected_pps = sorted(expected_pps, key=lambda pp: pp.ppSeqNo)
    check_reply_old_view_preprepares_sent(external_bus, frm, expected_pps)
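
# The filtering above is effectively a set intersection between the stored and
# the requested PrePrepares. A hedged sketch of that selection as a standalone
# helper (hypothetical, not the orderer's implementation), assuming the request
# carries batch ids produced by preprepare_to_batch_id:
def select_requested_old_view_preprepares(stored_pre_prepares, requested_batch_ids):
    requested = set(requested_batch_ids)
    matching = [pp for pp in stored_pre_prepares
                if preprepare_to_batch_id(pp) in requested]
    # the test above expects the replies in ppSeqNo order
    return sorted(matching, key=lambda pp: pp.ppSeqNo)
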
def test_node_reg_in_ordered_from_audit_for_tree_txns(test_node):
    node_regs = {}
    replica = test_node.master_replica
    node_reg = ["Alpha", "Beta", "Gamma", "Delta", "Eta"]
    for i in range(3):
        pp = create_pre_prepare_no_bls(state_root=generate_state_root(),
                                       pp_seq_no=i)
        key = (pp.viewNo, pp.ppSeqNo)
        replica._ordering_service.prePrepares[key] = pp
        replica._consensus_data.preprepared.append(preprepare_to_batch_id(pp))
        three_pc_batch = ThreePcBatch.from_pre_prepare(
            pre_prepare=pp,
            state_root=pp.stateRootHash,
            txn_root=pp.txnRootHash,
            valid_digests=pp.reqIdr)
        three_pc_batch.node_reg = node_reg + ["Node{}".format(i + 10)]
        three_pc_batch.primaries = ["Alpha", "Beta"]
        test_node.write_manager.audit_b_handler.post_batch_applied(
            three_pc_batch)
        node_regs[key] = three_pc_batch.node_reg

    for key in reversed(list(node_regs.keys())):
        pp = replica._ordering_service.get_preprepare(*key)
        assert replica._ordering_service._get_node_reg_for_ordered(
            pp) == node_regs[key]
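
# A conceptual sketch of the lookup verified above (hypothetical helper, not
# the ordering service's code), assuming the audit batch handler records the
# node registry in effect for every applied batch keyed by (viewNo, ppSeqNo):
def node_reg_for_ordered_from_audit(audit_node_regs, pre_prepare):
    # the registry that was active when this batch was applied
    return audit_node_regs[(pre_prepare.viewNo, pre_prepare.ppSeqNo)]
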
def test_re_order_pre_prepares_no_pre_prepares(looper, txnPoolNodeSet,
                                               sdk_wallet_client, sdk_pool_handle):
    # 1. drop PrePrepares, Prepares and Commits on the 4th node
    # Order a couple of requests on Nodes 1-3
    lagging_node = txnPoolNodeSet[-1]
    other_nodes = txnPoolNodeSet[:-1]
    with delay_rules_without_processing(lagging_node.nodeIbStasher, delay_3pc()):
        sdk_send_random_and_check(looper, txnPoolNodeSet,
                                  sdk_pool_handle, sdk_wallet_client, 3)
        assert all(n.master_last_ordered_3PC == (0, 3) for n in other_nodes)

    with delay_rules_without_processing(lagging_node.nodeIbStasher,
                                        msg_rep_delay(types_to_delay=[PREPREPARE, PREPARE, COMMIT])):
        # 2. simulate view change start so that
        # all PrePrepares/Prepares/Commits are cleared
        # and uncommitted txns are reverted
        for n in txnPoolNodeSet:
            n.replicas.send_to_internal_bus(ViewChangeStarted(view_no=1))
            master_ordering_service = n.master_replica._ordering_service
            assert not master_ordering_service.prePrepares
            assert not master_ordering_service.prepares
            assert not master_ordering_service.commits
            ledger = n.db_manager.ledgers[DOMAIN_LEDGER_ID]
            state = n.db_manager.states[DOMAIN_LEDGER_ID]
            assert len(ledger.uncommittedTxns) == 0
            assert ledger.uncommitted_root_hash == ledger.tree.root_hash
            assert state.committedHead == state.head

        # check that all nodes but the lagging one have old_view_pps stored
        for n in other_nodes:
            assert n.master_replica._ordering_service.old_view_preprepares
        assert not lagging_node.master_replica._ordering_service.old_view_preprepares

        # 3. Simulate View Change finish to re-order the same PrePrepare
        assert lagging_node.master_last_ordered_3PC == (0, 0)
        new_master = txnPoolNodeSet[1]
        batches = sorted([preprepare_to_batch_id(pp) for _, pp in
                         new_master.master_replica._ordering_service.old_view_preprepares.items()])
        new_view_msg = NewView(viewNo=0,
                               viewChanges=[],
                               checkpoint=None,
                               batches=batches)
        new_view_chk_applied_msg = NewViewCheckpointsApplied(view_no=0,
                                                             view_changes=[],
                                                             checkpoint=None,
                                                             batches=batches)
        for n in txnPoolNodeSet:
            n.master_replica._consensus_data.new_view = new_view_msg
            n.master_replica._consensus_data.prev_view_prepare_cert = batches[-1].pp_seq_no
            n.master_replica._ordering_service._bus.send(new_view_chk_applied_msg)

        # 4. Make sure that nodes 1-3 (which have already ordered the requests) send Prepares and Commits so that
        # the requests are eventually ordered on Node4 as well
        waitNodeDataEquality(looper, lagging_node, *other_nodes, customTimeout=60)
        assert lagging_node.master_last_ordered_3PC == (0, 4)

    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
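
# Step 2 above relies on a ViewChangeStarted handler that wipes the 3PC
# collections and reverts uncommitted txns, which is exactly what the
# assertions check. A conceptual sketch (hypothetical; ledger_revert and
# state_revert stand in for whatever revert primitives the node really uses):
def on_view_change_started_sketch(ordering_service, ledger_revert, state_revert):
    # forget all in-flight 3PC messages from the old view
    ordering_service.prePrepares.clear()
    ordering_service.prepares.clear()
    ordering_service.commits.clear()
    # roll back everything applied but not yet committed, so that afterwards
    # uncommitted_root_hash == tree.root_hash and state head == committedHead
    ledger_revert()
    state_revert()
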
def test_process_checkpoint(checkpoint_service, checkpoint, pre_prepare, tconf,
                            ordered, validators, is_master):
    checkpoint_stabilized_handler = Mock()
    checkpoint_service._bus.subscribe(CheckpointStabilized,
                                      checkpoint_stabilized_handler)
    quorum = checkpoint_service._data.quorums.checkpoint.value
    n = len(validators)
    assert quorum == n - getMaxFailures(n) - 1
    senders = ["sender{}".format(i) for i in range(quorum + 1)]

    till_seq_no = tconf.CHK_FREQ

    checkpoint_service._received_checkpoints[cp_key(checkpoint.viewNo,
                                                    1)] = {"frm"}
    # For now, during the checkpoint stabilization phase all checkpoints
    # with ppSeqNo less than the stable checkpoint are removed
    checkpoint_service._received_checkpoints[cp_key(
        checkpoint.viewNo + 1, till_seq_no + 100)] = {"frm"}

    pre_prepare.ppSeqNo = till_seq_no
    pre_prepare.auditTxnRootHash = cp_digest(till_seq_no)
    ordered.ppSeqNo = pre_prepare.ppSeqNo
    ordered.auditTxnRootHash = pre_prepare.auditTxnRootHash
    checkpoint_service._data.preprepared.append(
        preprepare_to_batch_id(pre_prepare))
    checkpoint_service.process_ordered(ordered)

    _check_checkpoint(checkpoint_service,
                      till_seq_no,
                      pre_prepare,
                      check_shared_data=True)

    for sender in senders[:quorum - 1]:
        checkpoint_service.process_checkpoint(checkpoint, sender)
    assert checkpoint_service._data.stable_checkpoint < till_seq_no

    # send the last checkpoint to make it stable
    checkpoint_service.process_checkpoint(checkpoint, senders[quorum - 1])
    assert checkpoint_service._data.stable_checkpoint == till_seq_no

    # check _remove_stashed_checkpoints()
    assert sum(1 for cp in checkpoint_service._received_checkpoints
               if cp.view_no == checkpoint.viewNo) == 0
    assert sum(1 for cp in checkpoint_service._received_checkpoints
               if cp.view_no == checkpoint.viewNo + 1) > 0

    # check watermarks
    assert checkpoint_service._data.low_watermark == checkpoint.seqNoEnd

    # check that a CheckpointStabilized msg has been sent
    checkpoint_stabilized_handler.assert_called_once_with(
        CheckpointStabilized(last_stable_3pc=(checkpoint.viewNo,
                                              checkpoint.seqNoEnd)))
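
# The quorum assertion above is plain BFT arithmetic: with n validators the
# tolerated number of faults is f = (n - 1) // 3, and a checkpoint stabilizes
# once n - f - 1 matching CHECKPOINT messages from other replicas arrive.
# A worked sketch (the f formula is the usual 3f + 1 bound, assumed here
# rather than taken from getMaxFailures itself):
def checkpoint_quorum_sketch(n):
    f = (n - 1) // 3      # e.g. n = 4  ->  f = 1
    return n - f - 1      # e.g. n = 4  ->  quorum = 2, reached by the 2nd sender
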
def test_cleanup_after_checkpoint_stabilize(orderer):
    pre_prepares = [
        create_pre_prepare_no_bls(generate_state_root(),
                                  view_no=0,
                                  pp_seq_no=1),
        create_pre_prepare_no_bls(generate_state_root(),
                                  view_no=1,
                                  pp_seq_no=2),
        create_pre_prepare_no_bls(generate_state_root(),
                                  view_no=1,
                                  pp_seq_no=3)
    ]
    dicts_to_cleaning = [
        orderer.pre_prepare_tss, orderer.sent_preprepares, orderer.prePrepares,
        orderer.prepares, orderer.commits, orderer.batches,
        orderer.pre_prepares_stashed_for_incorrect_time
    ]
    lists_to_cleaning = [orderer._data.prepared, orderer._data.preprepared]
    for pp in pre_prepares:
        for dict_to_cleaning in dicts_to_cleaning:
            dict_to_cleaning[(pp.viewNo, pp.ppSeqNo)] = pp
        for list_to_cleaning in lists_to_cleaning:
            list_to_cleaning.append(preprepare_to_batch_id(pp))

    orderer._bus.send(CheckpointStabilized(last_stable_3pc=(1, 2)))

    for pp in pre_prepares[:2]:
        for dict_to_cleaning in dicts_to_cleaning:
            assert (pp.viewNo, pp.ppSeqNo) not in dict_to_cleaning
        for list_to_cleaning in lists_to_cleaning:
            assert preprepare_to_batch_id(pp) not in list_to_cleaning
    for dict_to_cleaning in dicts_to_cleaning:
        assert (pre_prepares[2].viewNo,
                pre_prepares[2].ppSeqNo) in dict_to_cleaning
    for list_to_cleaning in lists_to_cleaning:
        assert preprepare_to_batch_id(pre_prepares[2]) in list_to_cleaning
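
# A conceptual sketch of the cleanup the test verifies (hypothetical helper,
# not the orderer's implementation): on CheckpointStabilized every entry whose
# (view_no, ppSeqNo) key is not newer than last_stable_3pc gets dropped.
def cleanup_till_3pc_sketch(last_stable_3pc, dicts_to_clean, lists_to_clean):
    for d in dicts_to_clean:
        for key in list(d.keys()):
            if key <= last_stable_3pc:
                del d[key]
    for lst in lists_to_clean:
        lst[:] = [b for b in lst
                  if (b.view_no, b.pp_seq_no) > last_stable_3pc]
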
def test_node_reg_in_ordered_from_audit(test_node):
    pre_prepare = create_pre_prepare_no_bls(state_root=generate_state_root(),
                                            pp_seq_no=1)
    replica = test_node.master_replica
    key = (pre_prepare.viewNo, pre_prepare.ppSeqNo)
    replica._ordering_service.prePrepares[key] = pre_prepare
    replica._consensus_data.preprepared.append(
        preprepare_to_batch_id(pre_prepare))
    three_pc_batch = ThreePcBatch.from_pre_prepare(
        pre_prepare=pre_prepare,
        state_root=pre_prepare.stateRootHash,
        txn_root=pre_prepare.txnRootHash,
        valid_digests=pre_prepare.reqIdr)
    three_pc_batch.node_reg = ["Alpha", "Beta", "Gamma", "Delta", "Eta"]
    three_pc_batch.primaries = ["Alpha", "Beta"]
    test_node.write_manager.audit_b_handler.post_batch_applied(three_pc_batch)

    assert replica._ordering_service._get_node_reg_for_ordered(
        pre_prepare) == three_pc_batch.node_reg
def test_process_message_rep_already_ordered_preprepare(
        message_req_service: MessageReqService, external_bus, data, pp):
    key = (pp.viewNo, pp.ppSeqNo)
    data.preprepared.append(preprepare_to_batch_id(pp))
    message_req_service.handlers[PREPREPARE].requested_messages[key] = None
    message_rep = MessageRep(
        **{
            f.MSG_TYPE.nm: PREPREPARE,
            f.PARAMS.nm: {
                f.INST_ID.nm: data.inst_id,
                f.VIEW_NO.nm: key[0],
                f.PP_SEQ_NO.nm: key[1]
            },
            f.MSG.nm: dict(pp.items())
        })
    frm = "frm"
    network_handler = Mock()
    external_bus.subscribe(PrePrepare, network_handler)
    message_req_service.process_message_rep(message_rep, frm)
    network_handler.assert_not_called()
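
# A conceptual sketch of the guard this test exercises (hypothetical, not the
# MessageReqService implementation): a PREPREPARE delivered via MessageRep is
# dropped instead of being re-published when its batch is already preprepared.
def should_publish_requested_preprepare(data, pp):
    return preprepare_to_batch_id(pp) not in data.preprepared
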
def create_batches_from_preprepares(preprepares):
    return [preprepare_to_batch_id(pp) for pp in preprepares]
def test_process_preprepare_on_new_view_checkpoint_applied(
        internal_bus, external_bus, orderer, is_primary, all_ordered,
        initial_view_no, pre_prepares, stored_old_view_pre_prepares):
    # !!!SETUP!!!
    orderer._data.view_no = initial_view_no + 1
    batches = create_batches_from_preprepares(pre_prepares)
    orderer._data.prev_view_prepare_cert = batches[-1].pp_seq_no

    new_view = create_new_view(initial_view_no=initial_view_no,
                               stable_cp=200,
                               batches=batches)

    # emulate that we received all PrePrepares before View Change
    orderer._update_old_view_preprepares(stored_old_view_pre_prepares)

    # emulate that we've already ordered the PrePrepares
    if all_ordered and stored_old_view_pre_prepares:
        orderer.last_ordered_3pc = (initial_view_no,
                                    stored_old_view_pre_prepares[-1].ppSeqNo)

    # !!!EXECUTE!!!
    # send NewViewCheckpointsApplied
    internal_bus.send(
        NewViewCheckpointsApplied(view_no=initial_view_no + 1,
                                  view_changes=new_view.viewChanges,
                                  checkpoint=new_view.checkpoint,
                                  batches=new_view.batches))

    # !!!CHECK!!!
    if not orderer.is_master:
        # no re-ordering is expected on non-master
        assert orderer._data.preprepared == []
        assert orderer._data.prepared == []
        return

    # check that PPs were added
    stored_batch_ids = [
        preprepare_to_batch_id(pp) for pp in stored_old_view_pre_prepares
    ]
    assert orderer._data.preprepared == [
        BatchID(view_no=initial_view_no + 1,
                pp_view_no=initial_view_no,
                pp_seq_no=batch_id.pp_seq_no,
                pp_digest=batch_id.pp_digest) for batch_id in new_view.batches
        if batch_id in stored_batch_ids
    ]

    # check that sent_preprepares is updated in case of the primary and prePrepares in case of a non-primary
    updated_prepares_collection = orderer.prePrepares if not is_primary else orderer.sent_preprepares
    non_updated_prepares_collection = orderer.sent_preprepares if not is_primary else orderer.prePrepares
    for pp in stored_old_view_pre_prepares:
        new_pp = updateNamedTuple(pp,
                                  viewNo=initial_view_no + 1,
                                  originalViewNo=pp.viewNo)
        assert (initial_view_no + 1,
                new_pp.ppSeqNo) in updated_prepares_collection
        assert updated_prepares_collection[(initial_view_no + 1,
                                            new_pp.ppSeqNo)] == new_pp
    assert not non_updated_prepares_collection

    # check that Prepare is sent in case of non primary
    if not is_primary:
        check_prepares_sent(external_bus, stored_old_view_pre_prepares,
                            initial_view_no + 1)
    else:
        # only MessageReqs for the missing PrePrepares are sent
        assert len(external_bus.sent_messages
                   ) == len(pre_prepares) - len(stored_old_view_pre_prepares)

    # we don't have a quorum of Prepares yet
    assert orderer._data.prepared == []

    # check that missing PrePrepares have been requested
    expected_requested_batches = [
        batch_id for batch_id in new_view.batches
        if batch_id not in stored_batch_ids
    ]
    check_request_old_view_preprepares_sent(external_bus,
                                            expected_requested_batches)
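
# The collections checked above hold old-view PrePrepares re-keyed under the
# new view. A minimal sketch of that transformation, assuming updateNamedTuple
# returns a copy of the message with the given fields replaced (the same call
# the assertions above use to build the expected PrePrepare):
def rekey_pre_prepare_for_new_view(pp, new_view_no):
    # keep the original view in originalViewNo so the batch id stays stable
    return updateNamedTuple(pp, viewNo=new_view_no, originalViewNo=pp.viewNo)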