Example #1
def test_update_shared_data_on_new_view_checkpoint_applied(internal_bus, orderer):
    orderer._data.preprepared = []
    orderer._data.prepared = []
    old_data = copy_shared_data(orderer._data)

    initial_view_no = 3
    new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200)
    internal_bus.send(NewViewCheckpointsApplied(view_no=initial_view_no + 1,
                                                view_changes=new_view.viewChanges,
                                                checkpoint=new_view.checkpoint,
                                                batches=new_view.batches))

    new_data = copy_shared_data(orderer._data)
    check_service_changed_only_owned_fields_in_shared_data(OrderingService, old_data, new_data)

    # preprepared batches are re-created for the new view
    if orderer.is_master:
        assert orderer._data.preprepared
        assert orderer._data.preprepared == [BatchID(view_no=initial_view_no + 1, pp_seq_no=batch_id.pp_seq_no,
                                                     pp_digest=batch_id.pp_digest)
                                             for batch_id in new_view.batches]
        assert orderer._data.prepared == []
    else:
        assert orderer._data.preprepared == []
        assert orderer._data.prepared == []
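
For reference, the tests in this listing only rely on NewViewCheckpointsApplied carrying four fields, passed as keyword arguments: view_no, view_changes, checkpoint and batches. Below is a minimal sketch of a message with that shape, assuming a dataclass-style container; the name NewViewCheckpointsAppliedSketch and the dataclass form are illustrative, not the actual plenum definition.

# Minimal sketch of the message shape assumed by these tests; field names are
# taken from the keyword arguments used above, everything else is illustrative.
from dataclasses import dataclass, field
from typing import Any, List, Optional


@dataclass
class NewViewCheckpointsAppliedSketch:
    view_no: int
    view_changes: List[Any] = field(default_factory=list)
    checkpoint: Optional[Any] = None
    batches: List[Any] = field(default_factory=list)  # BatchID tuples in the examples above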
Example #2
    def process_new_view_accepted(self, msg: NewViewAccepted):
        if self.is_master:
            cp = msg.checkpoint
            # do not update stable checkpoints if the node doesn't have this checkpoint
            # the node is lagging behind in this case and will start catchup after receiving
            # one more quorum of checkpoints from other nodes
            if cp not in self._data.checkpoints:
                return
            self._mark_checkpoint_stable(cp.seqNoEnd)
            self.set_watermarks(low_watermark=cp.seqNoEnd)
        else:
            # TODO: This is kind of hackery, but the proper way would require introducing
            #  more messages specifically for backup replicas. Hope we can live with it for now.
            self._data.waiting_for_new_view = False
            self._data.pp_seq_no = 0
            self.set_watermarks(low_watermark=0)
            self._reset_checkpoints()
            self._data.stable_checkpoint = 0
            self._remove_received_checkpoints()

        self._bus.send(
            NewViewCheckpointsApplied(view_no=msg.view_no,
                                      view_changes=msg.view_changes,
                                      checkpoint=msg.checkpoint,
                                      batches=msg.batches))
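
In this handler, self._bus is the replica's internal bus: process_new_view_accepted is itself invoked as a subscriber to NewViewAccepted and, once the checkpoint state is updated, re-publishes NewViewCheckpointsApplied for the other services exercised in the tests below. The class below is a rough illustrative sketch of such a bus; only subscribe() and send() mirror the calls the tests actually make (internal_bus.subscribe(...), internal_bus.send(...)).

# Illustrative pub/sub bus; only subscribe()/send() mirror the calls used in the
# tests, the class itself is a sketch, not plenum's implementation.
from collections import defaultdict


class SimpleInternalBus:
    def __init__(self):
        self._subscribers = defaultdict(list)

    def subscribe(self, message_type, handler):
        # handler is called for every message of exactly this type
        self._subscribers[message_type].append(handler)

    def send(self, message, *args):
        for handler in list(self._subscribers[type(message)]):
            handler(message, *args)

With this wiring, sending NewViewAccepted triggers process_new_view_accepted, which in turn sends NewViewCheckpointsApplied to every subscribed service, which is what the Mock-based tests in Examples #8 and #9 assert.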
def test_re_order_pre_prepares_no_pre_prepares(looper, txnPoolNodeSet,
                                               sdk_wallet_client, sdk_pool_handle):
    # 1. drop PrePrepares, Prepares and Commits on the 4th node
    # Order a couple of requests on Nodes 1-3
    lagging_node = txnPoolNodeSet[-1]
    other_nodes = txnPoolNodeSet[:-1]
    with delay_rules_without_processing(lagging_node.nodeIbStasher, delay_3pc()):
        sdk_send_random_and_check(looper, txnPoolNodeSet,
                                  sdk_pool_handle, sdk_wallet_client, 3)
        assert all(n.master_last_ordered_3PC == (0, 3) for n in other_nodes)

    with delay_rules_without_processing(lagging_node.nodeIbStasher,
                                        msg_rep_delay(types_to_delay=[PREPREPARE, PREPARE, COMMIT])):
        # 2. simulate view change start so that
        # all PrePrepares/Prepares/Commits are cleared
        # and uncommitted txns are reverted
        for n in txnPoolNodeSet:
            n.replicas.send_to_internal_bus(ViewChangeStarted(view_no=1))
            master_ordering_service = n.master_replica._ordering_service
            assert not master_ordering_service.prePrepares
            assert not master_ordering_service.prepares
            assert not master_ordering_service.commits
            ledger = n.db_manager.ledgers[DOMAIN_LEDGER_ID]
            state = n.db_manager.states[DOMAIN_LEDGER_ID]
            assert len(ledger.uncommittedTxns) == 0
            assert ledger.uncommitted_root_hash == ledger.tree.root_hash
            assert state.committedHead == state.head

        # check that all nodes but the lagging one have old_view_pps stored
        for n in other_nodes:
            assert n.master_replica._ordering_service.old_view_preprepares
        assert not lagging_node.master_replica._ordering_service.old_view_preprepares

        # 3. Simulate the end of the View Change to re-order the same PrePrepares
        assert lagging_node.master_last_ordered_3PC == (0, 0)
        new_master = txnPoolNodeSet[1]
        batches = sorted([preprepare_to_batch_id(pp) for _, pp in
                         new_master.master_replica._ordering_service.old_view_preprepares.items()])
        new_view_msg = NewView(viewNo=0,
                               viewChanges=[],
                               checkpoint=None,
                               batches=batches)
        new_view_chk_applied_msg = NewViewCheckpointsApplied(view_no=0,
                                                             view_changes=[],
                                                             checkpoint=None,
                                                             batches=batches)
        for n in txnPoolNodeSet:
            n.master_replica._consensus_data.new_view = new_view_msg
            n.master_replica._consensus_data.prev_view_prepare_cert = batches[-1].pp_seq_no
            n.master_replica._ordering_service._bus.send(new_view_chk_applied_msg)

        # 4. Make sure that Nodes 1-3 (which already ordered the requests) send their Prepares and Commits
        # so that the requests are eventually ordered on Node4 as well
        waitNodeDataEquality(looper, lagging_node, *other_nodes, customTimeout=60)
        assert lagging_node.master_last_ordered_3PC == (0, 4)

    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
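
The tests above and below convert stored old-view PrePrepares into batch IDs with preprepare_to_batch_id before packing them into NewView / NewViewCheckpointsApplied. The function below is a hypothetical reimplementation for illustration only, assuming the four-field BatchID used in the last example and that a PrePrepare exposes viewNo, ppSeqNo, digest and, once re-ordered, originalViewNo (as suggested by the updateNamedTuple call in Example #9).

# Hypothetical sketch of preprepare_to_batch_id, not the project's helper.
from collections import namedtuple

# Four-field BatchID as used in the last example.
BatchID = namedtuple('BatchID', ['view_no', 'pp_view_no', 'pp_seq_no', 'pp_digest'])


def preprepare_to_batch_id_sketch(pp):
    # A PrePrepare that was never re-ordered has no originalViewNo, so fall
    # back to its own viewNo.
    original_view_no = getattr(pp, 'originalViewNo', None)
    if original_view_no is None:
        original_view_no = pp.viewNo
    return BatchID(view_no=pp.viewNo,
                   pp_view_no=original_view_no,
                   pp_seq_no=pp.ppSeqNo,
                   pp_digest=pp.digest)  # the 'digest' field name is an assumption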
Example #4
def test_re_order_pre_prepares(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle):
    # 0. use new 3PC validator
    for n in txnPoolNodeSet:
        ordering_service = n.master_replica._ordering_service
        ordering_service._validator = OrderingServiceMsgValidator(
            ordering_service._data)

    # 1. drop Prepares and Commits on the 4th node
    # Order a couple of requests on Nodes 1-3
    lagging_node = txnPoolNodeSet[-1]
    other_nodes = txnPoolNodeSet[:-1]
    with delay_rules_without_processing(lagging_node.nodeIbStasher, cDelay(),
                                        pDelay()):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, 3)
        assert all(n.master_last_ordered_3PC == (0, 3) for n in other_nodes)

    # 2. simulate view change start so that
    # all PrePrepares/Prepares/Commits are cleared
    # and uncommitted txns are reverted
    for n in txnPoolNodeSet:
        n.replicas.send_to_internal_bus(ViewChangeStarted(view_no=1))
        master_ordering_service = n.master_replica._ordering_service
        assert not master_ordering_service.prePrepares
        assert not master_ordering_service.prepares
        assert not master_ordering_service.commits
        assert master_ordering_service.old_view_preprepares
        ledger = n.db_manager.ledgers[DOMAIN_LEDGER_ID]
        state = n.db_manager.states[DOMAIN_LEDGER_ID]
        assert len(ledger.uncommittedTxns) == 0
        assert ledger.uncommitted_root_hash == ledger.tree.root_hash
        assert state.committedHead == state.head

    # 3. Simulate the end of the View Change to re-order the same PrePrepares
    assert lagging_node.master_last_ordered_3PC == (0, 0)
    new_master = txnPoolNodeSet[1]
    batches = [
        preprepare_to_batch_id(pp) for _, pp in new_master.master_replica.
        _ordering_service.old_view_preprepares.items()
    ]
    new_view_msg = NewViewCheckpointsApplied(view_no=0,
                                             view_changes=[],
                                             checkpoint=None,
                                             batches=batches)
    for n in txnPoolNodeSet:
        n.master_replica._ordering_service._bus.send(new_view_msg)

    # 4. Make sure that Nodes 1-3 (which already ordered the requests) send their Prepares and Commits
    # so that the requests are eventually ordered on Node4 as well
    waitNodeDataEquality(looper, lagging_node, *other_nodes)
    assert lagging_node.master_last_ordered_3PC == (0, 3)

    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client,
                               sdk_pool_handle)
Example #5
    def process_new_view_accepted(self, msg: NewViewAccepted):
        # 1. update shared data
        cp = msg.checkpoint
        if cp not in self._data.checkpoints:
            self._data.checkpoints.append(cp)
        self._mark_checkpoint_stable(cp.seqNoEnd)
        self.set_watermarks(low_watermark=cp.seqNoEnd)

        # 2. send NewViewCheckpointsApplied
        self._bus.send(
            NewViewCheckpointsApplied(view_no=msg.view_no,
                                      view_changes=msg.view_changes,
                                      checkpoint=msg.checkpoint,
                                      batches=msg.batches))
        return PROCESS, None
def test_do_nothing_on_new_view_checkpoint_applied(internal_bus, view_change_service):
    view_change_service._data.waiting_for_new_view = False
    view_change_service._data.view_no = 1
    view_change_service._data.primary_name = "Alpha"
    view_change_service._data.primaries = ["Alpha", "Beta"]
    old_data = copy_shared_data(view_change_service._data)

    new_view = create_new_view(initial_view_no=3, stable_cp=200)
    internal_bus.send(NewViewCheckpointsApplied(view_no=4,
                                                view_changes=new_view.viewChanges,
                                                checkpoint=new_view.checkpoint,
                                                batches=new_view.batches))

    new_data = copy_shared_data(view_change_service._data)
    assert old_data == new_data
def test_do_nothing_on_new_view_checkpoint_applied(internal_bus, checkpoint_service):
    checkpoint_service._data.checkpoints.clear()
    checkpoint_service._data.checkpoints.update(create_checkpoints(view_no=0))
    checkpoint_service._data.stable_checkpoint = 100
    checkpoint_service._data.low_watermark = 100
    checkpoint_service._data.high_watermark = checkpoint_service._data.low_watermark + 300
    old_data = copy_shared_data(checkpoint_service._data)

    initial_view_no = 3
    new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200)
    internal_bus.send(NewViewCheckpointsApplied(view_no=initial_view_no + 1,
                                                view_changes=new_view.viewChanges,
                                                checkpoint=new_view.checkpoint,
                                                batches=new_view.batches))

    new_data = copy_shared_data(checkpoint_service._data)
    assert old_data == new_data
Example #8
def test_view_change_finished_sends_new_view_checkpoint_applied(
        internal_bus, checkpoint_service):
    handler = Mock()
    internal_bus.subscribe(NewViewCheckpointsApplied, handler)

    initial_view_no = 3
    new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200)
    internal_bus.send(
        NewViewAccepted(view_no=initial_view_no + 1,
                        view_changes=new_view.viewChanges,
                        checkpoint=new_view.checkpoint,
                        batches=new_view.batches))
    expected_apply_new_view = NewViewCheckpointsApplied(
        view_no=initial_view_no + 1,
        view_changes=new_view.viewChanges,
        checkpoint=new_view.checkpoint,
        batches=new_view.batches)

    handler.assert_called_once_with(expected_apply_new_view)
Example #9
def test_unstash_waiting_new_view_on_new_view_checkpoint_applied(
        external_bus, internal_bus, replica_service):
    replica_service._data.view_no = 2
    replica_service._data.node_mode = Mode.participating
    replica_service._data.waiting_for_new_view = True

    external_bus.process_incoming(create_commit_no_bls_sig(req_key=(2, 10)),
                                  replica_service._data.primary_name)
    assert replica_service.stasher.stash_size(STASH_WAITING_NEW_VIEW) == 1

    new_view = create_new_view(initial_view_no=1, stable_cp=200)
    replica_service._data.waiting_for_new_view = False
    internal_bus.send(
        NewViewCheckpointsApplied(view_no=2,
                                  view_changes=new_view.viewChanges,
                                  checkpoint=new_view.checkpoint,
                                  batches=new_view.batches))

    assert replica_service.stasher.stash_size(STASH_WAITING_NEW_VIEW) == 0
def test_update_shared_data_on_new_view_checkpoint_applied(
        internal_bus, orderer):
    initial_view_no = 3
    orderer._data.preprepared = []
    orderer._data.prepared = []
    orderer._data.view_no = initial_view_no + 1
    old_data = copy_shared_data(orderer._data)

    new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200)
    internal_bus.send(
        NewViewCheckpointsApplied(view_no=initial_view_no + 1,
                                  view_changes=new_view.viewChanges,
                                  checkpoint=new_view.checkpoint,
                                  batches=new_view.batches))

    new_data = copy_shared_data(orderer._data)
    check_service_changed_only_owned_fields_in_shared_data(
        OrderingService, old_data, new_data)

    # Since we didn't order the PrePrepares from the batches, they should not be added to shared data
    # (we will request the PrePrepares instead, see the next tests)
    assert orderer._data.preprepared == []
    assert orderer._data.prepared == []
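
Both this test and Example #1 rely on check_service_changed_only_owned_fields_in_shared_data to verify that a service does not touch shared fields it does not own. A minimal sketch of such a check is below, assuming copy_shared_data returns a plain field-name-to-value dict; the owned_fields attribute is hypothetical and only stands in for however the real helper knows which fields a service owns.

# Minimal sketch; 'owned_fields' is a hypothetical attribute standing in for the
# real helper's knowledge of which shared fields each service may modify.
def check_changed_only_owned_fields_sketch(service_cls, old_data, new_data):
    owned = set(getattr(service_cls, 'owned_fields', ()))
    for field_name, old_value in old_data.items():
        if field_name not in owned:
            assert new_data[field_name] == old_value, \
                "{} changed a field it does not own: {}".format(service_cls.__name__, field_name)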
def test_view_change_finished_sends_new_view_checkpoint_applied(internal_bus, checkpoint_service, is_master):
    # TODO: Need to decide on how we handle this case
    if not is_master:
        return

    handler = Mock()
    internal_bus.subscribe(NewViewCheckpointsApplied, handler)

    initial_view_no = 3
    checkpoint_service._data.checkpoints.clear()
    checkpoint_service._data.checkpoints.update(
        [Checkpoint(instId=0, viewNo=3, seqNoStart=0, seqNoEnd=200, digest=cp_digest(200))])
    new_view = create_new_view(initial_view_no=initial_view_no, stable_cp=200)
    internal_bus.send(NewViewAccepted(view_no=initial_view_no + 1,
                                      view_changes=new_view.viewChanges,
                                      checkpoint=new_view.checkpoint,
                                      batches=new_view.batches))
    expected_apply_new_view = NewViewCheckpointsApplied(view_no=initial_view_no + 1,
                                                        view_changes=new_view.viewChanges,
                                                        checkpoint=new_view.checkpoint,
                                                        batches=new_view.batches)

    handler.assert_called_once_with(expected_apply_new_view)
def new_view(view_no):
    return NewViewCheckpointsApplied(view_no, [], [], [])
def test_process_preprepare_on_new_view_checkpoint_applied(
        internal_bus, external_bus, orderer, is_primary, all_ordered,
        initial_view_no, pre_prepares, stored_old_view_pre_prepares):
    # !!!SETUP!!!
    orderer._data.view_no = initial_view_no + 1
    batches = create_batches_from_preprepares(pre_prepares)
    orderer._data.prev_view_prepare_cert = batches[-1].pp_seq_no

    new_view = create_new_view(initial_view_no=initial_view_no,
                               stable_cp=200,
                               batches=batches)

    # emulate that we received all PrePrepares before View Change
    orderer._update_old_view_preprepares(stored_old_view_pre_prepares)

    # emulate that we've already ordered the PrePrepares
    if all_ordered and stored_old_view_pre_prepares:
        orderer.last_ordered_3pc = (initial_view_no,
                                    stored_old_view_pre_prepares[-1].ppSeqNo)

    # !!!EXECUTE!!!
    # send NewViewCheckpointsApplied
    internal_bus.send(
        NewViewCheckpointsApplied(view_no=initial_view_no + 1,
                                  view_changes=new_view.viewChanges,
                                  checkpoint=new_view.checkpoint,
                                  batches=new_view.batches))

    # !!!CHECK!!!
    if not orderer.is_master:
        # no re-ordering is expected on non-master
        assert orderer._data.preprepared == []
        assert orderer._data.prepared == []
        return

    # check that PPs were added
    stored_batch_ids = [
        preprepare_to_batch_id(pp) for pp in stored_old_view_pre_prepares
    ]
    assert orderer._data.preprepared == [
        BatchID(view_no=initial_view_no + 1,
                pp_view_no=initial_view_no,
                pp_seq_no=batch_id.pp_seq_no,
                pp_digest=batch_id.pp_digest) for batch_id in new_view.batches
        if batch_id in stored_batch_ids
    ]

    # check that sent_preprepares is updated in case of a primary and prePrepares in case of a non-primary
    updated_prepares_collection = orderer.prePrepares if not is_primary else orderer.sent_preprepares
    non_updated_prepares_collection = orderer.sent_preprepares if not is_primary else orderer.prePrepares
    for pp in stored_old_view_pre_prepares:
        new_pp = updateNamedTuple(pp,
                                  viewNo=initial_view_no + 1,
                                  originalViewNo=pp.viewNo)
        assert (initial_view_no + 1,
                new_pp.ppSeqNo) in updated_prepares_collection
        assert updated_prepares_collection[(initial_view_no + 1,
                                            new_pp.ppSeqNo)] == new_pp
    assert not non_updated_prepares_collection

    # check that Prepare is sent in case of non primary
    if not is_primary:
        check_prepares_sent(external_bus, stored_old_view_pre_prepares,
                            initial_view_no + 1)
    else:
        #  only MessageReqs are sent
        assert len(external_bus.sent_messages
                   ) == len(pre_prepares) - len(stored_old_view_pre_prepares)

    # we don't have a quorum of Prepares yet
    assert orderer._data.prepared == []

    # check that missing PrePrepares have been requested
    expected_requested_batches = [
        batch_id for batch_id in new_view.batches
        if batch_id not in stored_batch_ids
    ]
    check_request_old_view_preprepares_sent(external_bus,
                                            expected_requested_batches)
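
The test above is driven by several fixtures (is_primary, all_ordered, initial_view_no, pre_prepares, stored_old_view_pre_prepares). The snippet below is a hypothetical sketch of how a few of them could be parametrized with pytest; the actual conftest fixtures and parameter values may differ.

# Hypothetical fixture parametrization for the test above; values are illustrative.
import pytest


@pytest.fixture(params=[True, False], ids=['primary', 'non_primary'])
def is_primary(request):
    return request.param


@pytest.fixture(params=[True, False], ids=['all_ordered', 'none_ordered'])
def all_ordered(request):
    return request.param


@pytest.fixture(params=[0, 3])
def initial_view_no(request):
    return request.param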