def test_start_view_change_sends_view_change_started(internal_bus, view_change_service, initial_view_no):
    """NeedViewChange makes the service broadcast ViewChangeStarted.

    Without an explicit view_no the service advances to the next view;
    with an explicit one it switches to exactly that view.
    """
    started_listener = Mock()
    internal_bus.subscribe(ViewChangeStarted, started_listener)

    # Implicit target: current view + 1.
    internal_bus.send(NeedViewChange())
    started_listener.assert_called_once_with(
        ViewChangeStarted(view_no=initial_view_no + 1))

    # Explicit target view is honored as-is.
    internal_bus.send(NeedViewChange(view_no=5))
    started_listener.assert_called_with(ViewChangeStarted(view_no=5))
def test_start_view_change_sends_view_change_started(internal_bus, view_change_service, initial_view_no, is_master):
    """NeedViewChange makes the service broadcast ViewChangeStarted.

    The target view is current + 1 by default, or exactly msg.view_no
    when one is supplied.
    """
    # TODO: Need to decide on how we handle the backup-instance case
    if not is_master:
        return

    started_listener = Mock()
    internal_bus.subscribe(ViewChangeStarted, started_listener)

    # Implicit target: current view + 1.
    internal_bus.send(NeedViewChange())
    started_listener.assert_called_once_with(
        ViewChangeStarted(view_no=initial_view_no + 1))

    # Explicit target view is honored as-is.
    internal_bus.send(NeedViewChange(view_no=5))
    started_listener.assert_called_with(ViewChangeStarted(view_no=5))
def process_need_view_change(self, msg: NeedViewChange) -> None:
    """Start a view change requested via the internal bus.

    Advances the shared view number (to ``msg.view_no`` when given,
    otherwise current + 1), re-selects primaries, announces
    ViewChangeStarted to local services and broadcasts our own
    ViewChange message to the other nodes.
    """
    # 1. calculate new viewno
    # A NeedViewChange without an explicit view_no means "next view".
    view_no = msg.view_no
    if view_no is None:
        view_no = self._data.view_no + 1

    # 2. Do cleanup before new view change starts
    self._clean_on_view_change_start()

    # 3. Update shared data
    self._data.view_no = view_no
    self._data.waiting_for_new_view = True
    # NOTE(review): instance_count is derived as f + 1 here — presumably
    # the number of replica instances; confirm against the selector's contract.
    self._data.primaries = self._primaries_selector.select_primaries(
        view_no=self._data.view_no,
        instance_count=self._data.quorums.f + 1,
        validators=self._data.validators)
    # Our own instance's primary for the new view.
    self._data.primary_name = self._data.primaries[self._data.inst_id]

    # 4. Build ViewChange message
    vc = self._build_view_change_msg()

    # 5. Send ViewChangeStarted via internal bus to update other services
    self._bus.send(ViewChangeStarted(view_no=self._data.view_no))

    # 6. Send ViewChange msg to other nodes (via external bus)
    self._network.send(vc)
    # Register our own vote alongside the ones received from other nodes.
    self._votes.add_view_change(vc, self._data.name)

    # 7. Unstash messages for new view
    self._router.process_all_stashed()
def process_need_view_change(self, msg: NeedViewChange) -> None:
    """Start a view change requested via the internal bus.

    On the master instance this selects only the new master primary,
    broadcasts our ViewChange and announces ViewChangeStarted locally;
    backup instances merely reset their state and return early.
    """
    logger.info("{} processing {}".format(self, msg))

    # 1. calculate new viewno
    # A NeedViewChange without an explicit view_no means "next view".
    view_no = msg.view_no
    if view_no is None:
        view_no = self._data.view_no + 1

    # 2. Do cleanup before new view change starts
    self._clean_on_view_change_start()

    # 3. Update shared data
    self._data.view_no = view_no
    self._data.waiting_for_new_view = True
    # Remember the old primary so we can detect a no-op primary selection below.
    old_primary = self._data.primary_name
    self._data.primary_name = None
    if not self._data.is_master:
        self._data.master_reordered_after_vc = False
        return

    # Only the master primary is selected at the beginning of view change as we need to get a NEW_VIEW and do re-ordering on master
    # Backup primaries will not be selected (and backups will not order) until re-ordering of txns from previous view on master is finished
    # More precisely, it will be done after the first batch in a new view is committed
    # This is done so as N and F may change as a result of NODE txns ordered in last view,
    # so we need a synchronous point of updating N, F, number of replicas and backup primaries
    # Beginning of view (when the first batch in a view is ordered) is such a point.
    self._data.primary_name = generateName(
        self._primaries_selector.select_master_primary(self._data.view_no),
        self._data.inst_id)

    # If selection produced the same primary again, immediately propose
    # moving on to yet another view.
    if old_primary and self._data.primary_name == old_primary:
        logger.info("Selected master primary is the same with the "
                    "current master primary (new_view {}). "
                    "Propose a new view {}".format(self._data.view_no, self._data.view_no + 1))
        self._propose_view_change(Suspicions.INCORRECT_NEW_PRIMARY)

    logger.info(
        "{} started view change to view {}. Expected Master Primary: {}".
        format(self._data.name, self._data.view_no, self._data.primary_name))

    # 4. Build ViewChange message
    vc = self._build_view_change_msg()

    # 5. Send ViewChangeStarted via internal bus to update other services
    logger.info("{} sending {}".format(self, vc))
    self._bus.send(ViewChangeStarted(view_no=self._data.view_no))

    # 6. Send ViewChange msg to other nodes (via external bus)
    self._network.send(vc)
    # Register our own vote alongside the ones received from other nodes.
    self.view_change_votes.add_view_change(vc, self._data.name)

    # 7. Unstash messages for view change
    self._router.process_all_stashed(STASH_WAITING_VIEW_CHANGE)
    self._stashed_vc_msgs.clear()

    # 8. Restart instance change timer
    self._resend_inst_change_timer.stop()
    self._resend_inst_change_timer.start()
def process_need_view_change(self, msg: NeedViewChange): self._logger.info("{} processing {}".format(self, msg)) # 1. calculate new viewno view_no = msg.view_no if view_no is None: view_no = self._data.view_no + 1 # 2. Do cleanup before new view change starts self._clean_on_view_change_start() # 3. Update shared data self._data.view_no = view_no self._data.waiting_for_new_view = True self._data.primaries = self._primaries_selector.select_primaries( view_no=self._data.view_no, instance_count=self._data.quorums.f + 1, validators=self._data.validators) for i, primary_name in enumerate(self._data.primaries): self._logger.display( "{} selected primary {} for instance {} (view {})".format( PRIMARY_SELECTION_PREFIX, primary_name, i, self._data.view_no), extra={ "cli": "ANNOUNCE", "tags": ["node-election"] }) old_primary = self._data.primary_name self._data.primary_name = generateName( self._data.primaries[self._data.inst_id], self._data.inst_id) if not self._data.is_master: return if old_primary and self._data.primary_name == old_primary: self._logger.info("Selected master primary is the same with the " "current master primary (new_view {}). " "Propose a new view {}".format( self._data.view_no, self._data.view_no + 1)) self._propose_view_change(Suspicions.INCORRECT_NEW_PRIMARY.code) # 4. Build ViewChange message vc = self._build_view_change_msg() # 5. Send ViewChangeStarted via internal bus to update other services self._logger.info("{} sending {}".format(self, vc)) self._bus.send(ViewChangeStarted(view_no=self._data.view_no)) # 6. Send ViewChange msg to other nodes (via external bus) self._network.send(vc) self.view_change_votes.add_view_change(vc, self._data.name) # 7. Unstash messages for view change self._router.process_all_stashed(STASH_WAITING_VIEW_CHANGE) # 8. Restart instance change timer self._resend_inst_change_timer.stop() self._resend_inst_change_timer.start()
def test_process_view_change_started(message_req_service: MessageReqService, internal_bus, data, fill_requested_lists):
    """ViewChangeStarted clears both the requested-message list and the
    accumulated votes of the VIEW_CHANGE request handler."""
    vc_handler = message_req_service.handlers[VIEW_CHANGE]
    vc_handler._received_vc[("NodeName", "digest")] = {"Node1", "Node2"}

    internal_bus.send(ViewChangeStarted(0))

    assert not vc_handler.requested_messages
    assert not vc_handler._received_vc
def test_re_order_pre_prepares_no_pre_prepares(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle):
    """A node that missed all 3PC messages catches up by re-ordering
    batches from the new view's NewView message after a view change.
    """
    # 1. drop PrePrepares, Prepares and Commits on 4thNode
    # Order a couple of requests on Nodes 1-3
    lagging_node = txnPoolNodeSet[-1]
    other_nodes = txnPoolNodeSet[:-1]
    with delay_rules_without_processing(lagging_node.nodeIbStasher, delay_3pc()):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 3)
        assert all(n.master_last_ordered_3PC == (0, 3) for n in other_nodes)

    # Keep message responses delayed too, so the lagging node cannot just
    # request the missed 3PC messages and must rely on re-ordering instead.
    with delay_rules_without_processing(lagging_node.nodeIbStasher,
                                        msg_rep_delay(types_to_delay=[PREPREPARE, PREPARE, COMMIT])):
        # 2. simulate view change start so that
        # all PrePrepares/Prepares/Commits are cleared
        # and uncommitted txns are reverted
        for n in txnPoolNodeSet:
            n.replicas.send_to_internal_bus(ViewChangeStarted(view_no=1))
            master_ordering_service = n.master_replica._ordering_service
            assert not master_ordering_service.prePrepares
            assert not master_ordering_service.prepares
            assert not master_ordering_service.commits
            ledger = n.db_manager.ledgers[DOMAIN_LEDGER_ID]
            state = n.db_manager.states[DOMAIN_LEDGER_ID]
            assert len(ledger.uncommittedTxns) == 0
            assert ledger.uncommitted_root_hash == ledger.tree.root_hash
            assert state.committedHead == state.head

        # check that all nodes but the lagging one have old_view_pps stored
        for n in other_nodes:
            assert n.master_replica._ordering_service.old_view_preprepares
        assert not lagging_node.master_replica._ordering_service.old_view_preprepares

        # 3. Simulate View Change finish to re-order the same PrePrepare
        assert lagging_node.master_last_ordered_3PC == (0, 0)
        new_master = txnPoolNodeSet[1]
        batches = sorted([preprepare_to_batch_id(pp)
                          for _, pp in new_master.master_replica._ordering_service.old_view_preprepares.items()])
        new_view_msg = NewView(viewNo=0,
                               viewChanges=[],
                               checkpoint=None,
                               batches=batches)
        new_view_chk_applied_msg = NewViewCheckpointsApplied(view_no=0,
                                                             view_changes=[],
                                                             checkpoint=None,
                                                             batches=batches)
        for n in txnPoolNodeSet:
            n.master_replica._consensus_data.new_view = new_view_msg
            n.master_replica._consensus_data.prev_view_prepare_cert = batches[-1].pp_seq_no
            n.master_replica._ordering_service._bus.send(new_view_chk_applied_msg)

        # 4. Make sure that the nodes 1-3 (that already ordered the requests) sent Prepares and Commits so that
        # the request was eventually ordered on Node4 as well
        waitNodeDataEquality(looper, lagging_node, *other_nodes, customTimeout=60)
        # NOTE(review): expected (0, 4), not (0, 3) — presumably one extra
        # batch is ordered during/after re-ordering; confirm against the
        # re-ordering protocol.
        assert lagging_node.master_last_ordered_3PC == (0, 4)

    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
def test_re_order_pre_prepares(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle):
    """A node that received PrePrepares but missed Prepares/Commits
    re-orders the same batches after a simulated view change finish.
    """
    # 0. use new 3PC validator
    for n in txnPoolNodeSet:
        ordering_service = n.master_replica._ordering_service
        ordering_service._validator = OrderingServiceMsgValidator(
            ordering_service._data)

    # 1. drop Prepares and Commits on 4thNode
    # Order a couple of requests on Nodes 1-3
    lagging_node = txnPoolNodeSet[-1]
    other_nodes = txnPoolNodeSet[:-1]
    with delay_rules_without_processing(lagging_node.nodeIbStasher, cDelay(), pDelay()):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 3)
        assert all(n.master_last_ordered_3PC == (0, 3) for n in other_nodes)

    # 2. simulate view change start so that
    # all PrePrepares/Prepares/Commits are cleared
    # and uncommitted txns are reverted
    for n in txnPoolNodeSet:
        n.replicas.send_to_internal_bus(ViewChangeStarted(view_no=1))
        master_ordering_service = n.master_replica._ordering_service
        assert not master_ordering_service.prePrepares
        assert not master_ordering_service.prepares
        assert not master_ordering_service.commits
        # Every node kept the old-view PrePrepares (the lagging one
        # received them too — only Prepares/Commits were dropped).
        assert master_ordering_service.old_view_preprepares
        ledger = n.db_manager.ledgers[DOMAIN_LEDGER_ID]
        state = n.db_manager.states[DOMAIN_LEDGER_ID]
        assert len(ledger.uncommittedTxns) == 0
        assert ledger.uncommitted_root_hash == ledger.tree.root_hash
        assert state.committedHead == state.head

    # 3. Simulate View Change finish to re-order the same PrePrepare
    assert lagging_node.master_last_ordered_3PC == (0, 0)
    new_master = txnPoolNodeSet[1]
    batches = [
        preprepare_to_batch_id(pp)
        for _, pp in new_master.master_replica.
        _ordering_service.old_view_preprepares.items()
    ]
    new_view_msg = NewViewCheckpointsApplied(view_no=0,
                                             view_changes=[],
                                             checkpoint=None,
                                             batches=batches)
    for n in txnPoolNodeSet:
        n.master_replica._ordering_service._bus.send(new_view_msg)

    # 4. Make sure that the nodes 1-3 (that already ordered the requests) sent Prepares and Commits so that
    # the request was eventually ordered on Node4 as well
    waitNodeDataEquality(looper, lagging_node, *other_nodes)
    assert lagging_node.master_last_ordered_3PC == (0, 3)

    sdk_ensure_pool_functional(looper, txnPoolNodeSet, sdk_wallet_client, sdk_pool_handle)
def test_do_nothing_on_view_change_started(internal_bus, view_change_service):
    """ViewChangeStarted alone must leave the view-change service's shared data untouched."""
    shared = view_change_service._data
    shared.waiting_for_new_view = False
    shared.view_no = 1
    shared.primary_name = "Alpha"
    shared.primaries = ["Alpha", "Beta"]

    snapshot_before = copy_shared_data(shared)
    internal_bus.send(ViewChangeStarted(view_no=4))

    assert copy_shared_data(shared) == snapshot_before
def test_do_nothing_on_view_change_started(internal_bus, checkpoint_service):
    """ViewChangeStarted alone must leave the checkpoint service's shared data untouched."""
    shared = checkpoint_service._data
    shared.checkpoints.update(create_checkpoints(view_no=0))
    shared.stable_checkpoint = 200
    shared.low_watermark = 200
    shared.high_watermark = shared.low_watermark + 300

    snapshot_before = copy_shared_data(shared)
    internal_bus.send(ViewChangeStarted(view_no=4))

    assert copy_shared_data(shared) == snapshot_before
def test_update_shared_data_on_view_change_started(internal_bus, orderer):
    """On ViewChangeStarted the ordering service resets only the shared
    fields it owns (preprepared/prepared) and leaves the rest intact."""
    orderer._data.preprepared = create_batches(view_no=3)
    orderer._data.prepared = create_batches(view_no=3)
    snapshot_before = copy_shared_data(orderer._data)

    internal_bus.send(ViewChangeStarted(view_no=4))

    snapshot_after = copy_shared_data(orderer._data)
    check_service_changed_only_owned_fields_in_shared_data(OrderingService,
                                                           snapshot_before,
                                                           snapshot_after)
    assert orderer._data.preprepared == []
    assert orderer._data.prepared == []
def test_stores_old_pre_prepares_on_view_change_started(internal_bus, orderer):
    """ViewChangeStarted moves received and sent PrePrepares into
    old_view_preprepares keyed by (pp_seq_no, digest), and repeated view
    changes accumulate rather than overwrite that store."""
    def make_pp(view_no, pp_seq_no):
        return create_pre_prepare_no_bls(generate_state_root(),
                                         view_no=view_no,
                                         pp_seq_no=pp_seq_no,
                                         inst_id=0)

    pp1 = make_pp(0, 1)
    pp2 = make_pp(0, 2)
    pp3 = make_pp(1, 3)
    pp4 = make_pp(2, 4)
    pp5 = make_pp(3, 5)
    pp6 = make_pp(3, 6)

    for pp in (pp1, pp3):
        orderer.prePrepares[(pp.viewNo, pp.ppSeqNo)] = pp
    for pp in (pp2, pp4):
        orderer.sentPrePrepares[(pp.viewNo, pp.ppSeqNo)] = pp
    assert not orderer.old_view_preprepares

    internal_bus.send(ViewChangeStarted(view_no=4))
    for pp in (pp1, pp2, pp3, pp4):
        assert orderer.old_view_preprepares[(pp.ppSeqNo, pp.digest)] == pp

    # A second view change appends to the already-stored PrePrepares.
    orderer.prePrepares[(pp5.viewNo, pp5.ppSeqNo)] = pp5
    orderer.sentPrePrepares[(pp6.viewNo, pp6.ppSeqNo)] = pp6
    internal_bus.send(ViewChangeStarted(view_no=4))
    for pp in (pp1, pp2, pp3, pp4, pp5, pp6):
        assert orderer.old_view_preprepares[(pp.ppSeqNo, pp.digest)] == pp
def process_need_view_change(self, msg: NeedViewChange): self._logger.info("{} processing {}".format(self, msg)) # 1. calculate new viewno view_no = msg.view_no if view_no is None: view_no = self._data.view_no + 1 # 2. Do cleanup before new view change starts self._clean_on_view_change_start() # 3. Update shared data self._data.view_no = view_no self._data.waiting_for_new_view = True self._data.primaries = self._primaries_selector.select_primaries( view_no=self._data.view_no, instance_count=self._data.quorums.f + 1, validators=self._data.validators) self._data.primary_name = generateName( self._data.primaries[self._data.inst_id], self._data.inst_id) if not self._data.is_master: return # 4. Build ViewChange message vc = self._build_view_change_msg() # 5. Send ViewChangeStarted via internal bus to update other services self._logger.info("{} sending {}".format(self, vc)) self._bus.send(ViewChangeStarted(view_no=self._data.view_no)) # 6. Send ViewChange msg to other nodes (via external bus) self._network.send(vc) self.view_change_votes.add_view_change(vc, self._data.name) # 7. Unstash messages for view change self._router.process_all_stashed(STASH_WAITING_VIEW_CHANGE) # 8. Restart instance change timer self._resend_inst_change_timer.stop() self._resend_inst_change_timer.start()
def test_clear_data_on_view_change_started(internal_bus, orderer):
    """ViewChangeStarted wipes all per-view 3PC state of the ordering
    service: PrePrepares/Prepares/Commits, pending queues, batches and
    the ordered set."""
    pp = create_pre_prepare_no_bls(
        generate_state_root(),
        view_no=0,
        pp_seq_no=10,
        inst_id=0,
        audit_txn_root="HSai3sMHKeAva4gWMabDrm1yNhezvPHfXnGyHf2ex1L4")
    prepare = create_prepare(req_key=(0, 10),
                             state_root=generate_state_root(),
                             inst_id=0)
    commit = create_commit_no_bls_sig(req_key=(0, 10), inst_id=0)

    three_pc_key = (pp.viewNo, pp.ppSeqNo)
    orderer.prePrepares[three_pc_key] = pp
    orderer.prepares[three_pc_key] = prepare
    orderer.commits[three_pc_key] = commit
    orderer.pre_prepare_tss[three_pc_key][pp.auditTxnRootHash, "Node1"] = 1234
    orderer.prePreparesPendingFinReqs.append(pp)
    orderer.prePreparesPendingPrevPP[three_pc_key] = pp
    orderer.sent_preprepares[three_pc_key] = pp
    orderer.batches[three_pc_key] = [pp.ledgerId, pp.discarded, pp.ppTime,
                                     generate_state_root(), len(pp.reqIdr)]
    orderer.ordered.add(*three_pc_key)

    internal_bus.send(ViewChangeStarted(view_no=4))

    for container in (orderer.prePrepares,
                      orderer.prepares,
                      orderer.commits,
                      orderer.pre_prepare_tss,
                      orderer.prePreparesPendingFinReqs,
                      orderer.prePreparesPendingPrevPP,
                      orderer.sent_preprepares,
                      orderer.batches,
                      orderer.ordered):
        assert not container
def test_process_view_change_started(message_req_service: MessageReqService, internal_bus, data, fill_requested_lists):
    """After ViewChangeStarted no message-request handler keeps any requested messages."""
    internal_bus.send(ViewChangeStarted(0))
    assert all(not h.requested_messages
               for h in message_req_service.handlers.values())
def test_process_view_change_started(message_req_service: MessageReqService, internal_bus, data, fill_requested_lists):
    """ViewChangeStarted clears the NEW_VIEW handler's requested messages."""
    new_view_handler = message_req_service.handlers[NEW_VIEW]
    internal_bus.send(ViewChangeStarted(0))
    assert not new_view_handler.requested_messages