def test_updates_shared_data_on_need_view_change(internal_bus, view_change_service,
                                                 initial_view_no, is_master):
    """Starting a view change — implicitly (next view) or with an explicit
    view_no — bumps view_no, raises waiting_for_new_view, rotates the primary
    and primaries list, and modifies only the shared-data fields owned by
    ViewChangeService."""
    rounds = (
        (NeedViewChange(), initial_view_no + 1),
        (NeedViewChange(view_no=initial_view_no + 3), initial_view_no + 3),
    )
    for trigger, expected_view_no in rounds:
        prev_primary = view_change_service._data.primary_name
        prev_primaries = view_change_service._data.primaries
        snapshot = copy_shared_data(view_change_service._data)

        internal_bus.send(trigger)

        assert view_change_service._data.view_no == expected_view_no
        assert view_change_service._data.waiting_for_new_view
        assert view_change_service._data.primary_name != prev_primary
        assert view_change_service._data.primaries != prev_primaries

        updated = copy_shared_data(view_change_service._data)
        check_service_changed_only_owned_fields_in_shared_data(
            ViewChangeService, snapshot, updated)
def test_updates_shared_data_on_need_view_change(internal_bus, view_change_service,
                                                 initial_view_no, is_master):
    """Starting a view change updates the shared data owned by ViewChangeService.

    On the master instance the primary rotates; on backup instances the primary
    is cleared (None) and master_reordered_after_vc is reset to False.

    Fix: compare booleans with ``is False`` instead of ``== False`` (PEP 8 / E712).
    """
    old_primary = view_change_service._data.primary_name
    old_data = copy_shared_data(view_change_service._data)

    internal_bus.send(NeedViewChange())

    assert view_change_service._data.view_no == initial_view_no + 1
    assert view_change_service._data.waiting_for_new_view
    if not is_master:
        # Backups drop their primary during view change and reset the
        # master-reordered flag.
        assert view_change_service._data.master_reordered_after_vc is False
        assert view_change_service._data.primary_name is None
    else:
        assert view_change_service._data.primary_name != old_primary
    new_data = copy_shared_data(view_change_service._data)
    check_service_changed_only_owned_fields_in_shared_data(ViewChangeService,
                                                           old_data, new_data)

    # Second round: explicit target view number
    old_primary = view_change_service._data.primary_name
    old_data = copy_shared_data(view_change_service._data)

    internal_bus.send(NeedViewChange(view_no=initial_view_no + 3))

    assert view_change_service._data.view_no == initial_view_no + 3
    assert view_change_service._data.waiting_for_new_view
    if not is_master:
        assert view_change_service._data.master_reordered_after_vc is False
        assert view_change_service._data.primary_name is None
    else:
        assert view_change_service._data.primary_name != old_primary
    new_data = copy_shared_data(view_change_service._data)
    check_service_changed_only_owned_fields_in_shared_data(ViewChangeService,
                                                           old_data, new_data)
def test_view_change_replaces_prepare(view_change_service, data):
    """Prepared batches reported in VIEW_CHANGE accumulate across views: an
    entry is replaced when a batch with the same ppSeqNo shows up in a later
    view, while entries without a newer counterpart are carried over."""
    # view no 0 -> 1
    data.view_no = 0
    data.prepared = [BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2")]
    view_change_service._bus.send(NeedViewChange())
    assert data.view_no == 1
    vc_msg = get_view_change(view_change_service)
    assert vc_msg.viewNo == 1
    assert vc_msg.prepared == [(0, 0, 1, "digest1"), (0, 0, 2, "digest2")]

    # view no 1 -> 2: all entries replaced (different viewNo and digest)
    data.prepared = [BatchID(1, 1, 1, "digest11"), BatchID(1, 1, 2, "digest22")]
    view_change_service._bus.send(NeedViewChange())
    assert data.view_no == 2
    vc_msg = get_view_change(view_change_service)
    assert vc_msg.viewNo == 2
    assert vc_msg.prepared == [(1, 1, 1, "digest11"), (1, 1, 2, "digest22")]

    # view no 2 -> 3: replace by different viewNo only; untouched ppSeqNo 1 survives
    data.prepared = [BatchID(2, 2, 2, "digest22"), BatchID(2, 2, 3, "digest3")]
    view_change_service._bus.send(NeedViewChange())
    assert data.view_no == 3
    vc_msg = get_view_change(view_change_service)
    assert vc_msg.viewNo == 3
    assert vc_msg.prepared == [(1, 1, 1, "digest11"),
                               (2, 2, 2, "digest22"),
                               (2, 2, 3, "digest3")]
def _finish_view_change_if_needed(self):
    """Validate the stored NEW_VIEW against collected VIEW_CHANGE votes and
    finish the view change when it checks out.

    If any referenced VIEW_CHANGE is still missing, do nothing and wait.
    If the checkpoint or the batches in the NEW_VIEW disagree with what we
    compute from the votes, treat the new primary as malicious and vote for
    another view change instead.
    """
    if self._new_view is None:
        return

    # Gather the actual VIEW_CHANGE messages referenced by the NEW_VIEW.
    view_changes = []
    for name, vc_digest in self._new_view.viewChanges:
        vote = self._votes.get_view_change(name, vc_digest)
        if vote is None:
            # We don't have needed ViewChange, so we cannot validate NewView
            return
        view_changes.append(vote)

    checkpoint = self._new_view_builder.calc_checkpoint(view_changes)
    if checkpoint is None or checkpoint != self._new_view.checkpoint:
        # New primary is malicious
        self._logger.info(
            "{} Received invalid NewView {} for view {}: expected checkpoint {}"
            .format(self._data.name, self._new_view, self._data.view_no,
                    checkpoint))
        self._bus.send(NeedViewChange())
        return

    batches = self._new_view_builder.calc_batches(checkpoint, view_changes)
    if batches != self._new_view.batches:
        # New primary is malicious
        self._logger.info(
            "{} Received invalid NewView {} for view {}: expected batches {}"
            .format(self._data.name, self._new_view, self._data.view_no,
                    batches))
        self._bus.send(NeedViewChange())
        return

    self._finish_view_change()
def test_view_change_data_multiple(view_change_service, data):
    """Over consecutive view changes the VIEW_CHANGE message accumulates
    prepared/preprepared batches and checkpoints from all previous views."""
    # view 0 -> 1
    data.view_no = 0
    cp1 = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=10,
                     digest=cp_digest(10))
    data.checkpoints.add(cp1)
    data.stable_checkpoint = 0
    data.prepared = [BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2")]
    data.preprepared = [BatchID(0, 0, 1, "digest1"),
                        BatchID(0, 0, 2, "digest2"),
                        BatchID(0, 0, 3, "digest3")]
    view_change_service._bus.send(NeedViewChange())
    assert data.view_no == 1
    vc_msg = get_view_change(view_change_service)
    assert vc_msg.viewNo == 1
    assert vc_msg.prepared == [(0, 0, 1, "digest1"), (0, 0, 2, "digest2")]
    assert vc_msg.preprepared == [(0, 0, 1, "digest1"), (0, 0, 2, "digest2"),
                                  (0, 0, 3, "digest3")]
    assert vc_msg.stableCheckpoint == 0
    assert vc_msg.checkpoints == [data.initial_checkpoint, cp1]

    # view 1 -> 2
    data.view_no = 1
    cp2 = Checkpoint(instId=0, viewNo=1, seqNoStart=0, seqNoEnd=20,
                     digest=cp_digest(20))
    data.checkpoints.add(cp2)
    data.stable_checkpoint = 0
    data.prepared = [BatchID(1, 1, 11, "digest11"), BatchID(1, 1, 12, "digest12")]
    data.preprepared = [BatchID(1, 1, 11, "digest11"),
                        BatchID(1, 1, 12, "digest12"),
                        BatchID(1, 1, 13, "digest13")]
    view_change_service._bus.send(NeedViewChange())
    assert data.view_no == 2
    vc_msg = get_view_change(view_change_service)
    assert vc_msg.viewNo == 2
    assert vc_msg.prepared == [(0, 0, 1, "digest1"), (0, 0, 2, "digest2"),
                               (1, 1, 11, "digest11"), (1, 1, 12, "digest12")]
    assert vc_msg.preprepared == [(0, 0, 1, "digest1"), (0, 0, 2, "digest2"),
                                  (0, 0, 3, "digest3"), (1, 1, 11, "digest11"),
                                  (1, 1, 12, "digest12"), (1, 1, 13, "digest13")]
    assert vc_msg.stableCheckpoint == 0
    assert vc_msg.checkpoints == [data.initial_checkpoint, cp1, cp2]
def test_view_change_data_multiple_respects_checkpoint(view_change_service, data):
    """Once a checkpoint stabilizes, batches below it and already-cleared
    checkpoints are dropped from subsequent VIEW_CHANGE messages."""
    # view 0 -> 1
    data.view_no = 0
    cp1 = Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=10,
                     digest=cp_digest(10))
    data.checkpoints.add(cp1)
    data.stable_checkpoint = 0
    data.prepared = [BatchID(0, 1, "digest1"), BatchID(0, 2, "digest2")]
    data.preprepared = [BatchID(0, 1, "digest1"),
                        BatchID(0, 2, "digest2"),
                        BatchID(0, 3, "digest3")]
    view_change_service._bus.send(NeedViewChange())
    assert data.view_no == 1
    vc_msg = get_view_change(view_change_service)
    assert vc_msg.viewNo == 1
    assert vc_msg.prepared == [(0, 1, "digest1"), (0, 2, "digest2")]
    assert vc_msg.preprepared == [(0, 1, "digest1"), (0, 2, "digest2"),
                                  (0, 3, "digest3")]
    assert vc_msg.stableCheckpoint == 0
    assert vc_msg.checkpoints == [data.initial_checkpoint, cp1]

    # view 1 -> 2
    data.view_no = 1
    cp2 = Checkpoint(instId=0, viewNo=1, seqNoStart=0, seqNoEnd=20,
                     digest=cp_digest(20))
    data.checkpoints.add(cp2)
    data.stable_checkpoint = 10
    # Here we simulate checkpoint stabilization logic of CheckpointService,
    # which clears all checkpoints below stabilized one
    data.checkpoints.remove(data.initial_checkpoint)
    data.prepared = [BatchID(1, 11, "digest11"), BatchID(1, 12, "digest12")]
    data.preprepared = [BatchID(1, 11, "digest11"),
                        BatchID(1, 12, "digest12"),
                        BatchID(1, 13, "digest13")]
    view_change_service._bus.send(NeedViewChange())
    assert data.view_no == 2
    vc_msg = get_view_change(view_change_service)
    assert vc_msg.viewNo == 2
    assert vc_msg.prepared == [(1, 11, "digest11"), (1, 12, "digest12")]
    assert vc_msg.preprepared == [(1, 11, "digest11"), (1, 12, "digest12"),
                                  (1, 13, "digest13")]
    assert vc_msg.stableCheckpoint == 10
    assert vc_msg.checkpoints == [cp1, cp2]
def test_start_view_change_sends_view_change_started(internal_bus, view_change_service,
                                                     initial_view_no):
    """NeedViewChange triggers a ViewChangeStarted notification carrying the
    target view number: the next view by default, an explicit one if given."""
    handler = Mock()
    internal_bus.subscribe(ViewChangeStarted, handler)

    internal_bus.send(NeedViewChange())
    handler.assert_called_once_with(ViewChangeStarted(view_no=initial_view_no + 1))

    internal_bus.send(NeedViewChange(view_no=5))
    handler.assert_called_with(ViewChangeStarted(view_no=5))
def test_start_view_change_sends_view_change_started(internal_bus, view_change_service,
                                                     initial_view_no, is_master):
    """NeedViewChange triggers a ViewChangeStarted notification with the target
    view number (master instance only)."""
    # TODO: Need to decide on how we handle this case
    if not is_master:
        return

    handler = Mock()
    internal_bus.subscribe(ViewChangeStarted, handler)

    internal_bus.send(NeedViewChange())
    handler.assert_called_once_with(ViewChangeStarted(view_no=initial_view_no + 1))

    internal_bus.send(NeedViewChange(view_no=5))
    handler.assert_called_with(ViewChangeStarted(view_no=5))
def test_view_change_permutations(random_random):
    """The checkpoint computed from a quorum of VIEW_CHANGE votes must not
    depend on the order in which the votes are fed to the builder."""
    # Create pool in some random initial state
    pool, _ = some_pool(random_random)
    quorums = pool.nodes[0]._data.quorums

    # Get view change votes from all nodes
    view_change_messages = []
    for node in pool.nodes:
        network = MockNetwork()
        node._view_changer._network = network
        node._view_changer._bus.send(NeedViewChange())
        view_change_messages.append(network.sent_messages[0][0])

    # Select random number of view change votes
    num_view_changes = random_random.integer(quorums.view_change.value, quorums.n)
    view_change_messages = random_random.sample(view_change_messages,
                                                num_view_changes)

    # Shuffling the votes repeatedly must always yield the same checkpoint
    new_view_builder = pool.nodes[0]._view_changer._new_view_builder
    checkpoints = {
        new_view_builder.calc_checkpoint(random_random.shuffle(view_change_messages))
        for _ in range(10)
    }
    assert len(checkpoints) == 1
def test_view_change_data(view_change_service, data):
    """The VIEW_CHANGE message reflects the node's current prepared and
    preprepared batches, stable checkpoint and checkpoint list."""
    data.view_no = 1
    data.checkpoints.clear()
    cp = Checkpoint(instId=0, viewNo=1, seqNoStart=0, seqNoEnd=10,
                    digest=cp_digest(10))
    data.checkpoints.add(cp)
    data.stable_checkpoint = 10
    data.prepared = [BatchID(0, 0, 1, "digest1"), BatchID(0, 0, 2, "digest2")]
    data.preprepared = [BatchID(0, 0, 1, "digest1"),
                        BatchID(0, 0, 2, "digest2"),
                        BatchID(0, 0, 3, "digest3")]

    view_change_service._bus.send(NeedViewChange())

    assert data.view_no == 2
    vc_msg = get_view_change(view_change_service)
    assert vc_msg.viewNo == 2
    assert vc_msg.prepared == [(0, 0, 1, "digest1"), (0, 0, 2, "digest2")]
    assert vc_msg.preprepared == [(0, 0, 1, "digest1"), (0, 0, 2, "digest2"),
                                  (0, 0, 3, "digest3")]
    assert vc_msg.stableCheckpoint == 10
    assert vc_msg.checkpoints == [cp]
def test_new_view_combinations(random):
    """For any strong quorum of VIEW_CHANGE votes, the NEW_VIEW builder yields a
    valid checkpoint and batches containing all committed requests above it."""
    # Create pool in some random initial state
    pool, _ = some_pool(random)
    quorums = pool.nodes[0]._data.quorums

    # Get view change votes from all nodes
    view_change_messages = []
    for node in pool.nodes:
        network = MockNetwork()
        node._view_changer._network = network
        node._view_changer._bus.send(NeedViewChange())
        view_change_messages.append(network.sent_messages[0][0])

    # Check that all committed requests are present in final batches
    builder = pool.nodes[0]._view_changer._new_view_builder
    for _ in range(10):
        votes = random.sample(view_change_messages, quorums.strong.value)

        cp = builder.calc_checkpoint(votes)
        assert cp is not None

        batches = builder.calc_batches(cp, votes)
        committed = [c for c in calc_committed(votes) if c.pp_seq_no > cp.seqNoEnd]
        assert batches is not None
        assert committed == batches[:len(committed)]
def test_send_instance_change_on_timeout_no_new_view_received(
        internal_bus, external_bus, view_change_service, timer,
        initial_view_no, is_master):
    """If no NEW_VIEW arrives before NEW_VIEW_TIMEOUT, the node stays in the
    same view and broadcasts an InstanceChange — once per elapsed timeout."""
    # TODO: Need to decide on how we handle this case
    if not is_master:
        return

    internal_bus.send(NeedViewChange())
    init_network_msg_count = len(external_bus.sent_messages)

    # Just before the timeout nothing new is sent
    timer.sleep(view_change_service._config.NEW_VIEW_TIMEOUT - 1)
    assert view_change_service._data.view_no == initial_view_no + 1
    assert init_network_msg_count == len(external_bus.sent_messages)

    # First timeout expiry: we don't go to new view, just send Instance Change
    timer.sleep(2)
    assert view_change_service._data.view_no == initial_view_no + 1
    assert init_network_msg_count + 1 == len(external_bus.sent_messages)
    msg, dst = external_bus.sent_messages[-1]
    assert dst is None  # broadcast
    assert isinstance(msg, InstanceChange)
    assert msg.viewNo == initial_view_no + 2
    assert msg.reason == Suspicions.INSTANCE_CHANGE_TIMEOUT.code

    # Second timeout expiry: another Instance Change, still no new view
    timer.sleep(view_change_service._config.NEW_VIEW_TIMEOUT + 1)
    assert view_change_service._data.view_no == initial_view_no + 1
    assert init_network_msg_count + 2 == len(external_bus.sent_messages)
    msg, dst = external_bus.sent_messages[-1]
    assert dst is None  # broadcast
    assert isinstance(msg, InstanceChange)
    assert msg.viewNo == initial_view_no + 2
    assert msg.reason == Suspicions.INSTANCE_CHANGE_TIMEOUT.code
def test_non_primary_responds_to_view_change_message_with_view_change_ack_to_new_primary(
        internal_bus, external_bus, some_item, other_item, validators, primary,
        view_change_service_builder, initial_view_no, is_master):
    """A non-primary receiving a VIEW_CHANGE sends a matching ViewChangeAck
    addressed to the expected new primary only."""
    # TODO: Need to decide on how we handle this case
    if not is_master:
        return

    next_view_no = initial_view_no + 1
    non_primary_name = some_item(validators, exclude=[primary(next_view_no)])
    service = view_change_service_builder(non_primary_name)

    internal_bus.send(NeedViewChange())
    external_bus.sent_messages.clear()

    vc = create_view_change(initial_view_no)
    frm = other_item(validators, exclude=[non_primary_name])
    external_bus.process_incoming(vc, generateName(frm, service._data.inst_id))

    assert len(external_bus.sent_messages) == 1
    msg, dst = external_bus.sent_messages[0]
    assert dst == [getNodeName(service._data.primary_name)]
    assert isinstance(msg, ViewChangeAck)
    assert msg.viewNo == vc.viewNo
    assert msg.name == frm
    assert msg.digest == view_change_digest(vc)
def test_new_view_message_is_sent_by_primary_when_view_change_certificate_is_reached(
        internal_bus, validators, primary, view_change_service_builder,
        initial_view_no, view_change_acks):
    """Once the new primary collects a view-change certificate (quorum of
    VIEW_CHANGEs plus acks), it broadcasts exactly one NEW_VIEW."""
    primary_name = primary(initial_view_no + 1)
    service = view_change_service_builder(primary_name)

    # start view change
    internal_bus.send(NeedViewChange())
    service._network.sent_messages.clear()

    # receive quorum of ViewChanges and ViewChangeAcks
    non_primaries = [item for item in validators if item != primary_name]
    vc = create_view_change(initial_view_no)
    for vc_frm in non_primaries:
        service._network.process_incoming(vc, vc_frm)
        for ack, ack_frm in view_change_acks(vc, vc_frm, primary_name,
                                             len(validators) - 2):
            service._network.process_incoming(ack, ack_frm)

    # check that NewView has been sent
    assert len(service._network.sent_messages) == 1
    msg, dst = service._network.sent_messages[0]
    assert dst is None  # message was broadcast
    assert isinstance(msg, NewView)
    assert msg.viewNo == initial_view_no + 1
def test_new_view_message_is_not_sent_by_non_primary_when_view_change_certificate_is_reached(
        internal_bus, external_bus, validators, primary,
        view_change_service_builder, initial_view_no, some_item, is_master):
    """A non-primary node never broadcasts NEW_VIEW, even after collecting a
    full view-change certificate."""
    # TODO: Need to decide on how we handle this case
    if not is_master:
        return

    next_view_no = initial_view_no + 1
    primary_name = primary(next_view_no)
    non_primary_name = some_item(validators, exclude=[primary_name])
    service = view_change_service_builder(non_primary_name)

    # start view change
    internal_bus.send(NeedViewChange())
    external_bus.sent_messages.clear()

    # receive quorum of ViewChanges and ViewChangeAcks
    non_primaries = [item for item in validators if item != primary_name]
    vc = create_view_change(initial_view_no)
    for vc_frm in non_primaries:
        external_bus.process_incoming(vc, generateName(vc_frm, service._data.inst_id))
        for ack, ack_frm in create_view_change_acks(vc, vc_frm, non_primaries):
            external_bus.process_incoming(ack,
                                          generateName(ack_frm, service._data.inst_id))

    # check that NewView hasn't been sent
    assert all(not isinstance(msg, NewView)
               for msg in external_bus.sent_messages)
def do_test(seed):
    """Order REQUEST_COUNT requests while a view change to view 1 is triggered
    mid-flight, then verify ordering progress, ledger growth and consistency.

    Fix: step comments were numbered 1, 2, 3, 3, 4 — renumbered sequentially.
    """
    # 1. Setup pool
    requests_count = REQUEST_COUNT
    batches_count = requests_count // MAX_BATCH_SIZE
    random = DefaultSimRandom(seed)
    reqs = create_requests(requests_count)
    pool = setup_pool(random)
    pool.sim_send_requests(reqs)
    initial_ledger_size = get_pools_ledger_size(pool)

    # 2. Send 3pc batches
    random_interval = 1000
    RepeatingTimer(pool.timer, random_interval, partial(order_requests, pool))
    for node in pool.nodes:
        pool.timer.schedule(
            3000,
            partial(node._view_changer.process_need_view_change,
                    NeedViewChange(view_no=1)))

    # 3. Make sure that view_change is completed
    for node in pool.nodes:
        pool.timer.wait_for(lambda: node._view_changer._data.view_no == 1)

    # 4. Make sure all nodes ordered all the requests
    for node in pool.nodes:
        pool.timer.wait_for(partial(check_batch_count, node, batches_count))
        pool.timer.wait_for(
            partial(check_ledger_size, node,
                    initial_ledger_size + REQUEST_COUNT))

    # 5. Check data consistency
    pool.timer.wait_for(lambda: check_no_asserts(check_consistency, pool))
def check_view_change_completes_under_normal_conditions(random: SimRandom):
    """Under normal conditions a view change scheduled at random times on all
    nodes completes, leaves all nodes in the same state, and preserves the
    order of committed requests above the stable checkpoint."""
    # Create random pool with random initial state
    pool, committed = some_pool(random)

    # Schedule view change at different time on all nodes
    for node in pool.nodes:
        pool.timer.schedule(
            random.integer(0, 10000),
            partial(node._view_changer.process_need_view_change,
                    NeedViewChange()))

    # Make sure all nodes complete view change
    pool.timer.wait_for(lambda: all(not node._data.waiting_for_new_view
                                    and node._data.view_no > 0
                                    for node in pool.nodes))

    # Make sure all nodes end up in same state
    for node_a, node_b in zip(pool.nodes, pool.nodes[1:]):
        assert node_a._data.view_no == node_b._data.view_no
        assert node_a._data.primary_name == node_b._data.primary_name
        assert node_a._data.stable_checkpoint == node_b._data.stable_checkpoint
        assert node_a._data.preprepared == node_b._data.preprepared

    # Make sure that all committed reqs are ordered with the same ppSeqNo
    # in the new view:
    stable_checkpoint = pool.nodes[0]._data.stable_checkpoint
    committed = [c for c in committed if c.pp_seq_no > stable_checkpoint]
    for n in pool.nodes:
        assert committed == n._data.preprepared[:len(committed)]
def test_new_view_message_is_sent_by_primary_when_view_change_certificate_is_reached(
        internal_bus, external_bus, validators, primary,
        view_change_service_builder, initial_view_no, view_change_acks,
        is_master):
    """On the master instance, the new primary broadcasts exactly one NEW_VIEW
    once a view-change certificate is collected."""
    # TODO: Need to decide on how we handle this case
    if not is_master:
        return

    primary_name = primary(initial_view_no + 1)
    service = view_change_service_builder(primary_name)

    # start view change
    internal_bus.send(NeedViewChange())
    external_bus.sent_messages.clear()

    # receive quorum of ViewChanges and ViewChangeAcks
    non_primaries = [item for item in validators if item != primary_name]
    vc = create_view_change(initial_view_no)
    for vc_frm in non_primaries:
        external_bus.process_incoming(vc, generateName(vc_frm, service._data.inst_id))
        for ack, ack_frm in view_change_acks(vc, vc_frm, primary_name,
                                             len(validators) - 2):
            external_bus.process_incoming(ack,
                                          generateName(ack_frm, service._data.inst_id))

    # check that NewView has been sent
    assert len(external_bus.sent_messages) == 1
    msg, dst = external_bus.sent_messages[0]
    assert dst is None  # message was broadcast
    assert isinstance(msg, NewView)
    assert msg.viewNo == initial_view_no + 1
def test_view_change_while_ordering_with_real_msgs(seed):
    """Run ordering with real messages while a view change to view 1 happens,
    then check that all batches are ordered and the pool is consistent.

    Fix: step comments were numbered 1, 2, 3, 3, 4 — renumbered sequentially.
    """
    # 1. Setup pool
    requests_count = REQUEST_COUNT
    batches_count = requests_count // MAX_BATCH_SIZE
    random = DefaultSimRandom(seed)
    pool = setup_pool(random, requests_count)

    # 2. Send 3pc batches
    random_interval = 1000
    RepeatingTimer(pool.timer, random_interval, partial(order_requests, pool))
    for node in pool.nodes:
        pool.timer.schedule(
            3000,
            partial(node._view_changer.process_need_view_change,
                    NeedViewChange(view_no=1)))

    # 3. Make sure that view_change is completed
    for node in pool.nodes:
        pool.timer.wait_for(lambda: node._view_changer._data.view_no == 1)

    # 4. Make sure all nodes ordered all the requests
    for node in pool.nodes:
        pool.timer.wait_for(partial(check_batch_count, node, batches_count))

    # 5. Check data consistency
    check_consistency(pool)
def test_do_not_send_instance_change_on_timeout_when_multiple_view_change_finished_on_time(
        internal_bus, external_bus, validators, primary,
        view_change_service_builder, timer, initial_view_no, is_master):
    """After two back-to-back view changes complete in time, the NEW_VIEW
    timeout must not fire and no further view change is started."""
    # TODO: Need to decide on how we handle this case
    if not is_master:
        return

    primary_name = primary(initial_view_no + 2)
    service = view_change_service_builder(primary_name)

    # start first view change
    internal_bus.send(NeedViewChange())
    # start second view change
    internal_bus.send(NeedViewChange())
    external_bus.sent_messages.clear()

    # receive quorum of ViewChanges and ViewChangeAcks
    non_primaries = [item for item in validators if item != primary_name]
    vc = create_view_change(initial_view_no + 1)
    service._data.checkpoints.append(
        Checkpoint(instId=0, viewNo=initial_view_no + 1, seqNoStart=0,
                   seqNoEnd=DEFAULT_STABLE_CHKP,
                   digest=cp_digest(DEFAULT_STABLE_CHKP)))
    for vc_frm in non_primaries:
        external_bus.process_incoming(vc, generateName(vc_frm, service._data.inst_id))
        for ack, ack_frm in create_view_change_acks(vc, vc_frm, non_primaries):
            external_bus.process_incoming(ack,
                                          generateName(ack_frm, service._data.inst_id))

    # check that view change is finished
    assert service._data.view_no == initial_view_no + 2
    assert not service._data.waiting_for_new_view
    assert len(external_bus.sent_messages) == 1
    msg, dst = external_bus.sent_messages[0]
    assert isinstance(msg, NewView)

    # make sure view change hasn't been started again
    timer.sleep(service._config.NEW_VIEW_TIMEOUT + 1)
    assert service._data.view_no == initial_view_no + 2
    assert len(external_bus.sent_messages) == 1
    msg, dst = external_bus.sent_messages[0]
    assert isinstance(msg, NewView)
def check_view_change_completes_under_normal_conditions(
        random: SimRandom, min_latency, max_latency, filtered_msg_types,
        filter_probability):
    """View change completes despite network latency and probabilistic message
    filtering on one node; at least N-F nodes agree on the stable checkpoint,
    and committed requests keep their order above it.

    Fix: misspelled local ``most_freq_stable_ckeckpoint`` renamed.
    """
    # PREPARE
    # 1. Create random pool with random initial state
    pool, committed = some_pool(random)
    N = pool.size
    F = (N - 1) // 3

    # 2. set latency
    pool.network.set_latency(min_latency, max_latency)

    # 3. set filter
    pool.network.set_filter([getNodeName(pool.nodes[-1].name)],
                            filtered_msg_types, filter_probability)

    # EXECUTE
    # Schedule view change at different time on all nodes
    for node in pool.nodes:
        pool.timer.schedule(
            random.integer(0, 10000),
            partial(node._view_changer.process_need_view_change,
                    NeedViewChange()))

    # CHECK
    # 1. Make sure all nodes complete view change
    pool.timer.wait_for(lambda: all(not node._data.waiting_for_new_view
                                    and node._data.view_no > 0
                                    for node in pool.nodes))

    # 2. check that equal stable checkpoint is set on at least N-F nodes
    #    (F nodes may lag behind and will catchup)
    stable_checkpoints = [n._data.stable_checkpoint for n in pool.nodes]
    most_freq_stable_checkpoint = Counter(stable_checkpoints).most_common(1)
    stable_checkpoint = most_freq_stable_checkpoint[0][0]
    assert most_freq_stable_checkpoint[0][1] >= N - F

    # 3. check that equal preprepares is set on all node with the found
    #    stable checkpoint
    preprepares = set()
    for n in pool.nodes:
        if n._data.stable_checkpoint >= stable_checkpoint:
            preprepares.add(tuple(n._data.preprepared))
    assert len(preprepares) == 1

    # 4. Make sure all nodes end up in same view
    for node_a, node_b in zip(pool.nodes, pool.nodes[1:]):
        assert node_a._data.view_no == node_b._data.view_no
        assert node_a._data.primary_name == node_b._data.primary_name

    # 5. Make sure that all committed reqs are ordered with the same ppSeqNo
    #    in the new view:
    committed_above_cp = [c for c in committed
                          if c.pp_seq_no > stable_checkpoint]
    for n in pool.nodes:
        if n._data.stable_checkpoint >= stable_checkpoint:
            assert committed_above_cp == n._data.preprepared[:len(committed_above_cp)]
def test_view_change_finished_is_sent_by_non_primary_once_view_change_certificate_is_reached_and_new_view_from_primary(
        internal_bus, external_bus, validators, primary,
        view_change_service_builder, initial_view_no, some_item, is_master):
    """NewViewAccepted is emitted only when the NEW_VIEW comes from the actual
    primary; a NEW_VIEW from anyone else is ignored. On acceptance the view
    change finishes and only ViewChangeService-owned shared fields change."""
    # TODO: Need to decide on how we handle this case
    if not is_master:
        return

    handler = Mock()
    internal_bus.subscribe(NewViewAccepted, handler)

    next_view_no = initial_view_no + 1
    primary_name = primary(next_view_no)
    non_primary_name = some_item(validators, exclude=[primary_name])
    service = view_change_service_builder(non_primary_name)
    vc = create_view_change(initial_view_no)
    service._data.preprepared = vc.preprepared
    service._data.prepared = vc.prepared
    service._data.stable_checkpoint = vc.stableCheckpoint
    service._data.checkpoints = vc.checkpoints
    old_data = copy_shared_data(service._data)

    # start view change
    internal_bus.send(NeedViewChange())
    external_bus.sent_messages.clear()

    # receive quorum of ViewChanges and ViewChangeAcks
    non_primaries = [item for item in validators if item != primary_name]
    non_primaries = random.sample(non_primaries,
                                  service._data.quorums.view_change.value)
    new_view = create_new_view_from_vc(vc, non_primaries)
    for vc_frm in non_primaries:
        external_bus.process_incoming(vc, generateName(vc_frm, service._data.inst_id))
        for ack, ack_frm in create_view_change_acks(vc, vc_frm, non_primaries):
            external_bus.process_incoming(ack,
                                          generateName(ack_frm, service._data.inst_id))

    # check that NewViewAccepted hasn't been sent if NewView is from non-primary
    external_bus.process_incoming(new_view,
                                  generateName(non_primary_name, service._data.inst_id))
    handler.assert_not_called()
    assert service._data.view_no == initial_view_no + 1
    assert service._data.waiting_for_new_view

    # check that NewViewAccepted has been sent if NewView is from primary
    external_bus.process_incoming(new_view,
                                  generateName(primary_name, service._data.inst_id))
    expected_finish_vc = NewViewAccepted(view_no=initial_view_no + 1,
                                         view_changes=new_view.viewChanges,
                                         checkpoint=new_view.checkpoint,
                                         batches=new_view.batches)
    handler.assert_called_with(expected_finish_vc)

    # check that shared data is updated
    new_data = copy_shared_data(service._data)
    check_service_changed_only_owned_fields_in_shared_data(ViewChangeService,
                                                           old_data, new_data)
    assert service._data.view_no == initial_view_no + 1
    assert not service._data.waiting_for_new_view
def test_new_view_incorrect_checkpoint(internal_bus, validators, primary,
                                       view_change_service_builder,
                                       initial_view_no, some_item):
    """A NEW_VIEW from the primary carrying a checkpoint that doesn't match the
    votes is rejected: the node votes NeedViewChange and moves on to the next
    view."""
    next_view_no = initial_view_no + 1
    primary_name = primary(next_view_no)
    non_primary_name = some_item(validators, exclude=[primary_name])
    service = view_change_service_builder(non_primary_name)
    vc = create_view_change(initial_view_no)
    service._data.preprepared = vc.preprepared
    service._data.prepared = vc.prepared
    service._data.stable_checkpoint = vc.stableCheckpoint
    service._data.checkpoints = vc.checkpoints

    # start view change
    internal_bus.send(NeedViewChange())
    service._network.sent_messages.clear()

    handler = Mock()
    internal_bus.subscribe(NeedViewChange, handler)

    # receive quorum of ViewChanges and ViewChangeAcks
    non_primaries = [item for item in validators if item != primary_name]
    non_primaries = random.sample(non_primaries,
                                  service._data.quorums.view_change.value)
    for vc_frm in non_primaries:
        service._network.process_incoming(vc, vc_frm)
        for ack, ack_frm in create_view_change_acks(vc, vc_frm, non_primaries):
            service._network.process_incoming(ack, ack_frm)

    # send NewView with a bogus checkpoint by Primary
    cp = Checkpoint(instId=0, viewNo=initial_view_no, seqNoStart=0,
                    seqNoEnd=1000, digest=cp_digest(1000))
    new_view = create_new_view_from_vc(vc, non_primaries, checkpoint=cp)
    service._network.process_incoming(new_view, primary_name)

    # make sure that NeedViewChange is called
    handler.assert_called_with(NeedViewChange())

    # make sure that we get to the next view
    assert service._data.view_no == initial_view_no + 2
    assert service._data.waiting_for_new_view
def test_node_txn_add_new_node(node_req_add, sim_pool, random):
    """Add new nodes via NODE txns mixed with domain requests while a view
    change happens, then verify view number, node registry and ledger sizes."""
    # Step 1. Prepare NODE requests and some of params to check
    # Count of NODE requests is random but less than pool size
    pool_reqs = [node_req_add(sim_pool.size + i)
                 for i in range(random.integer(1, sim_pool.size - 1))]
    domain_reqs = create_requests(DOMAIN_REQ_COUNT)
    reqs = pool_reqs + domain_reqs
    shuffle(reqs)
    sim_pool.sim_send_requests(reqs)
    current_view_no = sim_pool.view_no
    current_pool_ledger_size = get_pools_ledger_size(sim_pool,
                                                     ledger_id=POOL_LEDGER_ID)
    current_domain_ledger_size = get_pools_ledger_size(sim_pool,
                                                       ledger_id=DOMAIN_LEDGER_ID)
    expected_view_no = current_view_no + 1
    expected_node_reg = sim_pool.validators + [Greeks[sim_pool.size + i][0]
                                               for i in range(len(pool_reqs))]

    # Step 2. Start requests ordering
    random_interval = random.integer(10, 20) * 100
    RepeatingTimer(sim_pool.timer, random_interval,
                   partial(order_requests, sim_pool))

    # Step 3. Initiate view change process during request's ordering
    for node in sim_pool.nodes:
        sim_pool.timer.schedule(
            random_interval + 1000,
            partial(node._view_changer.process_need_view_change,
                    NeedViewChange(view_no=1)))

    # Step 4. Wait for VC completing
    for node in sim_pool.nodes:
        sim_pool.timer.wait_for(lambda: node._view_changer._data.view_no == 1)
    for node in sim_pool.nodes:
        sim_pool.timer.wait_for(lambda: not node._data.waiting_for_new_view)

    # Step 5. Check parameters like ordered txns count, node_reg state
    # For now we can run these checks only for old nodes with newly added,
    # because we cannot run catchup process
    # ToDo: change these checks to run on the whole pool after INDY-2148
    # is implemented
    sim_pool.timer.wait_for(lambda: all(
        [n._data.view_no == expected_view_no for n in sim_pool.nodes]))
    sim_pool.timer.wait_for(partial(check_node_reg, sim_pool, expected_node_reg))
    for node in sim_pool.nodes:
        sim_pool.timer.wait_for(
            partial(check_ledger_size, node,
                    current_pool_ledger_size + len(pool_reqs),
                    POOL_LEDGER_ID))
        sim_pool.timer.wait_for(
            partial(check_ledger_size, node,
                    current_domain_ledger_size + len(domain_reqs),
                    DOMAIN_LEDGER_ID))
def test_view_change_empty_prepares(view_change_service, data):
    """With no prepared or preprepared batches, the VIEW_CHANGE message carries
    empty lists for both."""
    data.prepared = []
    data.preprepared = []

    view_change_service._bus.send(NeedViewChange())

    vc_msg = get_view_change(view_change_service)
    assert vc_msg.prepared == []
    assert vc_msg.preprepared == []
def test_start_view_change_broadcasts_view_change_message(
        internal_bus, view_change_service, initial_view_no):
    """Starting a view change broadcasts exactly one VIEW_CHANGE message with
    the next view number and the node's current stable checkpoint."""
    internal_bus.send(NeedViewChange())

    assert len(view_change_service._network.sent_messages) == 1
    msg, dst = view_change_service._network.sent_messages[0]
    assert dst is None  # message was broadcast
    assert isinstance(msg, ViewChange)
    assert msg.viewNo == initial_view_no + 1
    assert msg.stableCheckpoint == view_change_service._data.stable_checkpoint
def test_send_instance_change_on_new_view_with_incorrect_checkpoint(
        internal_bus, external_bus, validators, primary,
        view_change_service_builder, initial_view_no, some_item, is_master):
    """A NEW_VIEW from the primary with an invalid checkpoint makes the node
    stay in the current view and broadcast an InstanceChange for the next."""
    # TODO: Need to decide on how we handle this case
    if not is_master:
        return

    next_view_no = initial_view_no + 1
    primary_name = primary(next_view_no)
    non_primary_name = some_item(validators, exclude=[primary_name])
    service = view_change_service_builder(non_primary_name)
    vc = create_view_change(initial_view_no)
    service._data.preprepared = vc.preprepared
    service._data.prepared = vc.prepared
    service._data.stable_checkpoint = vc.stableCheckpoint
    service._data.checkpoints = vc.checkpoints

    # start view change
    internal_bus.send(NeedViewChange())
    external_bus.sent_messages.clear()

    # receive quorum of ViewChanges and ViewChangeAcks
    non_primaries = [item for item in validators if item != primary_name]
    non_primaries = random.sample(non_primaries,
                                  service._data.quorums.view_change.value)
    for vc_frm in non_primaries:
        external_bus.process_incoming(vc, generateName(vc_frm, service._data.inst_id))
        for ack, ack_frm in create_view_change_acks(vc, vc_frm, non_primaries):
            external_bus.process_incoming(ack,
                                          generateName(ack_frm, service._data.inst_id))

    # send NewView with a bogus checkpoint by Primary
    cp = Checkpoint(instId=0, viewNo=initial_view_no, seqNoStart=0,
                    seqNoEnd=1000, digest=cp_digest(1000))
    new_view = create_new_view_from_vc(vc, non_primaries, checkpoint=cp)
    init_network_msg_count = len(external_bus.sent_messages)
    external_bus.process_incoming(new_view,
                                  generateName(primary_name, service._data.inst_id))

    # we don't go to new view, just send Instance Change
    assert service._data.view_no == initial_view_no + 1
    assert init_network_msg_count + 1 == len(external_bus.sent_messages)
    msg, dst = external_bus.sent_messages[-1]
    assert dst is None  # broadcast
    assert isinstance(msg, InstanceChange)
    assert msg.viewNo == initial_view_no + 2
    assert msg.reason == Suspicions.NEW_VIEW_INVALID_CHECKPOINTS.code
def test_unstash_future_view_on_need_view_change(external_bus, internal_bus,
                                                 replica_service):
    """Messages stashed for a future view are released when the view change to
    that view starts; the NEW_VIEW itself moves to the waiting-new-view stash."""
    replica_service._data.view_no = 1
    replica_service._data.node_mode = Mode.participating

    # Both future-view messages get stashed while still in view 1
    external_bus.process_incoming(create_new_view(initial_view_no=1, stable_cp=200),
                                  replica_service._data.primary_name)
    external_bus.process_incoming(create_commit_no_bls_sig(req_key=(2, 10)),
                                  replica_service._data.primary_name)
    assert replica_service.stasher.stash_size(STASH_VIEW) == 2

    internal_bus.send(NeedViewChange(view_no=2))

    assert replica_service.stasher.stash_size(STASH_VIEW) == 0
    assert replica_service.stasher.stash_size(STASH_WAITING_NEW_VIEW) == 1
def test_non_primary_responds_to_view_change_message_with_view_change_ack_to_new_primary(
        internal_bus, some_item, other_item, validators, primary,
        view_change_service_builder, initial_view_no):
    """A non-primary receiving a VIEW_CHANGE answers with a ViewChangeAck
    directed at the expected new primary."""
    next_view_no = initial_view_no + 1
    non_primary_name = some_item(validators, exclude=[primary(next_view_no)])
    service = view_change_service_builder(non_primary_name)

    internal_bus.send(NeedViewChange())
    service._network.sent_messages.clear()

    vc = create_view_change(initial_view_no)
    frm = other_item(validators, exclude=[non_primary_name])
    service._network.process_incoming(vc, frm)

    assert len(service._network.sent_messages) == 1
    msg, dst = service._network.sent_messages[0]
    assert dst == service._data.primary_name
    assert isinstance(msg, ViewChangeAck)
    assert msg.viewNo == vc.viewNo
    assert msg.name == frm
    assert msg.digest == view_change_digest(vc)
def test_new_view_from_malicious(view_change_service_builder, primary,
                                 initial_view_no, validators):
    """
    This test shows situation, when there is quorum of correct NEW_VIEW msgs
    and NEW_VIEW msg from malicious primary.
    In this case, view_change will be completed by quorum of the same NEW_VIEW
    msgs not by NEW_VIEW from malicious
    """
    proposed_view_no = initial_view_no + 1
    primary_name = primary(proposed_view_no)
    without_primary = [v for v in validators if v != primary_name]
    vcs_name = without_primary[0]
    vcs = view_change_service_builder(vcs_name)
    vcs._data.is_master = True
    vcs.process_need_view_change(NeedViewChange(view_no=proposed_view_no))

    # Honest NEW_VIEW built from our own (correct) VIEW_CHANGE vote
    vc_not_malicious = vcs.view_change_votes._get_vote(vcs_name).view_change
    not_malicious_nv = create_new_view_from_vc(
        vc_not_malicious, without_primary,
        checkpoint=vc_not_malicious.checkpoints[-1])
    vc_from_malicious = create_view_change(initial_view_no, stable_cp=20,
                                           batches=[])

    # Quorum of matching honest votes and NEW_VIEWs from all non-primaries
    for voter in without_primary:
        vcs.view_change_votes.add_view_change(vc_not_malicious, voter)
        vcs._data.new_view_votes.add_new_view(not_malicious_nv, voter)

    # Malicious primary votes and sends a conflicting NEW_VIEW
    vcs.view_change_votes.add_view_change(vc_from_malicious, primary_name)
    malicious_nv = NewView(
        viewNo=proposed_view_no,
        viewChanges=[[primary_name, view_change_digest(vc_from_malicious)]],
        checkpoint=Checkpoint(instId=0, viewNo=initial_view_no, seqNoStart=10,
                              seqNoEnd=20, digest=cp_digest(20)),
        batches=[])
    vcs.process_new_view_message(malicious_nv, "{}:{}".format(primary_name, 0))

    # View change finished on the quorum of honest NEW_VIEWs
    assert not vcs._data.waiting_for_new_view