def add_new_view(self, msg: NewView, frm: str):
    """Register a NEW_VIEW vote from *frm*; adopt *msg* once a strong quorum agrees."""
    sender_node = replica_name_to_node_name(frm)
    digest = self._get_msg_digest(msg)
    votes = self._votes.setdefault(digest, NewViewVotes(msg))
    votes.add_vote(sender_node)
    # A strong quorum of identical NEW_VIEW messages makes this one authoritative.
    if self._quorums.strong.is_reached(len(self._votes[digest])):
        self.new_view = msg
def inst_id_by_primary_name(self) -> dict:
    """Map each elected primary's node name to the instance id it leads.

    Replicas with no primary elected yet are skipped.
    """
    mapping = {}
    for replica in self._replicas.values():
        if replica.primaryName:
            node_name = replica_name_to_node_name(replica.primaryName)
            mapping[node_name] = replica.instId
    return mapping
def add_view_change_ack(self, msg: ViewChangeAck, frm: str) -> bool:
    """
    Record a VIEW_CHANGE_ACK received from *frm*.

    Returns True if processing the ack revealed the sending node as suspicious.
    """
    sender_node = replica_name_to_node_name(frm)
    return self._get_vote(msg.name).add_view_change_ack(msg, sender_node)
def get_node_name(replica_name: str):
    """Return the node name that owns the replica called *replica_name*."""
    # TODO: Remove this wrapper and call replica_name_to_node_name directly
    return replica_name_to_node_name(replica_name)
def primary_name_by_inst_id(self) -> dict:
    """Map each instance id to the node name of that instance's primary.

    NOTE(review): unlike the inverse mapping, this does not skip replicas
    whose primaryName is unset — presumably the name helper tolerates that;
    confirm against callers.
    """
    result = {}
    for replica in self._replicas.values():
        result[replica.instId] = replica_name_to_node_name(replica.primaryName)
    return result
def master_primary_name(self) -> Optional[str]:
    """Node name of the master instance's primary, or None if none is elected yet."""
    primary_replica = self.replicas[0].primaryName
    if not primary_replica:
        return None
    return replica_name_to_node_name(primary_replica)
def _update_connecteds(self):
    """Recompute the set of connected node names and push it to every node's network."""
    connected_names = {replica_name_to_node_name(node.name) for node in self._nodes}
    for node in self._nodes:
        node._network.update_connecteds(connected_names)
def get_master_primary_node(nodes):
    """Return the node acting as primary of the master instance.

    Raises AssertionError when no primary has been elected.
    """
    any_node = next(iter(nodes))
    primary_replica_name = any_node.replicas[0].primaryName
    if primary_replica_name is None:
        raise AssertionError('No primary found for master')
    return nodeByName(nodes, replica_name_to_node_name(primary_replica_name))
def name(self):
    """The owning node's name, derived from this component's full internal name."""
    full_name = self._data.name
    return replica_name_to_node_name(full_name)
def check_view_change_completes_under_normal_conditions(
        random: SimRandom, min_latency, max_latency,
        filtered_msg_types, filter_probability):
    """Run a simulated view change on a randomized pool and assert convergence.

    PREPARE: build a random pool, apply a latency window and a lossy filter.
    EXECUTE: schedule a view change on every node at a random time.
    CHECK: all nodes complete the view change and agree on stable checkpoint,
    preprepares, view number, primary, and on the ppSeqNo of every committed
    request above the checkpoint.
    """
    # PREPARE
    # 1. Create random pool with random initial state
    pool, committed = some_pool(random)
    total_nodes = pool.size
    max_faulty = (total_nodes - 1) // 3

    # 2. Set the simulated network latency window
    pool.network.set_latency(min_latency, max_latency)

    # 3. Randomly drop some of the filtered message types sent to the last node
    pool.network.add_processor(
        Discard(probability=filter_probability),
        message_type(filtered_msg_types),
        message_dst(replica_name_to_node_name(pool.nodes[-1].name)))

    # EXECUTE
    # Schedule the view change at a different random time on each node
    for node in pool.nodes:
        pool.timer.schedule(
            random.float(0, 10),
            partial(node._view_changer.process_need_view_change, NeedViewChange()))

    # CHECK
    # 1. Make sure all nodes complete the view change
    pool.timer.wait_for(lambda: all(not node._data.waiting_for_new_view
                                    and node._data.view_no > 0
                                    for node in pool.nodes))

    # 2. An equal stable checkpoint must be set on at least N-F nodes
    #    (up to F nodes may lag behind and will catch up later)
    stable_checkpoints = [n._data.stable_checkpoint for n in pool.nodes]
    (stable_checkpoint, vote_count), = Counter(stable_checkpoints).most_common(1)
    assert vote_count >= total_nodes - max_faulty

    # 3. Identical preprepares on every node at the found stable checkpoint
    preprepares = {tuple(n._data.preprepared)
                   for n in pool.nodes
                   if n._data.stable_checkpoint >= stable_checkpoint}
    assert len(preprepares) == 1

    # 4. Make sure all nodes end up in the same view with the same primary
    for node_a, node_b in zip(pool.nodes, pool.nodes[1:]):
        assert node_a._data.view_no == node_b._data.view_no
        assert node_a._data.primary_name == node_b._data.primary_name

    # 5. Make sure all committed reqs are ordered with the same ppSeqNo
    #    in the new view
    committed_above_cp = [c for c in committed if c.pp_seq_no > stable_checkpoint]
    for n in pool.nodes:
        if n._data.stable_checkpoint < stable_checkpoint:
            continue
        for expected_batch, actual_batch in zip(
                committed_above_cp,
                n._data.preprepared[:len(committed_above_cp)]):
            assert expected_batch.pp_view_no == actual_batch.pp_view_no
            assert expected_batch.pp_seq_no == actual_batch.pp_seq_no
            assert expected_batch.pp_digest == actual_batch.pp_digest
def process_disconnected(self, msg: ExternalBus.Disconnected, frm: str):
    """Handle a disconnect notification; escalate when the lost peer is the primary."""
    primary_node = replica_name_to_node_name(self._data.primary_name)
    if frm == primary_node:
        self._primary_disconnected()
def _request_new_view_message(self, view_no):
    """Ask the current primary to (re)send its NEW_VIEW message for *view_no*."""
    primary_node = replica_name_to_node_name(self._data.primary_name)
    request = MissingMessage(msg_type=NEW_VIEW,
                             key=view_no,
                             inst_id=self._data.inst_id,
                             dst=[primary_node],
                             stash_data=None)
    self._bus.send(request)