def test_view_change_primary_selection(validators, initial_view_no):
    """Primaries picked for adjacent views are valid validators and distinct."""
    # Select the primary for the current view and for its two neighbours.
    current = ViewChangeService._find_primary(validators, initial_view_no)
    previous = ViewChangeService._find_primary(validators, initial_view_no - 1)
    following = ViewChangeService._find_primary(validators, initial_view_no + 1)

    # Every selected primary must come from the validator set.
    assert current in validators
    assert previous in validators
    assert following in validators

    # Selection must rotate: adjacent views never share a primary.
    assert current != previous
    assert current != following
def __init__(self, name: str, validators: List[str], primary_name: str,
             timer: TimerService, bus: InternalBus, network: ExternalBus,
             write_manager: WriteRequestManager = None,
             bls_bft_replica: BlsBftReplica = None):
    """Wire orderer, checkpointer and view changer around one shared data object."""
    shared = ConsensusSharedData(name, validators, 0)
    shared.primary_name = primary_name
    self._data = shared

    self._orderer = OrderingService(data=shared,
                                    timer=timer,
                                    bus=bus,
                                    network=network,
                                    write_manager=write_manager,
                                    bls_bft_replica=bls_bft_replica)
    self._checkpointer = CheckpointService(shared, bus, network)
    self._view_changer = ViewChangeService(shared, timer, bus, network)

    # TODO: This is just for testing purposes only
    shared.checkpoints.append(
        Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest='empty'))
def view_change_service(internal_bus, external_bus, timer, stasher, validators):
    """Fixture: ViewChangeService over freshly generated 4-node shared data."""
    # TODO: Use validators fixture
    shared_data = ConsensusSharedData("some_name", genNodeNames(4), 0)
    selector = RoundRobinConstantNodesPrimariesSelector(validators)
    return ViewChangeService(shared_data, timer, internal_bus, external_bus,
                             stasher, selector)
def __init__(self, name: str, validators: List[str], primary_name: str,
             timer: TimerService, bus: InternalBus, network: ExternalBus,
             write_manager: WriteRequestManager,
             bls_bft_replica: BlsBftReplica = None):
    """Assemble consensus services with a shared stashing router and freshness checks."""
    shared = ConsensusSharedData(name, validators, 0)
    shared.primary_name = primary_name
    self._data = shared

    cfg = getConfig()
    router = StashingRouter(cfg.REPLICA_STASH_LIMIT, buses=[bus, network])

    self._orderer = OrderingService(
        data=shared,
        timer=timer,
        bus=bus,
        network=network,
        write_manager=write_manager,
        bls_bft_replica=bls_bft_replica,
        freshness_checker=FreshnessChecker(
            freshness_timeout=cfg.STATE_FRESHNESS_UPDATE_INTERVAL),
        stasher=router)
    self._checkpointer = CheckpointService(shared, bus, network, router,
                                           write_manager.database_manager)
    self._view_changer = ViewChangeService(shared, timer, bus, network, router)

    # TODO: This is just for testing purposes only
    shared.checkpoints.append(
        Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0,
                   digest='4F7BsTMVPKFshM1MwLf6y23cid6fL3xMpazVoF9krzUw'))
def _init_view_change_service(self) -> ViewChangeService:
    """Create the view-change service bound to this replica's buses and stasher."""
    return ViewChangeService(
        data=self._consensus_data,
        timer=self.node.timer,
        bus=self.internal_bus,
        network=self._external_bus,
        stasher=self.stasher,
        primaries_selector=self.node.primaries_selector,
    )
def __init__(self, name: str, validators: List[str], primary_name: str,
             timer: TimerService, bus: InternalBus, network: ExternalBus,
             write_manager: WriteRequestManager,
             bls_bft_replica: BlsBftReplica = None):
    """Build a simulated replica: shared consensus data plus all consensus services.

    Wires OrderingService, CheckpointService, ViewChangeService and
    MessageReqService around a single ConsensusSharedData instance, then
    seeds an initial checkpoint and primaries for test scenarios.
    """
    # ToDo: Maybe ConsensusSharedData should be initiated before and passed already prepared?
    self._internal_bus = bus
    self._data = ConsensusSharedData(name, validators, 0)
    # Primary name is replica-scoped, hence the inst_id suffix.
    self._data.primary_name = generateName(primary_name, self._data.inst_id)
    self.config = getConfig()
    # One router is shared by all services so stash limits apply globally.
    self.stasher = StashingRouter(self.config.REPLICA_STASH_LIMIT, buses=[bus, network])
    self._write_manager = write_manager
    self._primaries_selector = RoundRobinNodeRegPrimariesSelector(
        self._write_manager.node_reg_handler)
    self._orderer = OrderingService(
        data=self._data,
        timer=timer,
        bus=bus,
        network=network,
        write_manager=self._write_manager,
        bls_bft_replica=bls_bft_replica,
        freshness_checker=FreshnessChecker(
            freshness_timeout=self.config.STATE_FRESHNESS_UPDATE_INTERVAL),
        primaries_selector=self._primaries_selector,
        stasher=self.stasher)
    self._checkpointer = CheckpointService(self._data, bus, network, self.stasher,
                                           write_manager.database_manager)
    self._view_changer = ViewChangeService(self._data, timer, bus, network,
                                           self.stasher, self._primaries_selector)
    self._message_requestor = MessageReqService(self._data, bus, network)
    self._add_ledgers()
    # TODO: This is just for testing purposes only
    self._data.checkpoints.append(
        Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0,
                   digest='4F7BsTMVPKFshM1MwLf6y23cid6fL3xMpazVoF9krzUw'))
    # ToDo: it should be done in Zero-view stage.
    # NOTE: order matters — primaries are selected only after catchup finishes,
    # since the selector reads the node registry populated by catchup.
    write_manager.on_catchup_finished()
    self._data.primaries = self._view_changer._primaries_selector.select_primaries(
        self._data.view_no)
    # ToDo: ugly way to understand node_reg changing
    self._previous_node_reg = self._write_manager.node_reg_handler.committed_node_reg
    bus.subscribe(Ordered, self.emulate_ordered_processing)
def _view_change_acks(vc, vc_frm, primary, count):
    """Build `count` ViewChangeAck messages from random validators that are
    neither the view-change sender nor the primary."""
    digest = ViewChangeService._view_change_digest(vc)
    excluded = {vc_frm, primary}
    candidates = [name for name in validators if name not in excluded]
    senders = random.sample(candidates, count)
    return [(ViewChangeAck(viewNo=vc.viewNo, name=vc_frm, digest=digest), sender)
            for sender in senders]
def _service(name):
    """Build a ViewChangeService whose data already holds a stable checkpoint."""
    data = consensus_data(name)
    stable_digest = cp_digest(DEFAULT_STABLE_CHKP)
    data.checkpoints.append(
        Checkpoint(instId=0, viewNo=initial_view_no, seqNoStart=0,
                   seqNoEnd=DEFAULT_STABLE_CHKP, digest=stable_digest))
    return ViewChangeService(data, timer, internal_bus, external_bus, stasher)
def _service(name):
    """Build a ViewChangeService with a stable checkpoint and a round-robin
    primaries selector over the fixed validator set."""
    data = consensus_data(name)
    stable_digest = cp_digest(DEFAULT_STABLE_CHKP)
    data.checkpoints.append(
        Checkpoint(instId=0, viewNo=initial_view_no, seqNoStart=0,
                   seqNoEnd=DEFAULT_STABLE_CHKP, digest=stable_digest))
    selector = RoundRobinConstantNodesPrimariesSelector(validators)
    return ViewChangeService(data, timer, internal_bus, external_bus,
                             stasher, selector)
def some_pool(random: SimRandom) -> (SimPool, List):
    """Create a random simulated pool with a consistent 3PC history.

    Generates per-node preprepares, prepares and checkpoints such that at most
    `faulty` nodes can be ahead of the rest, then returns the pool together
    with the list of batch ids that have a prepared certificate (i.e. enough
    nodes prepared them to be considered committed).
    """
    pool_size = random.integer(4, 8)
    pool = SimPool(pool_size, random)
    # Create simulated history
    # TODO: Move into helper?
    faulty = (pool_size - 1) // 3
    seq_no_per_cp = 10
    max_batches = 50
    batches = [
        some_random_preprepare(random, 0, n) for n in range(1, max_batches)
    ]
    checkpoints = [
        some_checkpoint(random, 0, n)
        for n in range(0, max_batches, seq_no_per_cp)
    ]
    # Preprepares: cap the f+1'th smallest count so a quorum stays behind it.
    pp_count = [random.integer(0, len(batches)) for _ in range(pool_size)]
    max_pp = sorted(pp_count)[faulty]
    # Prepares: never more than a node's own preprepares.
    p_count = [random.integer(0, min(max_pp, pp)) for pp in pp_count]
    max_p = sorted(p_count)[faulty]
    # Checkpoints
    # NOTE(review): this iterates pp_count but compares against max_p —
    # possibly intended to iterate p_count; verify against the simulation's
    # invariants before changing.
    cp_count = [
        1 + random.integer(0, min(max_p, p)) // seq_no_per_cp for p in pp_count
    ]
    max_stable_cp_indx = sorted(cp_count)[faulty] - 1
    stable_cp = [
        checkpoints[random.integer(0, min(max_stable_cp_indx, cp))].seqNoEnd
        for cp in cp_count
    ]
    # Initialize consensus data
    for i, node in enumerate(pool.nodes):
        node._data.preprepared = batches[:pp_count[i]]
        node._data.prepared = batches[:p_count[i]]
        node._data.checkpoints = checkpoints[:cp_count[i]]
        node._data.stable_checkpoint = stable_cp[i]
    # A batch is committed when >= N - f nodes have it in their prepared list.
    committed = []
    for i in range(1, max_batches):
        prepare_count = sum(1 for node in pool.nodes
                            if i <= len(node._data.prepared))
        has_prepared_cert = prepare_count >= pool_size - faulty
        if has_prepared_cert:
            committed.append(ViewChangeService.batch_id(batches[i - 1]))
    return pool, committed
def __init__(self, name: str, validators: List[str], primary_name: str,
             timer: TimerService, bus: InternalBus, network: ExternalBus):
    """Assemble orderer, checkpointer and view changer over shared data."""
    shared = ConsensusSharedData(name, validators, 0)
    shared.primary_name = primary_name
    self._data = shared

    self._orderer = OrderingService(shared, bus, network)
    self._checkpointer = CheckpointService(shared, bus, network)
    self._view_changer = ViewChangeService(shared, timer, bus, network)

    # TODO: This is just for testing purposes only
    shared.checkpoints.append(
        Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest='empty'))
def test_non_primary_responds_to_view_change_message_with_view_change_ack_to_new_primary(
        some_item, other_item, validators, primary, view_change_service,
        initial_view_no, view_change_message):
    """A non-primary node acks an incoming VIEW_CHANGE towards the new primary."""
    next_view = initial_view_no + 1
    node_name = some_item(validators, exclude=[primary(next_view)])
    service = view_change_service(node_name)

    vc = view_change_message(next_view)
    sender = other_item(validators, exclude=[node_name])
    service._network.process_incoming(vc, sender)

    # Exactly one outgoing message: a ViewChangeAck addressed to the new primary.
    assert len(service._network.sent_messages) == 1
    msg, dst = service._network.sent_messages[0]
    assert dst == service._data.primary_name
    assert isinstance(msg, ViewChangeAck)
    assert msg.viewNo == vc.viewNo
    assert msg.name == sender
    assert msg.digest == ViewChangeService._view_change_digest(vc)
def _service(name):
    """Build a ViewChangeService for a participating node, with the trigger
    service registered on the same buses."""
    data = consensus_data(name)
    data.node_mode = Mode.participating
    stable_digest = cp_digest(DEFAULT_STABLE_CHKP)
    data.checkpoints.append(
        Checkpoint(instId=0, viewNo=initial_view_no, seqNoStart=0,
                   seqNoEnd=DEFAULT_STABLE_CHKP, digest=stable_digest))
    # Constructed for its bus-subscription side effects; the instance itself
    # is not referenced afterwards.
    ViewChangeTriggerService(data=data,
                             timer=timer,
                             bus=internal_bus,
                             network=external_bus,
                             db_manager=DatabaseManager(),
                             stasher=stasher,
                             is_master_degraded=lambda: False)
    selector = RoundRobinConstantNodesPrimariesSelector(validators)
    return ViewChangeService(data, timer, internal_bus, external_bus,
                             stasher, selector)
def __init__(self, name: str, validators: List[str], primary_name: str,
             timer: TimerService, bus: InternalBus, network: ExternalBus,
             write_manager: WriteRequestManager,
             bls_bft_replica: BlsBftReplica = None):
    """Wire consensus services; the checkpointer gets a no-op legacy stasher."""
    shared = ConsensusSharedData(name, validators, 0)
    shared.primary_name = primary_name
    self._data = shared

    cfg = getConfig()
    router = StashingRouter(cfg.REPLICA_STASH_LIMIT)

    self._orderer = OrderingService(data=shared,
                                    timer=timer,
                                    bus=bus,
                                    network=network,
                                    write_manager=write_manager,
                                    bls_bft_replica=bls_bft_replica,
                                    stasher=router)
    self._checkpointer = CheckpointService(
        shared, bus, network, router, write_manager.database_manager,
        old_stasher=FakeSomething(unstash_watermarks=lambda: None))
    self._view_changer = ViewChangeService(shared, timer, bus, network)

    # TODO: This is just for testing purposes only
    shared.checkpoints.append(
        Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0, digest='empty'))
def test_view_change_digest_is_256_bit_hexdigest(view_change_message, random):
    """The view-change digest is a 64-character hex string (SHA-256 sized)."""
    message = view_change_message(random.integer(0, 10000))
    digest = ViewChangeService._view_change_digest(message)
    assert isinstance(digest, str)
    assert len(digest) == 64
    assert set(digest) <= set(string.hexdigits)
def _service(name):
    """Construct a ViewChangeService over the mock timer and shared buses."""
    return ViewChangeService(consensus_data(name), mock_timer, internal_bus,
                             external_bus, stasher)
def __init__(self, name: str, validators: List[str], primary_name: str,
             timer: TimerService, bus: InternalBus, network: ExternalBus):
    """Assemble consensus services over a shared ConsensusDataProvider."""
    provider = ConsensusDataProvider(name, validators, primary_name)
    self._data = provider
    self._orderer = OrderingService(provider, bus, network)
    self._checkpointer = CheckpointService(provider, bus, network)
    self._view_changer = ViewChangeService(provider, timer, bus, network)
def view_change_service():
    """Fixture: ViewChangeService over a 4-node pool with mock infrastructure."""
    shared = ConsensusSharedData("some_name", genNodeNames(4), 0)
    return ViewChangeService(shared, MockTimer(0), InternalBus(), MockNetwork())
def view_change_service(internal_bus, external_bus, timer, stasher):
    """Fixture: ViewChangeService over freshly generated 4-node shared data."""
    shared = ConsensusSharedData("some_name", genNodeNames(4), 0)
    return ViewChangeService(shared, timer, internal_bus, external_bus, stasher)
def __init__(self, name: str, network: ExternalBus):
    """Assemble consensus services around a minimal data provider."""
    provider = ConsensusDataProvider(name)
    self._data = provider
    self._orderer = OrderingService(provider, network)
    self._checkpointer = CheckpointService(provider, network)
    self._view_changer = ViewChangeService(provider, network)
def __init__(self, name: str, validators: List[str], primary_name: str,
             timer: TimerService, bus: InternalBus, network: ExternalBus,
             write_manager: WriteRequestManager,
             bls_bft_replica: BlsBftReplica = None):
    """Build a fully wired simulated replica with monitoring services.

    In addition to the core services (ordering, checkpointing, view change,
    message requests) this variant registers freshness checking per ledger,
    a view-change trigger, a primary connection monitor and a freshness
    monitor, then subscribes handlers that emulate node-level behaviour.
    """
    # ToDo: Maybe ConsensusSharedData should be initiated before and passed already prepared?
    self._network = network
    self._data = ConsensusSharedData(name, validators, 0)
    # Primary name is replica-scoped, hence the inst_id suffix.
    self._data.primary_name = generateName(primary_name, self._data.inst_id)
    self._timer = timer
    self.config = getConfig()
    # A single router shared by all services so stash limits apply globally.
    self.stasher = StashingRouter(self.config.REPLICA_STASH_LIMIT, buses=[bus, network])
    self._write_manager = write_manager
    self._primaries_selector = RoundRobinNodeRegPrimariesSelector(self._write_manager.node_reg_handler)
    self._freshness_checker = FreshnessChecker(freshness_timeout=self.config.STATE_FRESHNESS_UPDATE_INTERVAL)
    # Track freshness for all three core ledgers starting from "now".
    for ledger_id in [POOL_LEDGER_ID, DOMAIN_LEDGER_ID, CONFIG_LEDGER_ID]:
        self._freshness_checker.register_ledger(ledger_id=ledger_id,
                                                initial_time=self.get_time_for_3pc_batch())
    self._orderer = OrderingService(data=self._data,
                                    timer=self._timer,
                                    bus=bus,
                                    network=network,
                                    write_manager=self._write_manager,
                                    bls_bft_replica=bls_bft_replica,
                                    freshness_checker=self._freshness_checker,
                                    get_time_for_3pc_batch=self.get_time_for_3pc_batch,
                                    stasher=self.stasher)
    self._checkpointer = CheckpointService(self._data, bus, network, self.stasher,
                                           write_manager.database_manager)
    self._view_changer = ViewChangeService(self._data, self._timer, bus, network,
                                           self.stasher, self._primaries_selector)
    self._view_change_trigger = ViewChangeTriggerService(data=self._data,
                                                         timer=self._timer,
                                                         bus=bus,
                                                         network=network,
                                                         db_manager=write_manager.database_manager,
                                                         is_master_degraded=lambda: False,
                                                         stasher=self.stasher)
    self._primary_connection_monitor = PrimaryConnectionMonitorService(data=self._data,
                                                                       timer=self._timer,
                                                                       bus=bus,
                                                                       network=network)
    self._freshness_monitor = FreshnessMonitorService(data=self._data,
                                                      timer=self._timer,
                                                      bus=bus,
                                                      network=network,
                                                      freshness_checker=self._freshness_checker,
                                                      get_time_for_3pc_batch=self.get_time_for_3pc_batch)
    self._message_requestor = MessageReqService(self._data, bus, network)
    self._add_ledgers()
    # TODO: This is just for testing purposes only
    self._data.checkpoints.append(
        Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0,
                   digest='4F7BsTMVPKFshM1MwLf6y23cid6fL3xMpazVoF9krzUw'))
    # ToDo: it should be done in Zero-view stage.
    write_manager.on_catchup_finished()
    # Simulate node behavior
    self._internal_bus = bus
    self._internal_bus.subscribe(NodeNeedViewChange, self.process_node_need_view_change)
    self._internal_bus.subscribe(Ordered, self.emulate_ordered_processing)
    # ToDo: ugly way to understand node_reg changing
    self._previous_node_reg = self._write_manager.node_reg_handler.committed_node_reg
def _service(name):
    """Construct a ViewChangeService over fresh mock bus and network objects."""
    return ViewChangeService(consensus_data(name), mock_timer,
                             InternalBus(), MockNetwork())
def view_change_service(consensus_data, mock_network):
    """Fixture: ViewChangeService built from the shared data and mock network."""
    service = ViewChangeService(consensus_data, mock_network)
    return service
def test_different_view_change_messages_have_different_digests(
        view_change_message, random):
    """Distinct VIEW_CHANGE messages must hash to distinct digests."""
    first = view_change_message(random.integer(0, 10000))
    second = view_change_message(random.integer(0, 10000))
    digest_of = ViewChangeService._view_change_digest
    assert digest_of(first) != digest_of(second)
def _primary_in_view(view_no):
    """Shortcut: the primary selected among `validators` for the given view."""
    return ViewChangeService._find_primary(validators, view_no)