def test_stashing_router_can_stash_and_sort_messages():
    """Stashed messages are replayed in sort-key order when a sorted stasher is configured."""
    processed = []

    def handler(message: SomeMessage):
        processed.append(message)
        return STASH

    def sort_key(message: SomeMessage):
        return message.int_field

    bus = InternalBus()
    router = StashingRouter(10)
    router.set_sorted_stasher(STASH, key=sort_key)
    router.subscribe(SomeMessage, handler)
    router.subscribe_to(bus)

    messages = [create_some_message() for _ in range(10)]
    for message in messages:
        bus.send(message)

    # First pass: handler sees messages in send order, all get stashed.
    assert processed == messages

    processed.clear()
    router.process_all_stashed()
    # Replay visits messages in the order defined by sort_key, not send order.
    assert processed == sorted(messages, key=sort_key)
def test_stashing_router_can_stash_messages():
    """Each processing pass re-delivers stashed messages until the handler stops stashing."""
    remaining_stashes = 3
    seen = []

    def handler(msg):
        nonlocal remaining_stashes
        seen.append(msg)
        if remaining_stashes > 0:
            remaining_stashes -= 1
            return STASH, "reason"
        return None, None

    bus = InternalBus()
    router = StashingRouter(10, buses=[bus])
    router.subscribe(SomeMessage, handler)

    msg_a, msg_b = create_some_message(), create_some_message()
    bus.send(msg_a)
    bus.send(msg_b)

    # Both messages were stashed on first delivery.
    assert router.stash_size() == 2
    assert seen == [msg_a, msg_b]

    router.process_all_stashed()
    # A consumed the third (and last) stash, B was processed normally.
    assert router.stash_size() == 1
    assert seen == [msg_a, msg_b, msg_a, msg_b]

    router.process_all_stashed()
    # A is finally processed; stash is now empty.
    assert router.stash_size() == 0
    assert seen == [msg_a, msg_b, msg_a, msg_b, msg_a]

    router.process_all_stashed()
    # Processing an empty stash is a no-op.
    assert router.stash_size() == 0
    assert seen == [msg_a, msg_b, msg_a, msg_b, msg_a]
def test_stashing_router_can_stash_messages_with_metadata():
    """Metadata (sender) passed alongside a message survives stashing and replay."""
    remaining_stashes = 3
    seen = []

    def handler(msg, frm):
        nonlocal remaining_stashes
        seen.append((msg, frm))
        if remaining_stashes > 0:
            remaining_stashes -= 1
            return STASH

    bus = InternalBus()
    router = StashingRouter(10)
    router.subscribe(SomeMessage, handler)
    router.subscribe_to(bus)

    msg_a, msg_b = create_some_message(), create_some_message()
    bus.send(msg_a, 'A')
    bus.send(msg_b, 'B')

    # Both (message, sender) pairs were stashed on first delivery.
    assert router.stash_size() == 2
    assert seen == [(msg_a, 'A'), (msg_b, 'B')]

    router.process_all_stashed()
    # A consumed the last stash slot, B was processed normally.
    assert router.stash_size() == 1
    assert seen == [(msg_a, 'A'), (msg_b, 'B'), (msg_a, 'A'), (msg_b, 'B')]

    router.process_all_stashed()
    # A processed with its original sender intact; stash empty.
    assert router.stash_size() == 0
    assert seen == [(msg_a, 'A'), (msg_b, 'B'),
                    (msg_a, 'A'), (msg_b, 'B'), (msg_a, 'A')]

    router.process_all_stashed()
    # Processing an empty stash is a no-op.
    assert router.stash_size() == 0
    assert seen == [(msg_a, 'A'), (msg_b, 'B'),
                    (msg_a, 'A'), (msg_b, 'B'), (msg_a, 'A')]
def internal_bus():
    """Fixture: an InternalBus that collects RequestPropagates messages into
    a per-type dict stored on the bus itself (``bus.msgs``)."""
    def collect(bus, msg):
        # Group recorded messages by their concrete type.
        bus.msgs.setdefault(type(msg), []).append(msg)

    bus = InternalBus()
    bus.msgs = {}
    bus.subscribe(RequestPropagates, collect)
    return bus
def __init__(self, name: str, validators: List[str], primary_name: str,
             timer: TimerService, bus: InternalBus, network: ExternalBus,
             write_manager: WriteRequestManager,
             bls_bft_replica: BlsBftReplica = None):
    """Assemble a replica's consensus services (ordering, checkpointing,
    view change, message requesting) around shared consensus data.

    :param name: name of this replica's node
    :param validators: names of all validator nodes in the pool
    :param primary_name: node name of the current primary
    :param timer: timer service shared by the consensus services
    :param bus: internal bus for intra-replica messages
    :param network: external bus for node-to-node messages
    :param write_manager: write request manager backing ordering
    :param bls_bft_replica: optional BLS BFT replica implementation
    """
    # ToDo: Maybe ConsensusSharedData should be initiated before and passed already prepared?
    self._internal_bus = bus
    # Instance id is hard-coded to 0 here (single consensus instance).
    self._data = ConsensusSharedData(name, validators, 0)
    self._data.primary_name = generateName(primary_name, self._data.inst_id)
    self.config = getConfig()
    # The stasher watches both buses so stashed messages from either side
    # can be replayed later.
    self.stasher = StashingRouter(self.config.REPLICA_STASH_LIMIT, buses=[bus, network])
    self._write_manager = write_manager
    self._primaries_selector = RoundRobinNodeRegPrimariesSelector(
        self._write_manager.node_reg_handler)
    self._orderer = OrderingService(data=self._data,
                                    timer=timer,
                                    bus=bus,
                                    network=network,
                                    write_manager=self._write_manager,
                                    bls_bft_replica=bls_bft_replica,
                                    freshness_checker=FreshnessChecker(
                                        freshness_timeout=self.config.STATE_FRESHNESS_UPDATE_INTERVAL),
                                    primaries_selector=self._primaries_selector,
                                    stasher=self.stasher)
    self._checkpointer = CheckpointService(self._data, bus, network, self.stasher,
                                           write_manager.database_manager)
    self._view_changer = ViewChangeService(self._data, timer, bus, network,
                                           self.stasher, self._primaries_selector)
    self._message_requestor = MessageReqService(self._data, bus, network)
    self._add_ledgers()

    # TODO: This is just for testing purposes only
    self._data.checkpoints.append(
        Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0,
                   digest='4F7BsTMVPKFshM1MwLf6y23cid6fL3xMpazVoF9krzUw'))

    # ToDo: it should be done in Zero-view stage.
    write_manager.on_catchup_finished()
    self._data.primaries = self._view_changer._primaries_selector.select_primaries(
        self._data.view_no)

    # ToDo: ugly way to understand node_reg changing
    self._previous_node_reg = self._write_manager.node_reg_handler.committed_node_reg

    bus.subscribe(Ordered, self.emulate_ordered_processing)
def test_stashing_router_correctly_handles_multiple_arguments():
    """Extra send() arguments are forwarded to the subscribed handler untouched."""
    handler = Mock(return_value=(PROCESS, ""))
    bus = InternalBus()
    router = StashingRouter(10, buses=[bus])
    router.subscribe(SomeMessage, handler)

    msg = create_some_message()
    bus.send(msg, 'hello')
    handler.assert_called_once_with(msg, 'hello')
def test_internal_bus_sequentially_routes_multiple_messages_of_different_types():
    """Each subscriber receives, in order, exactly the messages of its subscribed type."""
    stream = [create_some_message() if random() < 0.5 else create_other_message()
              for _ in range(100)]

    some_handler1, some_handler2, other_handler = Mock(), Mock(), Mock()

    bus = InternalBus()
    bus.subscribe(SomeMessage, some_handler1)
    bus.subscribe(SomeMessage, some_handler2)
    bus.subscribe(OtherMessage, other_handler)

    for msg in stream:
        bus.send(msg)

    expected_some = [call(m) for m in stream if isinstance(m, SomeMessage)]
    expected_other = [call(m) for m in stream if isinstance(m, OtherMessage)]
    # Both SomeMessage subscribers see the same ordered sub-stream.
    assert some_handler1.mock_calls == expected_some
    assert some_handler2.mock_calls == expected_some
    assert other_handler.mock_calls == expected_other
def __init__(self, tmpdir, config=None):
    """Fake node for tests: wires replicas, ledger manager, view changer and
    assorted stub attributes that production code expects on a Node.

    Fix: the original assigned ``internal_bus`` and ``quorums`` twice with
    identical fresh values, and assigned ``replicas = dict()`` only to
    overwrite it below — the dead duplicates are removed.

    :param tmpdir: base directory path for the node
    :param config: optional config object; a fresh one is loaded when omitted
    """
    self.basedirpath = tmpdir
    self.name = 'Node1'
    self.internal_bus = InternalBus()
    self.db_manager = DatabaseManager()
    self.timer = QueueTimer()
    self.f = 1
    self.requests = Requests()
    self.rank = None
    self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
    self.nodeReg = {
        name: HA("127.0.0.1", 0) for name in self.allNodeNames
    }
    self.nodeIds = []
    self.totalNodes = len(self.allNodeNames)
    self.mode = Mode.starting
    self.config = config or getConfigOnce()
    self.nodeStatusDB = None
    self.quorums = Quorums(self.totalNodes)
    self.nodestack = FakeSomething(connecteds=set(self.allNodeNames))
    self.write_manager = FakeSomething()
    # Three replicas: instance 0 is the master.
    self.replicas = {
        0: Replica(node=self, instId=0, isMaster=True, config=self.config),
        1: Replica(node=self, instId=1, isMaster=False, config=self.config),
        2: Replica(node=self, instId=2, isMaster=False, config=self.config)
    }
    self.requiredNumberOfInstances = 2
    self._found = False
    self.ledgerManager = LedgerManager(self)
    ledger0 = FakeLedger(0, 10)
    ledger1 = FakeLedger(1, 5)
    self.ledgerManager.addLedger(0, ledger0)
    self.ledgerManager.addLedger(1, ledger1)
    self.view_changer = create_view_changer(self)
    self.primaries_selector = RoundRobinPrimariesSelector()
    self.metrics = NullMetricsCollector()

    # For catchup testing
    self.catchup_rounds_without_txns = 0
    self.view_change_in_progress = False
    self.ledgerManager.last_caught_up_3PC = (0, 0)
    self.master_last_ordered_3PC = (0, 0)
    self.seqNoDB = {}

    # callbacks
    self.onBatchCreated = lambda self, *args, **kwargs: True
def __init__(self, viewNo, quorums, ledger_ids):
    """Fake node for tests: seeds the superclass with canned pool state.

    :param viewNo: view number this fake node reports
    :param quorums: Quorums object for the simulated pool size
    :param ledger_ids: ledger ids the node exposes
    """
    node_stack = FakeSomething(
        name="fake stack",
        connecteds={"Alpha", "Beta", "Gamma", "Delta"})
    self.replicas = []
    self.viewNo = viewNo
    # All remaining node state is supplied as canned attributes/lambdas to
    # the superclass (base class not visible here — presumably a
    # FakeSomething-style attribute bag; confirm in surrounding context).
    super().__init__(name="fake node",
                     ledger_ids=ledger_ids,
                     _viewNo=viewNo,
                     quorums=quorums,
                     nodestack=node_stack,
                     utc_epoch=lambda *args: get_utc_epoch(),
                     mode=Mode.participating,
                     view_change_in_progress=False,
                     pre_view_change_in_progress=False,
                     requests=Requests(),
                     onBatchCreated=lambda self, *args, **kwargs: True,
                     applyReq=lambda self, *args, **kwargs: True,
                     primaries=[],
                     get_validators=lambda: [],
                     db_manager=None,
                     internal_bus=InternalBus(),
                     write_manager=FakeSomething(
                         database_manager=DatabaseManager(),
                         apply_request=lambda req, cons_time: None),
                     timer=QueueTimer())
def fake_node(tconf):
    """Fixture: FakeSomething-based node with a real metrics collector and
    Node.process_one_node_message bound to it."""
    master_replica = FakeSomething(inBox=deque(),
                                   inBoxRouter=Router(),
                                   _external_bus=MockNetwork(),
                                   internal_bus=InternalBus(),
                                   logger=FakeSomething(
                                       info=lambda *args, **kwargs: True))
    node = FakeSomething(config=tconf,
                         timer=QueueTimer(),
                         nodeStatusDB=None,
                         master_replica=master_replica,
                         name="Alpha",
                         master_primary_name="Alpha",
                         on_view_change_start=lambda *args, **kwargs: True,
                         start_catchup=lambda *args, **kwargs: True,
                         nodeInBox=deque(),
                         nodeMsgRouter=Router(),
                         metrics=None,
                         process_one_node_message=None,
                         quota_control=FakeSomething(
                             node_quota=Quota(count=100, size=100)),
                         nodestack=FakeSomething(
                             service=lambda *args, **kwargs: eventually(lambda: True)),
                         set_view_for_replicas=lambda view_no: None,
                         set_view_change_status=lambda view_no: None)
    # Replace the placeholders with real implementations bound to this node.
    node.metrics = functools.partial(Node._createMetricsCollector, node)()
    node.process_one_node_message = functools.partial(Node.process_one_node_message, node)
    return node
def fake_view_changer(request, tconf):
    """Fixture: a ViewChanger built on a FakeSomething node with a canned
    node stack and monitor; the node is attached back for test access."""
    node_count = 4
    stack = FakeSomething(name="fake stack",
                          connecteds={"Alpha", "Beta", "Gamma", "Delta"},
                          conns={"Alpha", "Beta", "Gamma", "Delta"})
    monitor = FakeSomething(isMasterDegraded=lambda: False,
                            areBackupsDegraded=lambda: [],
                            prettymetrics='')
    quorum_size = getValueFromModule(request, 'nodeCount', default=node_count)
    node = FakeSomething(name="SomeNode",
                         timer=QueueTimer(),
                         viewNo=request.param,
                         quorums=Quorums(quorum_size),
                         nodestack=stack,
                         utc_epoch=lambda *args: get_utc_epoch(),
                         config=tconf,
                         monitor=monitor,
                         discard=lambda a, b, c, d: print(b),
                         primaries_disconnection_times=[None] * getRequiredInstances(node_count),
                         master_primary_name='Alpha',
                         master_replica=FakeSomething(instId=0),
                         nodeStatusDB=None,
                         internal_bus=InternalBus())
    view_changer = create_view_changer(node)
    # TODO: This is a hack for tests compatibility, do something better
    view_changer.node = node
    return view_changer
def add_new_node(self, name):
    """Add node `name` to the simulated pool: register it as a validator,
    build its internal bus, write manager and ReplicaService network peer,
    mark it participating, and refresh pool connectivity.

    Note: only the validator registration is guarded against duplicates;
    the rest of the wiring always runs for the given name.
    """
    if name not in self.validators:
        self.validators.append(name)
    # TODO: emulate it the same way as in Replica, that is sender must have 'node_name:inst_id' form
    replica_name = generateName(name, 0)
    handler = partial(self.network._send_message, replica_name)
    write_manager = create_test_write_req_manager(name, self._genesis_txns)
    # The new node starts from the genesis node registry for view 0.
    write_manager.node_reg_handler.committed_node_reg_at_beginning_of_view[0] = self._genesis_validators
    write_manager.node_reg_handler.uncommitted_node_reg_at_beginning_of_view[0] = self._genesis_validators
    _internal_bus = InternalBus()
    self._internal_buses[name] = _internal_bus
    # Subscriptions must be in place before the replica can emit messages.
    self._subscribe_to_internal_msgs(name)
    replica = ReplicaService(replica_name,
                             self.validators,
                             self._primary_name,
                             self._timer,
                             _internal_bus,
                             self.network.create_peer(name, handler),
                             write_manager=write_manager,
                             bls_bft_replica=MockBlsBftReplica())
    replica._data.node_mode = Mode.participating
    self._nodes.append(replica)
    self._update_connecteds()
    logger.info("Node {} was added into pool".format(name))
def test_ordered_cleaning(tconf):
    """After gc, ordered entries from views before the previous one are dropped."""
    current_view = 2
    node = FakeSomething(name="fake node",
                         ledger_ids=[0],
                         viewNo=current_view,
                         utc_epoch=get_utc_epoch,
                         get_validators=lambda: [],
                         internal_bus=InternalBus(),
                         db_manager=DatabaseManager())
    fake_bls = FakeSomething(gc=lambda *args: None, )
    replica = Replica(node, instId=0, config=tconf, bls_bft_replica=fake_bls)
    replica._consensus_data.view_no = current_view

    per_view = 3
    ordered = [(view_no, seq_no)
               for view_no in range(current_view + 1)
               for seq_no in range(per_view)]
    for view_no, seq_no in ordered:
        replica.addToOrdered(view_no, seq_no)

    # gc is called after stable checkpoint; since no request executed in
    # this test, trigger it manually.
    replica._gc(100)

    # Requests with a view lower than the previous view must be gone.
    assert len(replica.ordered) == len(ordered[per_view:])
def __init__(self, node_count: int = 4, random: Optional[SimRandom] = None):
    """Build a simulated pool of `node_count` replica services.

    Fix: port generation previously used ``partial(random.integer, ...)``,
    which raises AttributeError when the caller relies on the default
    ``random=None``; it now uses ``self._random``, which is always set.

    :param node_count: number of validator nodes to simulate
    :param random: optional deterministic randomness source; a default
        SimRandom is created when omitted
    """
    self._random = random if random else DefaultSimRandom()
    self._timer = MockTimer()
    self._network = SimNetwork(self._timer, self._random, self._serialize_deserialize)
    self._nodes = []
    validators = genNodeNames(node_count)
    # ToDo: maybe it should be a random too?
    primary_name = validators[0]
    genesis_txns = create_pool_txn_data(
        node_names=validators,
        crypto_factory=create_default_bls_crypto_factory(),
        # BUGFIX: self._random, not the possibly-None `random` parameter.
        get_free_port=partial(self._random.integer, 9000, 9999))['txns']
    for name in validators:
        # TODO: emulate it the same way as in Replica, that is sender must have 'node_name:inst_id' form
        replica_name = generateName(name, 0)
        handler = partial(self.network._send_message, replica_name)
        write_manager = create_test_write_req_manager(name, genesis_txns)
        write_manager.node_reg_handler.node_reg_at_beginning_of_view[0] = validators
        replica = ReplicaService(replica_name, validators, primary_name,
                                 self._timer, InternalBus(),
                                 self.network.create_peer(name, handler),
                                 write_manager=write_manager,
                                 bls_bft_replica=MockBlsBftReplica())
        replica.config.NEW_VIEW_TIMEOUT = 30 * 1000
        self._nodes.append(replica)
def test_process_all_stashed_doesnt_do_anything_when_there_are_no_items_in_stash():
    """process_all_stashed on an empty stash never invokes handlers."""
    handler = Mock(return_value=PROCESS)
    bus = InternalBus()
    router = StashingRouter(10)
    router.subscribe(SomeMessage, handler)
    router.subscribe_to(bus)

    # Nothing has been stashed yet -> nothing to process.
    router.process_all_stashed()
    handler.assert_not_called()

    msg = create_some_message()
    bus.send(msg, 'hello')
    handler.assert_called_once_with(msg, 'hello')

    # The message was processed, not stashed, so the stash is still empty.
    router.process_all_stashed()
    handler.assert_called_once_with(msg, 'hello')
def test_event_bus_understands_different_no_params_messages():
    """Messages of distinct zero-field types are routed only to their own subscribers."""
    msg_a = NoParamsMessage()
    msg_b = OtherNoParamsMessage()
    handler_a, handler_b = Mock(), Mock()

    bus = InternalBus()
    bus.subscribe(NoParamsMessage, handler_a)
    bus.subscribe(OtherNoParamsMessage, handler_b)

    bus.send(msg_b)
    handler_a.assert_not_called()
    handler_b.assert_called_once_with(msg_b)

    bus.send(msg_a)
    handler_a.assert_called_once_with(msg_a)
    handler_b.assert_called_once_with(msg_b)
def test_process_stashed_until_restash_doesnt_do_anything_when_there_are_no_items_in_stash():
    """process_stashed_until_first_restash on an empty stash never invokes handlers."""
    handler = Mock(return_value=(PROCESS, ""))
    bus = InternalBus()
    router = StashingRouter(10, buses=[bus])
    router.subscribe(SomeMessage, handler)

    # Nothing has been stashed yet -> nothing to replay.
    router.process_stashed_until_first_restash()
    handler.assert_not_called()

    msg = create_some_message()
    bus.send(msg, 'hello')
    handler.assert_called_once_with(msg, 'hello')

    # The message was processed, not stashed, so there is still nothing to replay.
    router.process_stashed_until_first_restash()
    handler.assert_called_once_with(msg, 'hello')
def test_stashing_router_can_process_stashed_until_first_restash():
    """process_stashed_until_first_restash stops at the first message that gets re-stashed."""
    seen = []

    def handler(msg):
        seen.append(msg)
        # Odd-numbered calls are stashed, even-numbered ones are processed.
        if len(seen) % 2 != 0:
            return STASH, "reason"
        return None, None

    bus = InternalBus()
    router = StashingRouter(10, buses=[bus])
    router.subscribe(SomeMessage, handler)

    msg_a, msg_b, msg_c, msg_d, msg_e = (create_some_message() for _ in range(5))
    for msg in (msg_a, msg_b, msg_c, msg_d, msg_e):
        bus.send(msg)

    # Calls 1, 3, 5 (A, C, E) were stashed on first delivery.
    assert router.stash_size() == 3
    assert seen == [msg_a, msg_b, msg_c, msg_d, msg_e]

    # Stash holds A, C, E: A processed (call 6), C re-stashed (call 7) -> stop.
    router.process_stashed_until_first_restash()
    assert router.stash_size() == 2
    assert seen == [msg_a, msg_b, msg_c, msg_d, msg_e, msg_a, msg_c]

    # Stash holds E, C: E processed (call 8), C re-stashed (call 9) -> stop.
    router.process_stashed_until_first_restash()
    assert router.stash_size() == 1
    assert seen == [msg_a, msg_b, msg_c, msg_d, msg_e,
                    msg_a, msg_c, msg_e, msg_c]

    # Stash holds only C: processed on call 10, nothing re-stashed.
    router.process_stashed_until_first_restash()
    assert router.stash_size() == 0
    assert seen == [msg_a, msg_b, msg_c, msg_d, msg_e,
                    msg_a, msg_c, msg_e, msg_c, msg_c]

    # Empty stash: no-op.
    router.process_stashed_until_first_restash()
    assert router.stash_size() == 0
    assert seen == [msg_a, msg_b, msg_c, msg_d, msg_e,
                    msg_a, msg_c, msg_e, msg_c, msg_c]
def test_stashing_router_can_stash_messages_with_different_reasons():
    """Messages can go to distinct stashes and be replayed per stash code."""
    seen = []

    def handler(message: SomeMessage):
        seen.append(message)
        # Even int_field -> stash code STASH+0, odd -> STASH+1.
        return STASH + (message.int_field % 2), "reason"

    bus = InternalBus()
    router = StashingRouter(10, buses=[bus])
    router.subscribe(SomeMessage, handler)

    messages = [create_some_message() for _ in range(10)]
    for message in messages:
        bus.send(message)

    # Everything was stashed, split across the two stash codes.
    assert router.stash_size() == len(messages)
    assert router.stash_size(STASH + 0) + router.stash_size(STASH + 1) == router.stash_size()

    seen.clear()
    router.process_all_stashed()
    # All messages were re-stashed; replay visits stashes in code order (evens first).
    assert router.stash_size() == len(messages)
    assert seen == sorted(messages, key=lambda m: m.int_field % 2)

    seen.clear()
    router.process_all_stashed(STASH + 0)
    # Only the even-field messages were replayed (and re-stashed).
    assert router.stash_size() == len(messages)
    assert router.stash_size(STASH + 0) == len(seen)
    assert all(m.int_field % 2 == 0 for m in seen)
    assert all(m in messages for m in seen)

    seen.clear()
    router.process_all_stashed(STASH + 1)
    # Only the odd-field messages were replayed (and re-stashed).
    assert router.stash_size() == len(messages)
    assert router.stash_size(STASH + 1) == len(seen)
    assert all(m.int_field % 2 != 0 for m in seen)
    assert all(m in messages for m in seen)
def __init__(self, node_count: int = 4, random: Optional[SimRandom] = None):
    """Build a simulated pool of `node_count` replica services on a mock
    timer and network; the first validator is made primary."""
    self._random = random if random else DefaultSimRandom()
    self._timer = MockTimer()
    self._network = SimNetwork(self._timer, self._random)

    validators = genNodeNames(node_count)
    primary = validators[0]
    self._nodes = [
        ReplicaService(name, validators, primary, self._timer,
                       InternalBus(), self.network.create_peer(name))
        for name in validators
    ]
def test_internal_bus_doesnt_route_unregistered_message():
    """A message type nobody subscribed to is silently dropped."""
    handler = Mock()
    bus = InternalBus()
    bus.subscribe(SomeMessage, handler)

    bus.send(create_other_message())
    handler.assert_not_called()
def test_event_bus_routes_no_params_message():
    """A zero-field message is delivered to its subscriber."""
    msg = NoParamsMessage()
    handler = Mock()
    bus = InternalBus()
    bus.subscribe(NoParamsMessage, handler)

    bus.send(msg)
    handler.assert_called_once_with(msg)
def test_internal_bus_can_route_messages_with_side_arguments():
    """Positional side arguments given to send() reach the handler unchanged."""
    msg = create_some_message()
    handler = Mock()
    bus = InternalBus()
    bus.subscribe(SomeMessage, handler)

    bus.send(msg, 'some_arg', 'other_arg')
    handler.assert_called_once_with(msg, 'some_arg', 'other_arg')
def test_event_bus_routes_registered_message():
    """A subscribed handler receives exactly the message that was sent."""
    msg = create_some_message()
    handler = Mock()
    bus = InternalBus()
    bus.subscribe(SomeMessage, handler)

    bus.send(msg)
    handler.assert_called_once_with(msg)
def __init__(self, node_count: int = 4, random: Optional[SimRandom] = None):
    """Build a simulated pool of `node_count` replica services seeded with
    pool genesis transactions.

    Fix: port generation previously used ``partial(random.integer, ...)``,
    which raises AttributeError when the caller relies on the default
    ``random=None``; it now uses ``self._random``, which is always set.

    :param node_count: number of validator nodes to simulate
    :param random: optional deterministic randomness source; a default
        SimRandom is created when omitted
    """
    self._random = random if random else DefaultSimRandom()
    self._timer = MockTimer()
    self._network = SimNetwork(self._timer, self._random)
    validators = genNodeNames(node_count)
    primary_name = validators[0]
    genesis_txns = create_pool_txn_data(
        node_names=validators,
        crypto_factory=create_default_bls_crypto_factory(),
        # BUGFIX: self._random, not the possibly-None `random` parameter.
        get_free_port=partial(self._random.integer, 9000, 9999))['txns']
    self._nodes = [ReplicaService(name, validators, primary_name, self._timer,
                                  InternalBus(), self.network.create_peer(name),
                                  write_manager=create_test_write_req_manager(name, genesis_txns))
                   for name in validators]
def test_internal_bus_routes_messages_to_all_subscribers():
    """Every subscriber of a message type gets its own delivery."""
    msg = create_some_message()
    first, second = Mock(), Mock()
    bus = InternalBus()
    bus.subscribe(SomeMessage, first)
    bus.subscribe(SomeMessage, second)

    bus.send(msg)
    first.assert_called_once_with(msg)
    second.assert_called_once_with(msg)
def test_primary_names_cleaning(tconf):
    """replica.primaryNames keeps entries only for the current and previous views."""
    node = FakeSomething(name="fake node",
                         ledger_ids=[0],
                         viewNo=0,
                         utc_epoch=get_utc_epoch,
                         get_validators=lambda: [],
                         internal_bus=InternalBus(),
                         db_manager=DatabaseManager(),
                         requests=[],
                         mode=Mode.participating,
                         timer=QueueTimer(),
                         quorums=Quorums(4),
                         write_manager=None)
    fake_bls = FakeSomething(gc=lambda *args: None, )
    replica = Replica(node, instId=0, config=tconf, bls_bft_replica=fake_bls)

    replica.primaryName = "Node1:0"
    assert list(replica.primaryNames.items()) == [(0, "Node1:0")]

    expected = [(0, "Node1:0")]
    for view_no, primary in ((1, "Node2:0"), (2, "Node3:0"), (3, "Node4:0")):
        node.viewNo += 1
        replica._consensus_data.view_no = node.viewNo
        replica.primaryName = primary
        # Only the two most recent views are retained.
        expected = (expected + [(view_no, primary)])[-2:]
        assert list(replica.primaryNames.items()) == expected
def __init__(self, node_count: int = 4, random: Optional[SimRandom] = None):
    """Build a simulated pool with future-primaries handling patched in.

    Fixes:
    * port generation previously used ``partial(random.integer, ...)``,
      which raises AttributeError when the caller relies on the default
      ``random=None``; it now uses ``self._random``, which is always set;
    * the ``_get_primaries`` lambda captured the loop variable ``replica``
      late-bound, so every handler would have read the LAST replica's
      primaries; the replica is now bound per-iteration via a default arg.

    :param node_count: number of validator nodes to simulate
    :param random: optional deterministic randomness source; a default
        SimRandom is created when omitted
    """
    self._random = random if random else DefaultSimRandom()
    self._timer = MockTimer()
    self._network = SimNetwork(self._timer, self._random, self._serialize_deserialize)
    self._nodes = []
    validators = genNodeNames(node_count)
    # ToDo: maybe it should be a random too?
    primary_name = validators[0]
    genesis_txns = create_pool_txn_data(
        node_names=validators,
        crypto_factory=create_default_bls_crypto_factory(),
        # BUGFIX: self._random, not the possibly-None `random` parameter.
        get_free_port=partial(self._random.integer, 9000, 9999))['txns']
    for name in validators:
        # TODO: emulate it the same way as in Replica, that is sender must have 'node_name:inst_id' form
        replica_name = generateName(name, 0)
        handler = partial(self.network._send_message, replica_name)
        write_manager = create_test_write_req_manager(name, genesis_txns)
        replica = ReplicaService(replica_name, validators, primary_name,
                                 self._timer, InternalBus(),
                                 self.network.create_peer(name, handler),
                                 write_manager=write_manager,
                                 bls_bft_replica=MockBlsBftReplica())
        # ToDo: For now, future_primary_handler is depended from the node.
        # And for now we need to patching set_node_state functionality
        future_primaries_handler = FuturePrimariesBatchHandler(
            write_manager.database_manager,
            FakeSomething(nodeReg={}, nodeIds=[]))
        # BUGFIX: bind this iteration's replica via a default argument so the
        # handler doesn't late-bind to the last replica created by the loop.
        future_primaries_handler._get_primaries = \
            lambda *args, _replica=replica, **kwargs: _replica._data.primaries
        write_manager.register_batch_handler(future_primaries_handler)
        # ToDo: also, it should be done at the zero-view stage.
        write_manager.future_primary_handler.set_node_state()
        replica.config.NEW_VIEW_TIMEOUT = 30 * 1000
        self._nodes.append(replica)
def test_stashing_router_correctly_routes_messages():
    """The router dispatches each message type only to its own subscribed handler."""
    some_handler = Mock(return_value=(PROCESS, ""))
    other_handler = Mock(return_value=(DISCARD, ""))

    bus = InternalBus()
    router = StashingRouter(10, buses=[bus])
    router.subscribe(SomeMessage, some_handler)
    router.subscribe(OtherMessage, other_handler)

    # Nothing sent yet.
    some_handler.assert_not_called()
    other_handler.assert_not_called()

    some_msg = create_some_message()
    bus.send(some_msg)
    some_handler.assert_called_once_with(some_msg)
    other_handler.assert_not_called()

    other_msg = create_other_message()
    bus.send(other_msg)
    some_handler.assert_called_once_with(some_msg)
    other_handler.assert_called_once_with(other_msg)
def __init__(self, node: 'plenum.server.node.Node', instId: int,
             config=None, isMaster: bool = False,
             bls_bft_replica: BlsBftReplica = None,
             metrics: MetricsCollector = NullMetricsCollector(),
             get_current_time=None, get_time_for_3pc_batch=None):
    """
    Create a new replica.

    :param node: Node on which this replica is located
    :param instId: the id of the protocol instance the replica belongs to
    :param isMaster: is this a replica of the master protocol instance
    """
    HasActionQueue.__init__(self)
    # Clock sources: fall back to perf_counter / the node's utc_epoch.
    self.get_current_time = get_current_time or time.perf_counter
    self.get_time_for_3pc_batch = get_time_for_3pc_batch or node.utc_epoch
    # self.stats = Stats(TPCStat)
    self.config = config or getConfig()
    self.metrics = metrics
    self.node = node
    self.instId = instId
    self.name = self.generateName(node.name, self.instId)
    self.logger = getlogger(self.name)
    self.validator = ReplicaValidator(self)

    self.outBox = deque()
    """
    This queue is used by the replica to send messages to its node. Replica
    puts messages that are consumed by its node
    """

    self.inBox = deque()
    """
    This queue is used by the replica to receive messages from its node.
    Node puts messages that are consumed by the replica
    """

    self.inBoxStash = deque()
    """
    If messages need to go back on the queue, they go here temporarily and
    are put back on the queue on a state change
    """

    self._is_master = isMaster

    # Dictionary to keep track of the which replica was primary during each
    # view. Key is the view no and value is the name of the primary
    # replica during that view
    self.primaryNames = OrderedDict()  # type: OrderedDict[int, str]

    # Flag being used for preterm exit from the loop in the method
    # `processStashedMsgsForNewWaterMarks`. See that method for details.
    self.consumedAllStashedMsgs = True

    self._freshness_checker = FreshnessChecker(
        freshness_timeout=self.config.STATE_FRESHNESS_UPDATE_INTERVAL)

    self._bls_bft_replica = bls_bft_replica
    self._state_root_serializer = state_roots_serializer

    # Did we log a message about getting request while absence of primary
    self.warned_no_primary = False

    self._consensus_data = ConsensusSharedData(
        self.name,
        self.node.poolManager.node_names_ordered_by_rank(),
        self.instId,
        self.isMaster)
    # Each replica owns its own internal bus; the external bus delivers
    # through this replica's send method.
    self._internal_bus = InternalBus()
    self._external_bus = ExternalBus(send_handler=self.send)
    self.stasher = self._init_replica_stasher()
    self._subscription = Subscription()
    self._bootstrap_consensus_data()
    # Subscriptions must be set up before the services below start emitting.
    self._subscribe_to_external_msgs()
    self._subscribe_to_internal_msgs()
    self._checkpointer = self._init_checkpoint_service()
    self._ordering_service = self._init_ordering_service()
    self._message_req_service = self._init_message_req_service()
    self._view_change_service = self._init_view_change_service()
    for ledger_id in self.ledger_ids:
        self.register_ledger(ledger_id)