def __init__(self,
             owner,
             postAllLedgersCaughtUp: Optional[Callable] = None,
             preCatchupClbk: Optional[Callable] = None,
             postCatchupClbk: Optional[Callable] = None,
             ledger_sync_order: Optional[List] = None,
             metrics: MetricsCollector = NullMetricsCollector()):
    """
    Wire up the catchup machinery for `owner`.

    :param owner: hosting node-like object; must expose a `timer` attribute
    :param postAllLedgersCaughtUp: callback invoked once every ledger is caught up
    :param preCatchupClbk: callback invoked before a ledger's catchup starts
    :param postCatchupClbk: callback invoked after a ledger's catchup completes
    :param ledger_sync_order: explicit ordering of ledger ids for syncing
    :param metrics: metrics sink; defaults to a no-op collector
    """
    # If ledger_sync_order is not provided (is None), it is assumed that
    # `postCatchupCompleteClbk` of the LedgerInfo will be used
    self.owner = owner
    self._timer = owner.timer
    self.postAllLedgersCaughtUp = postAllLedgersCaughtUp
    self.preCatchupClbk = preCatchupClbk
    self.postCatchupClbk = postCatchupClbk
    self.ledger_sync_order = ledger_sync_order
    # Bookkeeping of scheduled request actions, keyed per ledger/request
    self.request_ledger_status_action_ids = dict()
    self.request_consistency_proof_action_ids = dict()
    self.metrics = metrics

    config = getConfig()
    provider = CatchupNodeDataProvider(owner)

    # Seeder services answer catchup requests from clients and other nodes;
    # each gets its own inbox channel (`rx` is deliberately reused as a temp).
    self._client_seeder_inbox, rx = create_direct_channel()
    self._client_seeder = ClientSeederService(rx, provider)

    self._node_seeder_inbox, rx = create_direct_channel()
    self._node_seeder = NodeSeederService(rx, provider)

    # Leecher outbox: the node leecher reports progress here, and the router
    # dispatches those messages back to this manager's handlers.
    leecher_outbox_tx, leecher_outbox_rx = create_direct_channel()
    router = Router(leecher_outbox_rx)
    router.add(LedgerCatchupStart, self._on_ledger_sync_start)
    router.add(LedgerCatchupComplete, self._on_ledger_sync_complete)
    router.add(NodeCatchupComplete, self._on_catchup_complete)

    self._node_leecher_inbox, rx = create_direct_channel()
    self._node_leecher = NodeLeecherService(config=config,
                                            input=rx,
                                            output=leecher_outbox_tx,
                                            timer=self._timer,
                                            metrics=self.metrics,
                                            provider=provider)

    # Holds ledgers of different types with their info like callbacks, state, etc
    self.ledgerRegistry = {}  # type: Dict[int, LedgerInfo]

    # Largest 3 phase key received during catchup.
    # This field is needed to discard any stashed 3PC messages or
    # ordered messages since the transactions part of those messages
    # will be applied when they are received through the catchup process
    self.last_caught_up_3PC = (0, 0)
def __init__(self,
             owner,
             postAllLedgersCaughtUp: Optional[Callable] = None,
             preCatchupClbk: Optional[Callable] = None,
             postCatchupClbk: Optional[Callable] = None,
             ledger_sync_order: Optional[List] = None,
             metrics: MetricsCollector = NullMetricsCollector()):
    """
    Wire up the catchup machinery for `owner`, with one leecher pair
    (cons-proof + catchup-rep services) to be registered per ledger later
    via `addLedger`.

    :param owner: hosting node-like object; must expose a `timer` attribute
    :param postAllLedgersCaughtUp: callback invoked once every ledger is caught up
    :param preCatchupClbk: callback invoked before a ledger's catchup starts
    :param postCatchupClbk: callback invoked after a ledger's catchup completes
    :param ledger_sync_order: explicit ordering of ledger ids for syncing
    :param metrics: metrics sink; defaults to a no-op collector
    """
    # If ledger_sync_order is not provided (is None), it is assumed that
    # `postCatchupCompleteClbk` of the LedgerInfo will be used
    self.owner = owner
    self._timer = owner.timer
    self.postAllLedgersCaughtUp = postAllLedgersCaughtUp
    self.preCatchupClbk = preCatchupClbk
    self.postCatchupClbk = postCatchupClbk
    self.ledger_sync_order = ledger_sync_order
    # Bookkeeping of scheduled request actions, keyed per ledger/request
    self.request_ledger_status_action_ids = dict()
    self.request_consistency_proof_action_ids = dict()
    self.metrics = metrics

    self._provider = CatchupNodeDataProvider(owner)

    # Seeder services answer catchup requests from clients and other nodes;
    # each gets its own inbox channel (`rx` is deliberately reused as a temp).
    self._client_seeder_inbox, rx = create_direct_channel()
    self._client_seeder = ClientSeederService(rx, self._provider)

    self._node_seeder_inbox, rx = create_direct_channel()
    self._node_seeder = NodeSeederService(rx, self._provider)

    # Shared outbox for all per-ledger leecher services; the router dispatches
    # their completion notifications back to this manager's handlers.
    self._leecher_outbox, rx = create_direct_channel()
    router = Router(rx)
    router.add(LedgerCatchupComplete, self._on_catchup_rep_service_stop)
    router.add(ConsProofReady, self._on_cons_proof_service_stop)

    self.config = getConfig()

    # Holds ledgers of different types with
    # their info like callbacks, state, etc
    self.ledgerRegistry = {}  # type: Dict[int, LedgerInfo]
    # Per-ledger leecher service pairs, populated by `addLedger`
    self._leechers = {}  # type: Dict[int, LedgerManager.LedgerLeecherService]

    # Largest 3 phase key received during catchup.
    # This field is needed to discard any stashed 3PC messages or
    # ordered messages since the transactions part of those messages
    # will be applied when they are received through the catchup process
    self.last_caught_up_3PC = (0, 0)

    # Nodes are added in this set when the current node sent a CatchupReq
    # for them and waits a CatchupRep message.
    self.wait_catchup_rep_from = set()
def __init__(self,
             ledger_id: int,
             config: object,
             input: RxChannel,
             output: TxChannel,
             timer: TimerService,
             metrics: MetricsCollector,
             provider: CatchupDataProvider):
    """
    Compose the leecher for one ledger out of a ConsProofService and a
    CatchupRepService, both fed from `input` and reporting into a private
    inner channel routed to this object's start/complete handlers.

    :param ledger_id: id of the ledger this leecher is responsible for
    :param config: node configuration object
    :param input: channel delivering incoming catchup-related messages
    :param output: channel for outgoing progress notifications
    :param timer: timer service used for scheduling
    :param metrics: metrics sink
    :param provider: data provider giving access to ledgers and peers
    """
    self._ledger_id = ledger_id
    self._ledger = provider.ledger(ledger_id)
    self._config = config
    self._output = output
    self._timer = timer
    self.metrics = metrics
    self._provider = provider

    self._state = LedgerState.not_synced  # TODO: Improve enum
    self._catchup_till = None  # type: Optional[CatchupTill]
    self._num_txns_caught_up = 0

    # Private channel: sub-services signal start/completion here, and the
    # router forwards those events to our own handlers.
    inner_tx, inner_rx = create_direct_channel()
    inner_router = Router(inner_rx)
    inner_router.add(LedgerCatchupStart, self._on_catchup_start)
    inner_router.add(LedgerCatchupComplete, self._on_catchup_complete)

    # Both sub-services share identical wiring except for their class.
    service_kwargs = dict(ledger_id=ledger_id,
                          config=config,
                          input=input,
                          output=inner_tx,
                          timer=self._timer,
                          metrics=self.metrics,
                          provider=self._provider)
    self._cons_proof_service = ConsProofService(**service_kwargs)
    self._catchup_rep_service = CatchupRepService(**service_kwargs)
def __init__(self,
             config: object,
             input: RxChannel,
             output: TxChannel,
             timer: TimerService,
             metrics: MetricsCollector,
             provider: CatchupDataProvider):
    """
    Set up the node-level leecher that drives per-ledger leechers one at a
    time, forwarding their output to `output` and reacting to per-ledger
    completion events.

    :param config: node configuration object
    :param input: channel delivering incoming catchup-related messages
    :param output: channel for outgoing progress notifications
    :param timer: timer service used for scheduling
    :param metrics: metrics sink
    :param provider: data provider giving access to ledgers and peers
    """
    self._config = config
    self._input = input
    self._output = output
    self._timer = timer
    self.metrics = metrics
    self._provider = provider

    self._state = self.State.Idle
    # Per-ledger catchup targets, filled in as each ledger is processed
    self._catchup_till = {}  # type: Dict[int, CatchupTill]
    # TODO: Get rid of this, theoretically most ledgers can be synced in parallel
    self._current_ledger = None  # type: Optional[int]

    # Per-ledger leechers write into this outbox; everything is mirrored to
    # the external `output`, and completion events are additionally routed
    # to our own handler.
    # NOTE(review): the same rx end is both `.subscribe()`d and wrapped in a
    # Router — assumes the channel supports multiple consumers; confirm
    # against create_direct_channel's semantics.
    self._leecher_outbox, self._leecher_outbox_rx = create_direct_channel()
    self._leecher_outbox_rx.subscribe(lambda msg: output.put_nowait(msg))
    Router(self._leecher_outbox_rx).add(LedgerCatchupComplete, self._on_ledger_catchup_complete)

    # Per-ledger leecher services, registered elsewhere
    self._leechers = {}  # type: Dict[int, LedgerLeecherService]
def addLedger(self, iD: int, ledger: Ledger,
              preCatchupStartClbk: Callable = None,
              postCatchupCompleteClbk: Callable = None,
              postTxnAddedToLedgerClbk: Callable = None):
    """
    Register a ledger with the manager and create its leecher service pair.

    Refuses (with an error log, no exception) to replace a ledger that is
    already registered under the same id.

    :param iD: ledger id to register under
    :param ledger: the ledger instance itself
    :param preCatchupStartClbk: invoked before this ledger's catchup starts
    :param postCatchupCompleteClbk: invoked after this ledger's catchup completes
    :param postTxnAddedToLedgerClbk: invoked for each txn added during catchup
    """
    if iD in self.ledgerRegistry:
        logger.error("{} already present in ledgers "
                     "so cannot replace that ledger".format(iD))
        return

    self.ledgerRegistry[iD] = LedgerInfo(
        iD,
        ledger=ledger,
        preCatchupStartClbk=preCatchupStartClbk,
        postCatchupCompleteClbk=postCatchupCompleteClbk,
        postTxnAddedToLedgerClbk=postTxnAddedToLedgerClbk,
        verifier=MerkleVerifier(ledger.hasher))

    # Each service gets a private inbox; both report into the shared outbox.
    proof_tx, proof_rx = create_direct_channel()
    rep_tx, rep_rx = create_direct_channel()

    # Wiring common to both services.
    shared = dict(ledger_id=iD,
                  config=self.config,
                  output=self._leecher_outbox,
                  timer=self._timer,
                  metrics=self.metrics,
                  provider=self._provider)

    self._leechers[iD] = self.LedgerLeecherService(
        cons_proof_inbox=proof_tx,
        cons_proof_service=ConsProofService(input=proof_rx, **shared),
        catchup_rep_inbox=rep_tx,
        catchup_rep_service=CatchupRepService(input=rep_rx, **shared))
def create_fake_catchup_rep_service(ledger: Ledger):
    """
    Build a CatchupRepService for testing, backed by a stub data provider
    that exposes exactly one ledger (id 0) and no-ops everything else.

    :param ledger: the ledger served as ledger id 0
    :return: a CatchupRepService with no config, timer or output wired in
    """

    class _StubProvider(CatchupDataProvider):
        """Minimal provider: only `ledgers`/`ledger` return real data."""

        def __init__(self, ledger):
            self._ledger = ledger

        def all_nodes_names(self):
            pass

        def node_name(self) -> str:
            pass

        def ledgers(self) -> List[int]:
            return [0]

        def ledger(self, ledger_id: int) -> Ledger:
            # Only ledger 0 exists; anything else yields None.
            return self._ledger if ledger_id == 0 else None

        def verifier(self, ledger_id: int) -> MerkleVerifier:
            pass

        def eligible_nodes(self) -> List[str]:
            pass

        def three_phase_key_for_txn_seq_no(self, ledger_id: int, seq_no: int) -> Tuple[int, int]:
            pass

        def update_txn_with_extra_data(self, txn: dict) -> dict:
            pass

        def transform_txn_for_ledger(self, txn: dict) -> dict:
            pass

        def notify_catchup_start(self, ledger_id: int):
            pass

        def notify_catchup_complete(self, ledger_id: int, last_3pc: Tuple[int, int]):
            pass

        def notify_transaction_added_to_ledger(self, ledger_id: int, txn: dict):
            pass

        def send_to(self, msg: Any, to: str, message_splitter: Optional[Callable] = None):
            pass

        def send_to_nodes(self, msg: Any):
            pass

        def blacklist_node(self, node_name: str, reason: str):
            pass

        def discard(self, msg, reason, logMethod=logging.error, cliOutput=False):
            pass

    # Only the rx end of the inbox is needed; the tx end is discarded.
    _, inbox_rx = create_direct_channel()

    return CatchupRepService(ledger_id=0,
                             config=None,
                             input=inbox_rx,
                             output=None,
                             timer=None,
                             metrics=NullMetricsCollector(),
                             provider=_StubProvider(ledger))