    def __init__(self, tmpdir, config=None):
        self.basedirpath = tmpdir
        self.name = 'Node1'
        self.f = 1
        self.replicas = dict()
        self.requests = []
        self.rank = None
        self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
        self.nodeReg = {
            name: HA("127.0.0.1", 0) for name in self.allNodeNames
        }
        self.totalNodes = len(self.allNodeNames)
        self.mode = Mode.starting
        self.config = config or getConfigOnce()
        self.replicas = {
            0: Replica(node=self, instId=0, isMaster=True, config=self.config),
            1: Replica(node=self, instId=1, isMaster=False, config=self.config),
            2: Replica(node=self, instId=2, isMaster=False, config=self.config),
        }
        self._found = False
        self.ledgerManager = LedgerManager(self, ownedByNode=True)
        ledger0 = FakeLedger(0, 10)
        ledger1 = FakeLedger(1, 5)
        self.ledgerManager.addLedger(0, ledger0)
        self.ledgerManager.addLedger(1, ledger1)
        self.quorums = Quorums(self.totalNodes)
        self.view_changer = ViewChanger(self)
        self.elector = PrimarySelector(self)
        self.metrics = NullMetricsCollector()
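A minimal usage sketch for this test double, assuming the enclosing class is named FakeNode (the class statement is not shown in the snippet) and that a pytest tmpdir fixture supplies the base directory:

# Sketch only: `FakeNode` is a hypothetical name for the class above.
def test_fake_node_bootstraps_three_replicas(tmpdir):
    node = FakeNode(tmpdir)
    # One master replica (instId 0) plus two backups were created.
    assert len(node.replicas) == 3
    assert node.replicas[0].isMaster
    assert node.f == 1 and node.totalNodes == 4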
Example #2
    def __init__(
            self,
            data: ConsensusSharedData,
            bus: InternalBus,
            network: ExternalBus,
            stasher: StashingRouter,
            db_manager: DatabaseManager,
            old_stasher: ReplicaStasher,
            metrics: MetricsCollector = NullMetricsCollector(),
    ):
        self._data = data
        self._bus = bus
        self._network = network
        # checkpoint keys are (seqNoStart, seqNoEnd) tuples; order by seqNoEnd
        self._checkpoint_state = SortedDict(lambda k: k[1])
        self._stasher = stasher
        self._validator = CheckpointMsgValidator(self._data)
        self._db_manager = db_manager
        self.metrics = metrics

        # Stashed checkpoints for each view. The key of the outermost
        # dictionary is the view_no; its value is a dictionary keyed by the
        # checkpoint's (seqNoStart, seqNoEnd) range, which in turn maps each
        # sender to the checkpoint it sent.
        # Dict[view_no, Dict[(seqNoStart, seqNoEnd), Dict[sender, Checkpoint]]]
        self._stashed_recvd_checkpoints = {}

        self._config = getConfig()
        self._logger = getlogger()

        self._old_stasher = old_stasher
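A short sketch of the nested stash layout documented above, with the Checkpoint message stubbed as a plain string:

# Dict[view_no, Dict[(seqNoStart, seqNoEnd), Dict[sender, Checkpoint]]]
stashed = {}
view_no, chk_range, sender = 0, (1, 100), 'Node2'
stashed.setdefault(view_no, {}).setdefault(chk_range, {})[sender] = '<Checkpoint>'
assert stashed[0][(1, 100)]['Node2'] == '<Checkpoint>'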
Example #3
    def __init__(
            self,
            data: ConsensusSharedData,
            bus: InternalBus,
            network: ExternalBus,
            stasher: StashingRouter,
            db_manager: DatabaseManager,
            metrics: MetricsCollector = NullMetricsCollector(),
    ):
        self._data = data
        self._bus = bus
        self._network = network
        self._stasher = stasher
        self._subscription = Subscription()
        self._validator = CheckpointMsgValidator(self._data)
        self._db_manager = db_manager
        self.metrics = metrics

        # Received checkpoints, mapping CheckpointKey -> Set[node_alias]
        self._received_checkpoints = defaultdict(
            set)  # type: Dict[CheckpointService.CheckpointKey, Set[str]]

        self._config = getConfig()
        self._logger = getlogger()

        self._subscription.subscribe(stasher, Checkpoint,
                                     self.process_checkpoint)

        self._subscription.subscribe(bus, Ordered, self.process_ordered)
        self._subscription.subscribe(bus, BackupSetupLastOrdered,
                                     self.process_backup_setup_last_ordered)
        self._subscription.subscribe(bus, NewViewAccepted,
                                     self.process_new_view_accepted)
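Each subscribe call above registers a handler for one message class on a bus; when a matching message is published, the handler fires. A toy sketch of that type-routed pattern (an illustration only, not plenum's actual InternalBus implementation):

from collections import defaultdict

class ToyBus:
    # Minimal type-routed bus: one handler list per message class.
    def __init__(self):
        self._handlers = defaultdict(list)

    def subscribe(self, msg_type, handler):
        self._handlers[msg_type].append(handler)

    def send(self, msg, *args):
        for handler in self._handlers[type(msg)]:
            handler(msg, *args)

class Ping:
    pass

bus = ToyBus()
bus.subscribe(Ping, lambda msg: print('got', msg))
bus.send(Ping())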
Example #4
    def __init__(self,
                 data: ConsensusSharedData,
                 bus: InternalBus,
                 network: ExternalBus,
                 metrics=NullMetricsCollector()):
        self._logger = getlogger()
        self._data = data
        self._bus = bus
        self._subscription = Subscription()
        self._subscription.subscribe(bus, MissingMessage,
                                     self.process_missing_message)
        self._subscription.subscribe(bus, Ordered, self.process_ordered)
        self._subscription.subscribe(bus, ViewChangeStarted,
                                     self.process_view_change_started)
        self._subscription.subscribe(bus, CheckpointStabilized,
                                     self.process_checkpoint_stabilized)

        self._network = network
        self._subscription.subscribe(network, MessageReq,
                                     self.process_message_req)
        self._subscription.subscribe(network, MessageRep,
                                     self.process_message_rep)

        self.metrics = metrics
        self.handlers = {
            PREPREPARE: PreprepareHandler(self._data),
            PREPARE: PrepareHandler(self._data),
            COMMIT: CommitHandler(self._data),
            VIEW_CHANGE: ViewChangeHandler(self._data)
        }
        self.three_pc_handlers = {PREPREPARE, PREPARE, COMMIT}
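The handlers map above lets incoming message requests be dispatched by type name. A hedged sketch of such a lookup (`dispatch` and `process` are hypothetical names for illustration, not the real service API):

# Hypothetical dispatch over a handlers dict like the one above;
# `process` is an illustrative method name, not the real handler API.
def dispatch(service, msg_type: str, msg):
    handler = service.handlers.get(msg_type)
    if handler is None:
        return None  # not a message type this service serves
    return handler.process(msg)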
Example #5
    def __init__(self,
                 stackParams: dict,
                 msgHandler: Callable,
                 registry: Dict[str, HA],
                 seed=None,
                 sighex: str = None,
                 config=None,
                 msgRejectHandler=None,
                 metrics=NullMetricsCollector(),
                 mt_incoming_size=None,
                 mt_outgoing_size=None):

        KITNetworkInterface.__init__(self, registry=registry)

        simple_zstack_class.__init__(self,
                                     stackParams,
                                     msgHandler,
                                     seed=seed,
                                     sighex=sighex,
                                     config=config,
                                     msgRejectHandler=msgRejectHandler,
                                     metrics=metrics,
                                     mt_incoming_size=mt_incoming_size,
                                     mt_outgoing_size=mt_outgoing_size)

        self._retry_connect = {}
Example #6
    def __init__(self,
                 data: ConsensusSharedData,
                 timer: TimerService,
                 bus: InternalBus,
                 network: ExternalBus,
                 db_manager: DatabaseManager,
                 stasher: StashingRouter,
                 is_master_degraded: Callable[[], bool],
                 metrics: MetricsCollector = NullMetricsCollector()):
        self._data = data
        self._timer = timer
        self._bus = bus
        self._network = network
        self._stasher = stasher
        self._is_master_degraded = is_master_degraded
        self.metrics = metrics

        self._config = getConfig()

        self._instance_changes = \
            InstanceChangeProvider(outdated_ic_interval=self._config.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL,
                                   node_status_db=db_manager.get_store(NODE_STATUS_DB_LABEL),
                                   time_provider=timer.get_current_time)

        self._subscription = Subscription()
        self._subscription.subscribe(bus, VoteForViewChange, self.process_vote_for_view_change)
        self._subscription.subscribe(bus, NewViewAccepted, self.process_new_view_accepted)
        self._subscription.subscribe(stasher, InstanceChange, self.process_instance_change)
Example #7
    def __init__(self,
                 data: ConsensusSharedData,
                 timer: TimerService,
                 bus: InternalBus,
                 network: ExternalBus,
                 metrics: MetricsCollector = NullMetricsCollector()):
        self._data = data
        self._timer = timer
        self._bus = bus
        self._network = network
        self.metrics = metrics

        self._config = getConfig()

        self._primary_disconnection_time = timer.get_current_time()  # type: Optional[float]

        self._propose_view_change_timer = RepeatingTimer(timer=timer,
                                                         interval=self._config.NEW_VIEW_TIMEOUT,
                                                         callback=self._propose_view_change_if_needed,
                                                         active=False)

        self._subscription = Subscription()
        self._subscription.subscribe(network, ExternalBus.Connected, self.process_connected)
        self._subscription.subscribe(network, ExternalBus.Disconnected, self.process_disconnected)
        self._subscription.subscribe(bus, PrimarySelected, self.process_primary_selected)
        self._subscription.subscribe(bus, NodeStatusUpdated, self.process_node_status_updated)

        if self._data.is_master:
            self._schedule_primary_connection_check(delay=self._config.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT)
Example #8
    def __init__(self,
                 data: ConsensusSharedData,
                 timer: TimerService,
                 bus: InternalBus,
                 network: ExternalBus,
                 freshness_checker: FreshnessChecker,
                 get_time_for_3pc_batch: Optional[Callable[[], int]] = None,
                 metrics: MetricsCollector = NullMetricsCollector()):
        self._data = data
        self._timer = timer
        self._bus = bus
        self._network = network
        self.metrics = metrics

        self._freshness_checker = freshness_checker
        self._get_time_for_3pc_batch = get_time_for_3pc_batch if get_time_for_3pc_batch is not None else get_utc_epoch

        self._config = getConfig()

        # Start periodic freshness check
        state_freshness_update_interval = self._config.STATE_FRESHNESS_UPDATE_INTERVAL
        if state_freshness_update_interval > 0:
            self._check_freshness_timer = RepeatingTimer(
                self._timer, state_freshness_update_interval,
                self._check_freshness)
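The RepeatingTimer used here and in the previous example re-arms a callback at a fixed interval, optionally starting inactive. A self-contained toy of the same idea built on the stdlib sched module (plenum's RepeatingTimer runs against its TimerService instead):

import sched, time

# Toy repeating timer: re-schedules `callback` every `interval` seconds.
def repeat(scheduler, interval, callback):
    def tick():
        callback()
        scheduler.enter(interval, 1, tick)
    scheduler.enter(interval, 1, tick)

s = sched.scheduler(time.time, time.sleep)
repeat(s, 1.0, lambda: print("freshness check"))
# s.run()  # uncomment to actually run the loop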
Example #9
    def __init__(self, config=None, metrics=NullMetricsCollector()):
        """
        :param config: stp config (defaults to getConfig() when not given)
        """
        self.outBoxes = {}  # type: Dict[int, deque]
        self.stp_config = config or getConfig()
        self.msg_len_val = MessageLenValidator(self.stp_config.MSG_LEN_LIMIT)
        self.metrics = metrics
Example #10
    def __init__(self, stackParams: dict, msgHandler: Callable,
                 registry: Dict[str, HA], seed=None, sighex: str = None,
                 config=None, metrics=NullMetricsCollector()):
        config = config or getConfig()
        Batched.__init__(self, config=config, metrics=metrics)
        KITZStack.__init__(self, stackParams, msgHandler, registry=registry,
                           seed=seed, sighex=sighex, config=config,
                           metrics=metrics,
                           mt_incoming_size=MetricType.INCOMING_NODE_MESSAGE_SIZE,
                           mt_outgoing_size=MetricType.OUTGOING_NODE_MESSAGE_SIZE)
        MessageProcessor.__init__(self, allowDictOnly=False)
Example #11
    def __init__(self,
                 node_id,
                 bls_bft: BlsBft,
                 is_master,
                 metrics: MetricsCollector = NullMetricsCollector()):
        super().__init__(bls_bft, is_master)
        self.node_id = node_id
        self._signatures = {}
        self._bls_latest_multi_sig = None  # MultiSignature
        self.state_root_serializer = state_roots_serializer
        self.metrics = metrics
Example #12
    def __init__(self,
                 node_id,
                 bls_bft: BlsBft,
                 is_master,
                 database_manager: DatabaseManager,
                 metrics: MetricsCollector = NullMetricsCollector()):
        super().__init__(bls_bft, is_master)
        self._all_bls_latest_multi_sigs = None
        self.node_id = node_id
        self._database_manager = database_manager
        self._all_signatures = {}
        self.state_root_serializer = state_roots_serializer
        self.metrics = metrics
Example #13
    def __init__(self,
                 node,
                 monitor: Monitor,
                 config=None,
                 metrics: MetricsCollector = NullMetricsCollector()):
        # passing full node because Replica requires it
        self._node = node
        self._monitor = monitor
        self._metrics = metrics
        self._config = config
        self._replicas = SortedDict()  # type: SortedDict[int, Replica]
        self._messages_to_replicas = dict()  # type: Dict[int, deque]
        self.register_monitor_handler()
Example #14
    def __init__(self, tmpdir, config=None):
        node_names = ['Node1', 'Node2', 'Node3', 'Node4']
        self.basedirpath = tmpdir
        self.name = node_names[0]
        self.viewNo = 0
        self.db_manager = DatabaseManager()
        self.timer = QueueTimer()
        self.f = 1
        self.replicas = dict()
        self.requests = Requests()
        self.rank = None
        self.allNodeNames = node_names
        self.nodeReg = {name: HA("127.0.0.1", 0) for name in self.allNodeNames}
        self.nodeIds = []
        self.totalNodes = len(self.allNodeNames)
        self.poolManager = FakeSomething(
            node_names_ordered_by_rank=lambda: node_names)
        self.mode = Mode.starting
        self.monitor = FakeSomething(isMasterDegraded=lambda: False)
        self.config = config or getConfigOnce()
        self.nodeStatusDB = None
        self.quorums = Quorums(self.totalNodes)
        self.nodestack = FakeSomething(connecteds=set(self.allNodeNames))
        self.write_manager = FakeSomething(
            node_reg_handler=NodeRegHandler(self.db_manager))
        self.primaries_selector = RoundRobinConstantNodesPrimariesSelector(
            node_names)
        self.replicas = {
            0: Replica(node=self, instId=0, isMaster=True, config=self.config),
            1: Replica(node=self, instId=1, isMaster=False,
                       config=self.config),
            2: Replica(node=self, instId=2, isMaster=False, config=self.config)
        }
        self.requiredNumberOfInstances = 2
        self._found = False
        self.ledgerManager = LedgerManager(self)
        ledger0 = FakeLedger(0, 10)
        ledger1 = FakeLedger(1, 5)
        self.ledgerManager.addLedger(0, ledger0)
        self.ledgerManager.addLedger(1, ledger1)
        self.quorums = Quorums(self.totalNodes)
        self.metrics = NullMetricsCollector()

        # For catchup testing
        self.view_change_in_progress = False
        self.ledgerManager.last_caught_up_3PC = (0, 0)
        self.master_last_ordered_3PC = (0, 0)
        self.seqNoDB = {}

        # callbacks
        self.onBatchCreated = lambda self, *args, **kwargs: True
Example #15
    def __init__(self,
                 owner,
                 postAllLedgersCaughtUp: Optional[Callable] = None,
                 preCatchupClbk: Optional[Callable] = None,
                 postCatchupClbk: Optional[Callable] = None,
                 ledger_sync_order: Optional[List] = None,
                 metrics: MetricsCollector = NullMetricsCollector()):
        # If ledger_sync_order is not provided (is None), it is assumed that
        # `postCatchupCompleteClbk` of the LedgerInfo will be used
        self.owner = owner
        self._timer = owner.timer
        self.postAllLedgersCaughtUp = postAllLedgersCaughtUp
        self.preCatchupClbk = preCatchupClbk
        self.postCatchupClbk = postCatchupClbk
        self.ledger_sync_order = ledger_sync_order
        self.request_ledger_status_action_ids = dict()
        self.request_consistency_proof_action_ids = dict()
        self.metrics = metrics

        config = getConfig()
        provider = CatchupNodeDataProvider(owner)

        self._client_seeder_inbox, rx = create_direct_channel()
        self._client_seeder = ClientSeederService(rx, provider)

        self._node_seeder_inbox, rx = create_direct_channel()
        self._node_seeder = NodeSeederService(rx, provider)

        leecher_outbox_tx, leecher_outbox_rx = create_direct_channel()
        router = Router(leecher_outbox_rx)
        router.add(LedgerCatchupStart, self._on_ledger_sync_start)
        router.add(LedgerCatchupComplete, self._on_ledger_sync_complete)
        router.add(NodeCatchupComplete, self._on_catchup_complete)

        self._node_leecher_inbox, rx = create_direct_channel()
        self._node_leecher = NodeLeecherService(config=config,
                                                input=rx,
                                                output=leecher_outbox_tx,
                                                timer=self._timer,
                                                metrics=self.metrics,
                                                provider=provider)

        # Holds ledgers of different types with their info like callbacks, state, etc
        self.ledgerRegistry = {}  # type: Dict[int, LedgerInfo]

        # Largest 3 phase key received during catchup.
        # This field is needed to discard any stashed 3PC messages or
        # ordered messages since the transactions part of those messages
        # will be applied when they are received through the catchup process
        self.last_caught_up_3PC = (0, 0)
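Since last_caught_up_3PC is an ordered (view_no, pp_seq_no) pair, the discarding described in the comment above reduces to Python's lexicographic tuple comparison, for instance:

# Tuples compare lexicographically, so anything at or below
# last_caught_up_3PC can simply be dropped.
last_caught_up_3pc = (1, 300)
stashed_keys = [(1, 250), (1, 300), (2, 10)]
still_needed = [k for k in stashed_keys if k > last_caught_up_3pc]
assert still_needed == [(2, 10)]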
Example #16
    def __init__(self,
                 owner,
                 postAllLedgersCaughtUp: Optional[Callable] = None,
                 preCatchupClbk: Optional[Callable] = None,
                 postCatchupClbk: Optional[Callable] = None,
                 ledger_sync_order: Optional[List] = None,
                 metrics: MetricsCollector = NullMetricsCollector()):
        # If ledger_sync_order is not provided (is None), it is assumed that
        # `postCatchupCompleteClbk` of the LedgerInfo will be used
        self.owner = owner
        self._timer = owner.timer
        self.postAllLedgersCaughtUp = postAllLedgersCaughtUp
        self.preCatchupClbk = preCatchupClbk
        self.postCatchupClbk = postCatchupClbk
        self.ledger_sync_order = ledger_sync_order
        self.request_ledger_status_action_ids = dict()
        self.request_consistency_proof_action_ids = dict()
        self.metrics = metrics

        self._provider = CatchupNodeDataProvider(owner)

        self._client_seeder_inbox, rx = create_direct_channel()
        self._client_seeder = ClientSeederService(rx, self._provider)

        self._node_seeder_inbox, rx = create_direct_channel()
        self._node_seeder = NodeSeederService(rx, self._provider)

        self._leecher_outbox, rx = create_direct_channel()
        router = Router(rx)
        router.add(LedgerCatchupComplete, self._on_catchup_rep_service_stop)
        router.add(ConsProofReady, self._on_cons_proof_service_stop)

        self.config = getConfig()

        # Holds ledgers of different types with
        # their info like callbacks, state, etc
        self.ledgerRegistry = {}  # type: Dict[int, LedgerInfo]

        self._leechers = {
        }  # type: Dict[int, LedgerManager.LedgerLeecherService]

        # Largest 3 phase key received during catchup.
        # This field is needed to discard any stashed 3PC messages or
        # ordered messages since the transactions part of those messages
        # will be applied when they are received through the catchup process
        self.last_caught_up_3PC = (0, 0)

        # Nodes are added to this set when the current node has sent them a
        # CatchupReq and is waiting for a CatchupRep message.
        self.wait_catchup_rep_from = set()
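The wait_catchup_rep_from set is a plain outstanding-request tracker: add a node when sending it a CatchupReq, discard it when its CatchupRep arrives. For instance:

# Outstanding-request tracking with a set, as described above.
wait_catchup_rep_from = set()

def on_catchup_req_sent(node_name: str):
    wait_catchup_rep_from.add(node_name)

def on_catchup_rep_received(node_name: str):
    wait_catchup_rep_from.discard(node_name)

on_catchup_req_sent('Node2')
on_catchup_rep_received('Node2')
assert not wait_catchup_rep_from  # no replies outstanding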
Example #17
    def __init__(self,
                 stackParams: dict,
                 msgHandler: Callable,
                 seed=None,
                 config=None,
                 msgRejectHandler=None,
                 metrics=NullMetricsCollector(),
                 timer=None):
        config = config or getConfig()

        simple_zstack_class.__init__(
            self,
            stackParams,
            msgHandler,
            seed=seed,
            onlyListener=True,
            config=config,
            msgRejectHandler=msgRejectHandler,
            create_listener_monitor=config.TRACK_CONNECTED_CLIENTS_NUM_ENABLED,
            metrics=metrics,
            mt_incoming_size=MetricsName.INCOMING_CLIENT_MESSAGE_SIZE,
            mt_outgoing_size=MetricsName.OUTGOING_CLIENT_MESSAGE_SIZE,
            timer=timer)
        MessageProcessor.__init__(self, allowDictOnly=False)

        if config.CLIENT_STACK_RESTART_ENABLED and not config.TRACK_CONNECTED_CLIENTS_NUM_ENABLED:
            error_str = '{}: client stack restart is enabled (CLIENT_STACK_RESTART_ENABLED) ' \
                        'but connections tracking is disabled (TRACK_CONNECTED_CLIENTS_NUM_ENABLED), ' \
                        'please check your configuration'.format(self)
            raise RuntimeError(error_str)

        self.track_connected_clients_num_enabled = config.TRACK_CONNECTED_CLIENTS_NUM_ENABLED
        self.client_stack_restart_enabled = config.CLIENT_STACK_RESTART_ENABLED
        self.max_connected_clients_num = config.MAX_CONNECTED_CLIENTS_NUM
        self.postrestart_wait_time = config.STACK_POSTRESTART_WAIT_TIME
        self.min_stack_restart_timeout = config.MIN_STACK_RESTART_TIMEOUT
        self.max_stack_restart_time_deviation = config.MAX_STACK_RESTART_TIME_DEVIATION
        self.listenerQuota = config.CLIENT_TO_NODE_STACK_QUOTA
        self.listenerSize = config.CLIENT_TO_NODE_STACK_SIZE

        if self.track_connected_clients_num_enabled:
            logger.info(
                '{}: clients connections tracking is enabled.'.format(self))
            self.init_connections_tracking_params()
        if self.client_stack_restart_enabled:
            logger.info('{}: client stack restart is enabled.'.format(self))
            self.init_stack_restart_params()
Example #18
    def __init__(self,
                 stackParams: Dict,
                 msgHandler: Callable,
                 seed=None,
                 onlyListener=False,
                 sighex: str = None,
                 config=None,
                 msgRejectHandler=None,
                 create_listener_monitor=False,
                 metrics=NullMetricsCollector(),
                 mt_incoming_size=None,
                 mt_outgoing_size=None,
                 timer=None):

        # TODO: sighex is unused as of now, remove once test is removed or
        # maybe use sighex to generate all keys, DECISION DEFERRED

        self.stackParams = stackParams
        self.msgHandler = msgHandler

        # TODO: Ignoring `main` param as of now which determines
        # if the stack will have a listener socket or not.
        name = stackParams['name']
        ha = stackParams['ha']
        basedirpath = stackParams['basedirpath']
        queue_size = stackParams.get('queue_size', 0)

        auto = stackParams.pop('auth_mode', None)
        restricted = auto != AuthMode.ALLOW_ANY.value
        super().__init__(name,
                         ha,
                         basedirpath,
                         msgHandler=self.msgHandler,
                         restricted=restricted,
                         seed=seed,
                         onlyListener=onlyListener,
                         config=config,
                         msgRejectHandler=msgRejectHandler,
                         queue_size=queue_size,
                         create_listener_monitor=create_listener_monitor,
                         metrics=metrics,
                         mt_incoming_size=mt_incoming_size,
                         mt_outgoing_size=mt_outgoing_size,
                         timer=timer)
Example #19
    def __init__(self, tmpdir, config=None):
        self.basedirpath = tmpdir
        self.name = 'Node1'
        self.internal_bus = InternalBus()
        self.db_manager = DatabaseManager()
        self.timer = QueueTimer()
        self.f = 1
        self.replicas = dict()
        self.requests = Requests()
        self.rank = None
        self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
        self.nodeReg = {name: HA("127.0.0.1", 0) for name in self.allNodeNames}
        self.nodeIds = []
        self.totalNodes = len(self.allNodeNames)
        self.mode = Mode.starting
        self.config = config or getConfigOnce()
        self.nodeStatusDB = None
        self.replicas = {
            0: Replica(node=self, instId=0, isMaster=True, config=self.config),
            1: Replica(node=self, instId=1, isMaster=False,
                       config=self.config),
            2: Replica(node=self, instId=2, isMaster=False,
                       config=self.config),
        }
        self._found = False
        self.ledgerManager = LedgerManager(self)
        ledger0 = FakeLedger(0, 10)
        ledger1 = FakeLedger(1, 5)
        self.ledgerManager.addLedger(0, ledger0)
        self.ledgerManager.addLedger(1, ledger1)
        self.quorums = Quorums(self.totalNodes)
        self.view_changer = create_view_changer(self)
        self.elector = PrimarySelector(self)
        self.metrics = NullMetricsCollector()

        # For catchup testing
        self.catchup_rounds_without_txns = 0
        self.view_change_in_progress = False
        self.ledgerManager.last_caught_up_3PC = (0, 0)
        self.master_last_ordered_3PC = (0, 0)
        self.seqNoDB = {}

        # callbacks
        self.onBatchCreated = lambda self, *args, **kwargs: True
Example #20
    def __init__(
            self,
            data: ConsensusSharedData,
            bus: InternalBus,
            network: ExternalBus,
            stasher: StashingRouter,
            db_manager: DatabaseManager,
            metrics: MetricsCollector = NullMetricsCollector(),
    ):
        self._data = data
        self._bus = bus
        self._network = network
        # checkpoint keys are (seqNoStart, seqNoEnd) tuples; order by seqNoEnd
        self._checkpoint_state = SortedDict(lambda k: k[1])
        self._stasher = stasher
        self._subscription = Subscription()
        self._validator = CheckpointMsgValidator(self._data)
        self._db_manager = db_manager
        self.metrics = metrics

        # Stashed checkpoints for each view. The key of the outermost
        # dictionary is the view_no; its value is a dictionary keyed by the
        # checkpoint's (seqNoStart, seqNoEnd) range, which in turn maps each
        # sender to the checkpoint it sent.
        # Dict[view_no, Dict[(seqNoStart, seqNoEnd), Dict[sender, Checkpoint]]]
        self._stashed_recvd_checkpoints = {}

        self._config = getConfig()
        self._logger = getlogger()

        self._subscription.subscribe(stasher, Checkpoint,
                                     self.process_checkpoint)

        self._subscription.subscribe(bus, Ordered, self.process_ordered)
        self._subscription.subscribe(bus, BackupSetupLastOrdered,
                                     self.process_backup_setup_last_ordered)
        self._subscription.subscribe(bus, NewViewAccepted,
                                     self.process_new_view_accepted)
Example #21
    def __init__(self, metrics: MetricsCollector = NullMetricsCollector()):
        self.requests = Requests()
        self.requested_propagates_for = OrderedSet()
        self.metrics = metrics
Example #22
    def __init__(self,
                 node: 'plenum.server.node.Node',
                 instId: int,
                 config=None,
                 isMaster: bool = False,
                 bls_bft_replica: BlsBftReplica = None,
                 metrics: MetricsCollector = NullMetricsCollector(),
                 get_current_time=None,
                 get_time_for_3pc_batch=None):
        """
        Create a new replica.

        :param node: Node on which this replica is located
        :param instId: the id of the protocol instance the replica belongs to
        :param isMaster: is this a replica of the master protocol instance
        """
        HasActionQueue.__init__(self)
        self.get_current_time = get_current_time or time.perf_counter
        self.get_time_for_3pc_batch = get_time_for_3pc_batch or node.utc_epoch
        # self.stats = Stats(TPCStat)
        self.config = config or getConfig()
        self.metrics = metrics
        self.node = node
        self.instId = instId
        self.name = self.generateName(node.name, self.instId)
        self.logger = getlogger(self.name)
        self.validator = ReplicaValidator(self)

        self.outBox = deque()
        """
        This queue is used by the replica to send messages to its node. Replica
        puts messages that are consumed by its node
        """

        self.inBox = deque()
        """
        This queue is used by the replica to receive messages from its node.
        Node puts messages that are consumed by the replica
        """

        self.inBoxStash = deque()
        """
        If messages need to go back on the queue, they go here temporarily and
        are put back on the queue on a state change
        """

        self._is_master = isMaster

        # Dictionary to keep track of which replica was primary during each
        # view. Key is the view no and value is the name of the primary
        # replica during that view
        self.primaryNames = OrderedDict()  # type: OrderedDict[int, str]

        # Flag used for premature exit from the loop in the method
        # `processStashedMsgsForNewWaterMarks`. See that method for details.
        self.consumedAllStashedMsgs = True

        self._freshness_checker = FreshnessChecker(
            freshness_timeout=self.config.STATE_FRESHNESS_UPDATE_INTERVAL)

        self._bls_bft_replica = bls_bft_replica
        self._state_root_serializer = state_roots_serializer

        # Whether we already logged a message about receiving a request
        # while there was no primary
        self.warned_no_primary = False

        self._consensus_data = ConsensusSharedData(
            self.name, self.node.poolManager.node_names_ordered_by_rank(),
            self.instId, self.isMaster)
        self._internal_bus = InternalBus()
        self._external_bus = ExternalBus(send_handler=self.send)
        self.stasher = self._init_replica_stasher()
        self._subscription = Subscription()
        self._bootstrap_consensus_data()
        self._subscribe_to_external_msgs()
        self._subscribe_to_internal_msgs()
        self._checkpointer = self._init_checkpoint_service()
        self._ordering_service = self._init_ordering_service()
        self._message_req_service = self._init_message_req_service()
        self._view_change_service = self._init_view_change_service()
        for ledger_id in self.ledger_ids:
            self.register_ledger(ledger_id)
Example #23
def create_fake_catchup_rep_service(ledger: Ledger):
    class FakeCatchupProvider(CatchupDataProvider):
        def __init__(self, ledger):
            self._ledger = ledger

        def all_nodes_names(self):
            pass

        def node_name(self) -> str:
            pass

        def ledgers(self) -> List[int]:
            return [0]

        def ledger(self, ledger_id: int) -> Ledger:
            if ledger_id == 0:
                return self._ledger

        def verifier(self, ledger_id: int) -> MerkleVerifier:
            pass

        def eligible_nodes(self) -> List[str]:
            pass

        def three_phase_key_for_txn_seq_no(self, ledger_id: int,
                                           seq_no: int) -> Tuple[int, int]:
            pass

        def update_txn_with_extra_data(self, txn: dict) -> dict:
            pass

        def transform_txn_for_ledger(self, txn: dict) -> dict:
            pass

        def notify_catchup_start(self, ledger_id: int):
            pass

        def notify_catchup_complete(self, ledger_id: int,
                                    last_3pc: Tuple[int, int]):
            pass

        def notify_transaction_added_to_ledger(self, ledger_id: int,
                                               txn: dict):
            pass

        def send_to(self,
                    msg: Any,
                    to: str,
                    message_splitter: Optional[Callable] = None):
            pass

        def send_to_nodes(self, msg: Any):
            pass

        def blacklist_node(self, node_name: str, reason: str):
            pass

        def discard(self,
                    msg,
                    reason,
                    logMethod=logging.error,
                    cliOutput=False):
            pass

    _, input_rx = create_direct_channel()
    provider = FakeCatchupProvider(ledger)
    service = CatchupRepService(ledger_id=0,
                                config=None,
                                input=input_rx,
                                output=None,
                                timer=None,
                                metrics=NullMetricsCollector(),
                                provider=provider)

    return service
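A hedged usage sketch for this factory; make_test_ledger is a hypothetical stand-in for the Ledger setup a real test would perform:

# Illustrative only: building a real plenum Ledger needs a merkle tree
# and storage, abstracted here behind the hypothetical make_test_ledger.
def test_catchup_rep_service_wiring():
    ledger = make_test_ledger()  # hypothetical fixture
    service = create_fake_catchup_rep_service(ledger)
    assert service is not None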
Example #24
    def __init__(self, name, ha, basedirpath, msgHandler, restricted=True,
                 seed=None, onlyListener=False, config=None, msgRejectHandler=None, queue_size=0,
                 create_listener_monitor=False, metrics=NullMetricsCollector(),
                 mt_incoming_size=None, mt_outgoing_size=None, timer=None):
        self._name = name
        self.ha = ha
        self.basedirpath = basedirpath
        self.msgHandler = msgHandler
        self.seed = seed
        self.queue_size = queue_size
        self.config = config or getConfig()
        self.msgRejectHandler = msgRejectHandler or self.__defaultMsgRejectHandler

        self.metrics = metrics
        self.mt_incoming_size = mt_incoming_size
        self.mt_outgoing_size = mt_outgoing_size

        self.listenerQuota = self.config.DEFAULT_LISTENER_QUOTA
        self.listenerSize = self.config.DEFAULT_LISTENER_SIZE
        self.senderQuota = self.config.DEFAULT_SENDER_QUOTA
        self.msgLenVal = MessageLenValidator(self.config.MSG_LEN_LIMIT)

        self.homeDir = None
        # As of now there is only one file each in secretKeysDir and sigKeyDir
        self.publicKeysDir = None
        self.secretKeysDir = None
        self.verifKeyDir = None
        self.sigKeyDir = None

        self.signer = None
        self.verifiers = {}

        self.setupDirs()
        self.setupOwnKeysIfNeeded()
        self.setupSigning()

        # self.poller = test.asyncio.Poller()

        self.restricted = restricted

        self.ctx = None  # type: Context
        self.listener = None
        self.create_listener_monitor = create_listener_monitor
        self.listener_monitor = None
        self.auth = None

        # Each remote is identified uniquely by the name
        self._remotes = {}  # type: Dict[str, Remote]

        self.remotesByKeys = {}

        # Indicates if this stack will maintain any remotes or will
        # communicate simply to listeners. Used in ClientZStack
        self.onlyListener = onlyListener

        self._conns = set()  # type: Set[str]

        self.rxMsgs = deque()
        self._created = time.perf_counter()

        self.last_heartbeat_at = None

        self._stashed_to_disconnected = {}
        self._stashed_pongs = set()
        self._received_pings = set()
        self._client_message_provider = ClientMessageProvider(self.name,
                                                              self.config,
                                                              self.prepare_to_send,
                                                              self.metrics,
                                                              self.mt_outgoing_size,
                                                              timer)