def fake_node(tconf):
    node = FakeSomething(config=tconf,
                         timer=QueueTimer(),
                         nodeStatusDB=None,
                         master_replica=FakeSomething(inBox=deque(),
                                                      inBoxRouter=Router(),
                                                      _external_bus=MockNetwork(),
                                                      internal_bus=InternalBus(),
                                                      logger=FakeSomething(
                                                          info=lambda *args, **kwargs: True
                                                      )),
                         name="Alpha",
                         master_primary_name="Alpha",
                         on_view_change_start=lambda *args, **kwargs: True,
                         start_catchup=lambda *args, **kwargs: True,
                         nodeInBox=deque(),
                         nodeMsgRouter=Router(),
                         metrics=None,
                         process_one_node_message=None,
                         quota_control=FakeSomething(
                             node_quota=Quota(count=100,
                                              size=100)),
                         nodestack=FakeSomething(
                             service=lambda *args, **kwargs: eventually(lambda: True)),
                         set_view_for_replicas=lambda view_no: None,
                         set_view_change_status=lambda view_no: None
                         )
    node.metrics = functools.partial(Node._createMetricsCollector, node)()
    node.process_one_node_message = functools.partial(Node.process_one_node_message, node)
    return node
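
Every example on this page constructs a Router from (message type, handler) pairs and drains a deque-style inbox through it. A minimal sketch of that dispatch convention (MiniRouter is illustrative only, not plenum's actual Router; it matches on exact message type for simplicity):

class MiniRouter:
    # Illustrative: maps a message type to a handler callable, mirroring
    # the (MsgType, handler) tuples passed to Router in these examples.
    def __init__(self, *routes):
        self.routes = dict(routes)

    def handle(self, msg):
        handler = self.routes.get(type(msg))
        if handler is None:
            raise KeyError("no route for {}".format(type(msg).__name__))
        return handler(msg)

    def service(self, inbox):
        # Drain a deque-style inbox, dispatching each message in order.
        processed = 0
        while inbox:
            self.handle(inbox.popleft())
            processed += 1
        return processed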
Example #2
def view_changer(tconf):
    node = FakeSomething(
        config=tconf,
        master_replica=FakeSomething(
            inBox=deque(),
            inBoxRouter=Router(),
            logger=FakeSomething(info=lambda *args, **kwargs: True)),
        name="Alpha",
        master_primary_name="Alpha",
        on_view_change_start=lambda *args, **kwargs: True,
        start_catchup=lambda *args, **kwargs: True,
        nodeInBox=deque(),
        nodeMsgRouter=Router(),
        metrics=None,
        process_one_node_message=None,
        quota_control=FakeSomething(node_quota=Quota(count=100, size=100)),
        nodestack=FakeSomething(
            service=lambda *args, **kwargs: eventually(lambda: True)))
    node.metrics = functools.partial(Node._createMetricsCollector, node)()
    node.process_one_node_message = functools.partial(
        Node.process_one_node_message, node)
    view_changer = ViewChanger(node)
    node.view_changer = view_changer
    node.viewNo = view_changer.view_no
    node.master_replica.node = node
    return view_changer
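
Factories like fake_node and view_changer above are typically wired up as pytest fixtures. A sketch assuming pytest, a tconf fixture defined elsewhere, and that view_changer above is a plain factory function:

import pytest

@pytest.fixture
def vc(tconf):  # `tconf` is assumed to be provided by the test config setup
    return view_changer(tconf)

def test_initial_view_no(vc):
    # The ViewChanger starts in view 0 (see `_view_no = 0` in later examples)
    assert vc.view_no == 0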
Example #3
    def __init__(self, node) -> None:
        self._node = node
        super().__init__([ObserverSyncPolicyEachBatch(self._node)])
        HasActionQueue.__init__(self)

        self._inbox = deque()
        self._inbox_router = Router((ObservedData, self.apply_data))
Example #4
    def __init__(self,
                 name: str,
                 nodeReg: Dict[str, HA] = None,
                 ha: Union[HA, Tuple[str, int]] = None,
                 peerHA: Union[HA, Tuple[str, int]] = None,
                 basedirpath: str = None,
                 config=None,
                 sighex: str = None):
        config = config or getConfig()
        super().__init__(name, nodeReg, ha, basedirpath, config, sighex)
        self.graphStore = self.getGraphStore()
        self.autoDiscloseAttributes = False
        self.requestedPendingTxns = False
        self.hasAnonCreds = bool(peerHA)
        if self.hasAnonCreds:
            self.peerHA = peerHA if isinstance(peerHA, HA) else HA(*peerHA)
            stackargs = dict(name=self.stackName,
                             ha=peerHA,
                             main=True,
                             auto=AutoMode.always)
            self.peerMsgRoutes = []
            self.peerMsgRouter = Router(*self.peerMsgRoutes)
            self.peerStack = SimpleStack(stackargs,
                                         msgHandler=self.handlePeerMessage)
            self.peerStack.sign = self.sign
            self.peerInbox = deque()
        self._observers = {}  # type: Dict[str, Callable]
        self._observerSet = set()  # makes it easier to guard against duplicates
Example #5
    def __init__(self, node):
        super().__init__(node)

        # Flag variable which indicates which replica has nominated for itself
        self.replicaNominatedForItself = None

        self.nominations = {}

        self.primaryDeclarations = {}

        self.scheduledPrimaryDecisions = {}

        self.reElectionProposals = {}

        self.reElectionRounds = {}

        # # Tracks when election started for each instance, once
        # # `MaxElectionTimeoutFactor`*node_count elapses and no primary decided,
        # # re-start election
        # self.election_start_times = {}

        routerArgs = [(Nomination, self.processNominate),
                      (Primary, self.processPrimary),
                      (Reelection, self.processReelection)]
        self.inBoxRouter = Router(*routerArgs)

        # Keeps track of duplicate messages received. Used to blacklist a
        # node if it sends more than one duplicate message; the threshold of
        # 1 is configurable. One duplicate is tolerated because a node
        # communicating with an already lagged node may send an extra
        # NOMINATE or PRIMARY
        self.duplicateMsgs = {}  # Dict[Tuple, int]
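
The duplicateMsgs comment describes simple per-key counting. A hedged sketch of that bookkeeping (the function name and the tolerated threshold are illustrative, not plenum's actual code):

def note_duplicate(duplicateMsgs, msg_key, tolerated=1):
    # Count repeats of the same (message, sender) key; return True once
    # the tolerated count is exceeded, i.e. the sender is a blacklist
    # candidate.
    duplicateMsgs[msg_key] = duplicateMsgs.get(msg_key, 0) + 1
    return duplicateMsgs[msg_key] > tolerated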
Example #6
    def __init__(self, provider: ViewChangerDataProvider, timer: TimerService):
        self.provider = provider
        self._timer = timer

        self.inBox = deque()
        self.outBox = deque()
        self.inBoxRouter = Router(
            (InstanceChange, self.process_instance_change_msg)
        )

        self.instance_changes = InstanceChangeProvider(self.config.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL,
                                                       node_status_db=self.provider.node_status_db)

        self.previous_view_no = None

        # Action for _schedule instanceChange messages
        self.instance_change_action = None

        # Count of instance change rounds
        self.instance_change_rounds = 0

        # Time for view_change_starting
        self.start_view_change_ts = 0

        # Force periodic view change if enabled in config
        force_view_change_freq = self.config.ForceViewChangeFreq
        if force_view_change_freq > 0:
            RepeatingTimer(self._timer, force_view_change_freq, self.on_master_degradation)

        # Start periodic freshness check
        state_freshness_update_interval = self.config.STATE_FRESHNESS_UPDATE_INTERVAL
        if state_freshness_update_interval > 0:
            RepeatingTimer(self._timer, state_freshness_update_interval, self.check_freshness)
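
Both timers above follow the same config-guarded pattern: the repeating action is scheduled only when its interval is positive. A sketch of that guard using an illustrative timer API (the schedule() method here is an assumption, not plenum's TimerService interface):

def maybe_start_repeating(timer, interval, callback):
    # Run `callback` every `interval` seconds, but only if the feature is
    # enabled (interval > 0), mirroring the config checks above.
    if interval <= 0:
        return None

    def fire():
        callback()
        timer.schedule(interval, fire)  # assumed API: run `fire` after `interval`

    timer.schedule(interval, fire)
    return fire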
Example #7
    def __init__(self, node):
        super().__init__(node)

        # TODO: How does the primary decider ensure that a node does not
        # have a primary while it's catching up
        self.node = node

        self.replicaNominatedForItself = None
        """Flag variable which indicates which replica has nominated
        for itself"""

        self.nominations = {}

        self.primaryDeclarations = {}

        self.scheduledPrimaryDecisions = {}

        self.reElectionProposals = {}

        self.reElectionRounds = {}

        routerArgs = [(Nomination, self.processNominate),
                      (Primary, self.processPrimary),
                      (Reelection, self.processReelection)]
        self.inBoxRouter = Router(*routerArgs)

        self.pendingMsgsForViews = {}  # Dict[int, deque]

        # Keeps track of duplicate messages received. Used to blacklist a
        # node if it sends more than one duplicate message; the threshold of
        # 1 is configurable. One duplicate is tolerated because a node
        # communicating with an already lagged node may send an extra
        # NOMINATE or PRIMARY
        self.duplicateMsgs = {}  # Dict[Tuple, int]
Example #8
    def __init__(self, provider: ViewChangerDataProvider, timer: TimerService):
        self.provider = provider
        self._timer = timer
        self.pre_vc_strategy = None

        self._view_no = 0  # type: int

        self.inBox = deque()
        self.outBox = deque()
        self.inBoxRouter = Router(
            (InstanceChange, self.process_instance_change_msg),
            (ViewChangeDone, self.process_vchd_msg),
            (FutureViewChangeDone, self.process_future_view_vchd_msg))

        self.instance_changes = InstanceChangeProvider(
            self.config.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL,
            node_status_db=self.provider.node_status_db)

        # Tracks if other nodes are indicating that this node is in lower view
        # than others. Keeps a map of view no to senders
        # TODO: Consider if sufficient ViewChangeDone for 2 different (and
        # higher views) are received, should one view change be interrupted in
        # between.
        self._next_view_indications = {}

        self._view_change_in_progress = False
        self.pre_view_change_in_progress = False

        self.previous_view_no = None
        self.previous_master_primary = None

        self.set_defaults()

        self.initInsChngThrottling()

        # Action for _schedule instanceChange messages
        self.instance_change_action = None

        # Count of instance change rounds
        self.instance_change_rounds = 0

        # Time for view_change_starting
        self.start_view_change_ts = 0

        # Last successful viewNo.
        # In some cases the view_change process may not complete in time;
        # in that case we want to know which viewNo was successful (the
        # last completed view_change)
        self.last_completed_view_no = 0

        # Force periodic view change if enabled in config
        force_view_change_freq = self.config.ForceViewChangeFreq
        if force_view_change_freq > 0:
            RepeatingTimer(self._timer, force_view_change_freq,
                           self.on_master_degradation)

        # Start periodic freshness check
        state_freshness_update_interval = self.config.STATE_FRESHNESS_UPDATE_INTERVAL
        if state_freshness_update_interval > 0:
            RepeatingTimer(self._timer, state_freshness_update_interval,
                           self.check_freshness)
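
The _next_view_indications map records, for each higher view number, which senders have indicated it. A hedged sketch of that bookkeeping (the quorum size is deployment-dependent and illustrative here):

def note_next_view_indication(next_view_indications, view_no, sender, quorum):
    # Record that `sender` believes this node is behind (in a lower view).
    # Once `quorum` distinct senders indicate the same higher view, the
    # node has grounds to advance its view.
    next_view_indications.setdefault(view_no, set()).add(sender)
    return len(next_view_indications[view_no]) >= quorum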
Example #9
    def __init__(self,
                 name: str = None,
                 nodeReg: Dict[str, HA] = None,
                 ha: Union[HA, Tuple[str, int]] = None,
                 peerHA: Union[HA, Tuple[str, int]] = None,
                 basedirpath: str = None,
                 config=None,
                 sighex: str = None):
        config = config or getConfig()
        super().__init__(name, nodeReg, ha, basedirpath, config, sighex)
        self.autoDiscloseAttributes = False
        self.requestedPendingTxns = False
        self.hasAnonCreds = bool(peerHA)
        if self.hasAnonCreds:
            self.peerHA = peerHA if isinstance(peerHA, HA) else HA(*peerHA)

            stackargs = dict(name=self.stackName,
                             ha=peerHA,
                             main=True,
                             auth_mode=AuthMode.ALLOW_ANY.value)

            self.peerMsgRoutes = []
            self.peerMsgRouter = Router(*self.peerMsgRoutes)
            self.peerStack = self.peerStackClass(
                stackargs, msgHandler=self.handlePeerMessage)
            self.peerStack.sign = self.sign
            self.peerInbox = deque()

        # To let the client send these transactions to just one node
        self._read_only_requests = {
            GET_NYM, GET_ATTR, GET_CLAIM_DEF, GET_SCHEMA
        }
Example #10
    def __init__(self, node):
        self.node = node

        self._view_no = 0  # type: int

        HasActionQueue.__init__(self)

        self.inBox = deque()
        self.outBox = deque()
        self.inBoxRouter = Router(
            (InstanceChange, self.process_instance_change_msg),
            (ViewChangeDone, self.process_vchd_msg),
            (FutureViewChangeDone, self.process_future_view_vchd_msg))

        self.instanceChanges = InstanceChanges()

        # The quorum of `ViewChangeDone` msgs is different depending on whether we're doing a real view change,
        # or just propagating view_no and Primary from `CurrentState` messages sent to a newly joined Node.
        # TODO: separate real view change and Propagation of Primary
        # TODO: separate catch-up, view-change and primary selection so that
        # they are really independent.
        self.propagate_primary = False

        # Tracks if other nodes are indicating that this node is in lower view
        # than others. Keeps a map of view no to senders
        # TODO: Consider if sufficient ViewChangeDone for 2 different (and
        # higher views) are received, should one view change be interrupted in
        # between.
        self._next_view_indications = {}

        self._view_change_in_progress = False

        self.previous_master_primary = None

        self.set_defaults()

        self.initInsChngThrottling()

        # Action for _schedule instanceChange messages
        self.instance_change_action = None

        # Count of instance change rounds
        self.instance_change_rounds = 0

        # Time for view_change_starting
        self.start_view_change_ts = 0

        # Last successful viewNo.
        # In some cases the view_change process may not complete in time;
        # in that case we want to know which viewNo was successful (the
        # last completed view_change)
        self.last_completed_view_no = 0

        # Force periodic view change if enabled in config
        force_view_change_freq = node.config.ForceViewChangeFreq
        if force_view_change_freq > 0:
            self.startRepeating(self.on_master_degradation,
                                force_view_change_freq)
Example #11
    def __init__(self) -> None:
        HasActionQueue.__init__(self)
        self._inbox = deque()
        self._outbox = deque()
        self._inbox_router = Router((BatchCommitted, self.process_new_batch))

        # TODO: support other policies
        self.__sync_policies = {
            ObserverSyncPolicyType.EACH_BATCH:
            ObservableSyncPolicyEachBatch(self)
        }
Example #12
    def __init__(self, node):
        HasActionQueue.__init__(self)
        self.node = node

        self.name = node.name
        self.replicas = node.replicas
        self.nodeCount = 0
        self.inBox = deque()
        self.outBox = deque()
        self.inBoxRouter = Router(*self.routes)

        # Need to keep track of who was primary for the master protocol
        # instance for the previous view. This variable only matters between
        # elections; the elector will set it before triggering a new
        # election and will reset it after a primary is decided for the
        # master instance
        self.previous_master_primary = None
Example #13
    def __init__(self, node):
        self.node = node

        self.view_no = 0  # type: int

        HasActionQueue.__init__(self)

        self.inBox = deque()
        self.outBox = deque()
        self.inBoxRouter = Router(
            (InstanceChange, self.process_instance_change_msg),
            (ViewChangeDone, self.process_vchd_msg)
        )

        self.instanceChanges = InstanceChanges()

        # The quorum of `ViewChangeDone` msgs is different depending on whether we're doing a real view change,
        # or just propagating view_no and Primary from `CurrentState` messages sent to a newly joined Node.
        # TODO: separate real view change and Propagation of Primary
        # TODO: separate catch-up, view-change and primary selection so that
        # they are really independent.
        self.propagate_primary = False

        # Tracks if other nodes are indicating that this node is in lower view
        # than others. Keeps a map of view no to senders
        # TODO: Consider if sufficient ViewChangeDone for 2 different (and
        # higher views) are received, should one view change be interrupted in
        # between.
        self._next_view_indications = SortedDict()

        self._view_change_in_progress = False

        self.previous_master_primary = None

        self.set_defaults()

        self.initInsChngThrottling()

        # Action for _schedule instanceChange messages
        self.instance_change_action = None

        # Count of instance change rounds
        self.instance_change_rounds = 0
Example #14
File: replica.py Project: loxadim/plenum
    def __init__(self,
                 node: 'plenum.server.node.Node',
                 instId: int,
                 isMaster: bool = False):
        """
        Create a new replica.

        :param node: Node on which this replica is located
        :param instId: the id of the protocol instance the replica belongs to
        :param isMaster: is this a replica of the master protocol instance
        """
        HasActionQueue.__init__(self)
        self.stats = Stats(TPCStat)

        self.config = getConfig()

        routerArgs = [(ReqDigest, self._preProcessReqDigest)]

        for r in [PrePrepare, Prepare, Commit]:
            routerArgs.append((r, self.processThreePhaseMsg))

        routerArgs.append((Checkpoint, self.processCheckpoint))
        routerArgs.append((ThreePCState, self.process3PhaseState))

        self.inBoxRouter = Router(*routerArgs)

        self.threePhaseRouter = Router((PrePrepare, self.processPrePrepare),
                                       (Prepare, self.processPrepare),
                                       (Commit, self.processCommit))

        self.node = node
        self.instId = instId

        self.name = self.generateName(node.name, self.instId)

        self.outBox = deque()
        """
        This queue is used by the replica to send messages to its node. Replica
        puts messages that are consumed by its node
        """

        self.inBox = deque()
        """
        This queue is used by the replica to receive messages from its node.
        Node puts messages that are consumed by the replica
        """

        self.inBoxStash = deque()
        """
        If messages need to go back on the queue, they go here temporarily and
        are put back on the queue on a state change
        """

        self.isMaster = isMaster

        # Indicates name of the primary replica of this protocol instance.
        # None in case the replica does not know who the primary of the
        # instance is
        self._primaryName = None  # type: Optional[str]

        # Requests waiting to be processed once the replica is able to decide
        # whether it is primary or not
        self.postElectionMsgs = deque()

        # PRE-PREPAREs that are waiting to be processed but do not have the
        # corresponding request digest. Happens when the replica has not been
        # forwarded the request by the node but is getting 3-phase messages.
        # The value is a list since a malicious entity might send a
        # PRE-PREPARE with a different digest, and since we don't have the
        # request finalised, we store all PRE-PREPAREs
        self.prePreparesPendingReqDigest = {}
        # type: Dict[Tuple[str, int], List]

        # PREPAREs stored by a non-primary replica for which it has not
        # received the corresponding PRE-PREPARE. Maps a tuple of view no
        # and prepare sequence number to a deque of PREPAREs. The deque is
        # flushed on receiving every PRE-PREPARE request.
        self.preparesWaitingForPrePrepare = {}
        # type: Dict[Tuple[int, int], deque]

        # COMMITs stored for which no PRE-PREPARE or PREPARE has been
        # received
        self.commitsWaitingForPrepare = {}
        # type: Dict[Tuple[int, int], deque]

        # Dictionary of sent PRE-PREPAREs stored by the primary replica,
        # which it has broadcast to all other non-primary replicas.
        # Key is a 2-element tuple of (viewNo, pre-prepare seqNo) and value
        # is a tuple of Request Digest and time
        self.sentPrePrepares = {}
        # type: Dict[Tuple[int, int], Tuple[Tuple[str, int], float]]

        # Dictionary of received PRE-PREPAREs. Key of dictionary is a 2
        # element tuple with elements viewNo, pre-prepare seqNo and value is
        # a tuple of Request Digest and time
        self.prePrepares = {}
        # type: Dict[Tuple[int, int], Tuple[Tuple[str, int], float]]

        # Dictionary of received Prepare requests. Key is a 2-element tuple
        # of (viewNo, seqNo) and value is a 2-element tuple containing the
        # request digest and the set of sender node names (sender replica
        # names in case of multiple protocol instances)
        # (viewNo, seqNo) -> ((identifier, reqId), {senders})
        self.prepares = Prepares()
        # type: Dict[Tuple[int, int], Tuple[Tuple[str, int], Set[str]]]

        self.commits = Commits()
        # type: Dict[Tuple[int, int], Tuple[Tuple[str, int], Set[str]]]

        # Set of tuples to keep track of ordered requests. Each tuple is
        # (viewNo, ppSeqNo)
        self.ordered = OrderedSet()  # type: OrderedSet[Tuple[int, int]]

        # Dictionary to keep track of which replica was primary during each
        # view. Key is the view no and value is the name of the primary
        # replica during that view
        self.primaryNames = {}  # type: Dict[int, str]

        # Holds msgs that are for later views
        self.threePhaseMsgsForLaterView = deque()
        # type: deque[(ThreePhaseMsg, str)]

        # Holds tuple of view no and prepare seq no of 3-phase messages it
        # received while it was not participating
        self.stashingWhileCatchingUp = set()  # type: Set[Tuple]

        # Commits which are not being ordered since commits with lower view
        # numbers and sequence numbers have not been ordered yet. Key is the
        # viewNo and value is a map of pre-prepare sequence number to commit
        self.stashedCommitsForOrdering = {}
        # type: Dict[int, Dict[int, Commit]]

        self.checkpoints = SortedDict(lambda k: k[0])

        self.stashingWhileOutsideWaterMarks = deque()

        # Low water mark
        self._h = 0  # type: int

        # High water mark
        self.H = self._h + self.config.LOG_SIZE  # type: int

        self.lastPrePrepareSeqNo = self.h  # type: int
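
The water marks above define a sliding window of pre-prepare sequence numbers the replica will handle: h is the low mark and H = h + LOG_SIZE the high mark. A minimal sketch of the window check (standard PBFT-style bounds; the exact plenum predicate may differ):

def is_between_watermarks(h, H, pp_seq_no):
    # A 3-phase message is processed only if its ppSeqNo lies in (h, H];
    # anything outside goes to stashingWhileOutsideWaterMarks.
    return h < pp_seq_no <= H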
Example #15
    def __init__(self,
                 node: 'plenum.server.node.Node',
                 instId: int,
                 isMaster: bool = False):
        """
        Create a new replica.

        :param node: Node on which this replica is located
        :param instId: the id of the protocol instance the replica belongs to
        :param isMaster: is this a replica of the master protocol instance
        """
        super().__init__()
        self.stats = Stats(TPCStat)

        routerArgs = [(ReqDigest, self._preProcessReqDigest)]

        for r in [PrePrepare, Prepare, Commit]:
            routerArgs.append((r, self.processThreePhaseMsg))
        self.inBoxRouter = Router(*routerArgs)

        self.threePhaseRouter = Router((PrePrepare, self.processPrePrepare),
                                       (Prepare, self.processPrepare),
                                       (Commit, self.processCommit))

        self.node = node
        self.instId = instId

        self.name = self.generateName(node.name, self.instId)

        self.outBox = deque()
        """
        This queue is used by the replica to send messages to its node. Replica
        puts messages that are consumed by its node
        """

        self.inBox = deque()
        """
        This queue is used by the replica to receive messages from its node.
        Node puts messages that are consumed by the replica
        """

        self.inBoxStash = deque()
        """
        If messages need to go back on the queue, they go here temporarily and
        are put back on the queue on a state change
        """

        self.isMaster = isMaster

        # Indicates name of the primary replica of this protocol instance.
        # None in case the replica does not know who the primary of the
        # instance is
        self._primaryName = None  # type: Optional[str]

        # Requests waiting to be processed once the replica is able to decide
        # whether it is primary or not
        self.postElectionMsgs = deque()

        # Requests stored by a non-primary replica for which it is expecting
        # the corresponding PRE-PREPARE requests. Maps a tuple of client id
        # and request id (sequence no) to the digest. We avoid a set of
        # Tuple3(identifier, reqId, digest) since such a large hashable
        # element is undesirable; this way we can also look up the request
        # by (identifier, reqId) and compare the digest with the received
        # PrePrepare request's digest.
        self.reqsPendingPrePrepare = {}
        # type: Dict[Tuple[str, int], str]

        # PREPAREs stored by a non-primary replica for which it has not
        # received the corresponding PRE-PREPARE. Maps a tuple of view no
        # and prepare sequence number to a deque of PREPAREs. The deque is
        # flushed on receiving every PRE-PREPARE request.
        self.preparesWaitingForPrePrepare = {}
        # type: Dict[Tuple[int, int], deque]

        # Dictionary of sent PRE-PREPAREs stored by the primary replica,
        # which it has broadcast to all other non-primary replicas.
        # Key is a 2-element tuple of (viewNo, pre-prepare seqNo) and value
        # is a Request Digest
        self.sentPrePrepares = {}
        # type: Dict[Tuple[int, int], Tuple[ReqDigest, float]]

        # Dictionary of received PRE-PREPAREs. Key of dictionary is a 2
        # element tuple with elements viewNo, pre-prepare seqNo and value is
        # a Request Digest
        self.prePrepares = {}
        # type: Dict[Tuple[int, int], Tuple[ReqDigest, float]]

        self.prePrepareSeqNo = 0  # type: int

        # Dictionary of received Prepare requests. Key is a 2-element tuple
        # of (viewNo, seqNo) and value is a 2-element tuple containing the
        # request digest and the set of sender node names (sender replica
        # names in case of multiple protocol instances)
        # (viewNo, seqNo) -> (digest, {senders})
        self.prepares = Prepares()
        # type: Dict[Tuple[int, int], Tuple[str, Set[str]]]

        self.commits = Commits()

        # Set of tuples to keep track of ordered requests
        self.ordered = set()  # type: Set[Tuple[int, int]]

        # Requests with sufficient COMMITs to be ordered, but for which the
        # replica has received neither the request digest from the node nor
        # a PRE-PREPARE or PREPARE. Maps a digest to a tuple of viewNo and
        # ppSeqNo
        self.commitsPendedForOrdering = {}

        # Dictionary to keep track of which replica was primary during each
        # view. Key is the view no and value is the name of the primary
        # replica during that view
        self.primaryNames = {}  # type: Dict[int, str]

        # Holds msgs that are for later views
        self.threePhaseMsgsForLaterView = deque()
        # type: deque[(ThreePhaseMsg, str)]

        # Holds tuple of view no and prepare seq no of 3-phase messages it
        # received while it was not participating
        self.stashingWhileCatchingUp = set()  # type: Set[Tuple]
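
preparesWaitingForPrePrepare holds early PREPAREs keyed by (viewNo, ppSeqNo); per the comments above, the deque is flushed when the matching PRE-PREPARE arrives. A hedged sketch of that flush (function name and the `process` callback are illustrative):

def flush_waiting_prepares(preparesWaitingForPrePrepare, view_no, pp_seq_no, process):
    # Replay PREPAREs that arrived before their PRE-PREPARE for this key.
    waiting = preparesWaitingForPrePrepare.get((view_no, pp_seq_no))
    while waiting:
        process(waiting.popleft())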