Code example #1
    def __init__(self, data: ConsensusSharedData, timer: TimerService, bus: InternalBus, network: ExternalBus,
                 stasher: StashingRouter):
        self._config = getConfig()
        self._logger = getlogger()

        self._data = data
        self._new_view_builder = NewViewBuilder(self._data)
        self._timer = timer
        self._bus = bus
        self._network = network
        self._router = stasher
        self._new_view = None  # type: Optional[NewView]
        self._resend_inst_change_timer = RepeatingTimer(self._timer,
                                                        self._config.NEW_VIEW_TIMEOUT,
                                                        partial(self._propose_view_change,
                                                                Suspicions.INSTANCE_CHANGE_TIMEOUT.code),
                                                        active=False)

        self._router.subscribe(ViewChange, self.process_view_change_message)
        self._router.subscribe(ViewChangeAck, self.process_view_change_ack_message)
        self._router.subscribe(NewView, self.process_new_view_message)

        self._old_prepared = {}  # type: Dict[int, BatchID]
        self._old_preprepared = {}  # type: Dict[int, List[BatchID]]
        self._primaries_selector = RoundRobinPrimariesSelector()

        self._subscription = Subscription()
        self._subscription.subscribe(self._bus, NeedViewChange, self.process_need_view_change)
Code example #2
    def __init__(self, data: ConsensusSharedData, timer: TimerService, bus: InternalBus, network: ExternalBus,
                 stasher: StashingRouter, primaries_selector: PrimariesSelector):
        self._config = getConfig()

        self._data = data
        self._new_view_builder = NewViewBuilder(self._data)
        self._timer = timer
        self._bus = bus
        self._network = network
        self._router = stasher

        # Last successfully completed viewNo.
        # In some cases the view_change process may not complete in time,
        # so we keep track of which viewNo was the last completed view_change.
        self.last_completed_view_no = self._data.view_no

        self._resend_inst_change_timer = RepeatingTimer(self._timer,
                                                        self._config.NEW_VIEW_TIMEOUT,
                                                        self._propose_view_change_not_complete_in_time,
                                                        active=False)

        self._old_prepared = {}  # type: Dict[int, BatchID]
        self._old_preprepared = {}  # type: Dict[int, List[BatchID]]
        self._stashed_vc_msgs = {}  # type: Dict[int, int]
        self._primaries_selector = primaries_selector

        self._subscription = Subscription()
        self._subscription.subscribe(self._router, ViewChange, self.process_view_change_message)
        self._subscription.subscribe(self._router, ViewChangeAck, self.process_view_change_ack_message)
        self._subscription.subscribe(self._router, NewView, self.process_new_view_message)
        self._subscription.subscribe(self._bus, NeedViewChange, self.process_need_view_change)
Code example #4
File: waits.py  Project: michaeldboyd/indy-plenum
def expectedPoolCatchupTime(nodeCount):
    """
    From: the consistency proof procedure is finished
    To: each of the Nodes finished the catchup procedure
    """
    config = getConfig()
    return nodeCount * config.CatchupTransactionsTimeout
Code example #5
File: stacked.py  Project: surabhiagrawal89/plenum
    def __init__(self, *args, **kwargs):
        checkPortAvailable(kwargs['ha'])
        basedirpath = kwargs.get('basedirpath')
        keep = RoadKeep(basedirpath=basedirpath,
                        stackname=kwargs['name'],
                        auto=kwargs.get('auto'),
                        baseroledirpath=basedirpath)  # type: RoadKeep
        kwargs['keep'] = keep
        localRoleData = keep.loadLocalRoleData()

        sighex = kwargs.pop('sighex', None) or localRoleData['sighex']
        if not sighex:
            (sighex, _), (prihex, _) = getEd25519AndCurve25519Keys()
        else:
            prihex = ed25519SkToCurve25519(sighex, toHex=True)
        kwargs['sigkey'] = sighex
        kwargs['prikey'] = prihex
        self.msgHandler = kwargs.pop('msgHandler', None)  # type: Callable
        super().__init__(*args, **kwargs)
        if self.ha[1] != kwargs['ha'].port:
            error("the stack port number has changed, likely due to "
                  "information in the keep. {} passed {}, actual {}".format(
                      kwargs['name'], kwargs['ha'].port, self.ha[1]))
        self.created = time.perf_counter()
        self.coro = None
        config = getConfig()
        try:
            self.messageTimeout = config.RAETMessageTimeout
        except AttributeError:
            # if no timeout is set then message will never timeout
            self.messageTimeout = 0
Code example #6
    def __init__(self, name: str, validators: List[str], primary_name: str,
                 timer: TimerService, bus: InternalBus, network: ExternalBus,
                 write_manager: WriteRequestManager,
                 bls_bft_replica: BlsBftReplica=None):
        self._data = ConsensusSharedData(name, validators, 0)
        self._data.primary_name = primary_name
        config = getConfig()
        stasher = StashingRouter(config.REPLICA_STASH_LIMIT, buses=[bus, network])
        self._orderer = OrderingService(data=self._data,
                                        timer=timer,
                                        bus=bus,
                                        network=network,
                                        write_manager=write_manager,
                                        bls_bft_replica=bls_bft_replica,
                                        freshness_checker=FreshnessChecker(
                                            freshness_timeout=config.STATE_FRESHNESS_UPDATE_INTERVAL),
                                        stasher=stasher)
        self._checkpointer = CheckpointService(self._data, bus, network, stasher,
                                               write_manager.database_manager)
        self._view_changer = ViewChangeService(self._data, timer, bus, network, stasher)

        # TODO: This is just for testing purposes only
        self._data.checkpoints.append(
            Checkpoint(instId=0, viewNo=0, seqNoStart=0, seqNoEnd=0,
                       digest='4F7BsTMVPKFshM1MwLf6y23cid6fL3xMpazVoF9krzUw'))
Code example #7
    def __init__(self,
                 data: ConsensusSharedData,
                 bus: InternalBus,
                 network: ExternalBus,
                 stasher: StashingRouter,
                 db_manager: DatabaseManager,
                 old_stasher: ReplicaStasher,
                 is_master=True):
        self._data = data
        self._bus = bus
        self._network = network
        self._checkpoint_state = SortedDict(lambda k: k[1])
        self._stasher = stasher
        self._is_master = is_master
        self._validator = CheckpointMsgValidator(self._data)
        self._db_manager = db_manager

        # Stashed checkpoints for each view. The key of the outermost
        # dictionary is the view_no, value being a dictionary with key as the
        # range of the checkpoint and its value again being a mapping between
        # senders and their sent checkpoint
        # Dict[view_no, Dict[(seqNoStart, seqNoEnd),  Dict[sender, Checkpoint]]]
        self._stashed_recvd_checkpoints = {}

        self._config = getConfig()
        self._logger = getlogger()

        self._old_stasher = old_stasher
Code example #8
    def __init__(
            self,
            data: ConsensusSharedData,
            bus: InternalBus,
            network: ExternalBus,
            stasher: StashingRouter,
            db_manager: DatabaseManager,
            metrics: MetricsCollector = NullMetricsCollector(),
    ):
        self._data = data
        self._bus = bus
        self._network = network
        self._stasher = stasher
        self._subscription = Subscription()
        self._validator = CheckpointMsgValidator(self._data)
        self._db_manager = db_manager
        self.metrics = metrics

        # Received checkpoints, mapping CheckpointKey -> List(node_alias)
        self._received_checkpoints = defaultdict(
            set)  # type: Dict[CheckpointService.CheckpointKey, Set[str]]

        self._config = getConfig()
        self._logger = getlogger()

        self._subscription.subscribe(stasher, Checkpoint,
                                     self.process_checkpoint)

        self._subscription.subscribe(bus, Ordered, self.process_ordered)
        self._subscription.subscribe(bus, BackupSetupLastOrdered,
                                     self.process_backup_setup_last_ordered)
        self._subscription.subscribe(bus, NewViewAccepted,
                                     self.process_new_view_accepted)
Code example #9
    def __init__(self,
                 data: ConsensusSharedData,
                 timer: TimerService,
                 bus: InternalBus,
                 network: ExternalBus,
                 db_manager: DatabaseManager,
                 stasher: StashingRouter,
                 is_master_degraded: Callable[[], bool],
                 metrics: MetricsCollector = NullMetricsCollector()):
        self._data = data
        self._timer = timer
        self._bus = bus
        self._network = network
        self._stasher = stasher
        self._is_master_degraded = is_master_degraded
        self.metrics = metrics

        self._config = getConfig()

        self._instance_changes = \
            InstanceChangeProvider(outdated_ic_interval=self._config.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL,
                                   node_status_db=db_manager.get_store(NODE_STATUS_DB_LABEL),
                                   time_provider=timer.get_current_time)

        self._subscription = Subscription()
        self._subscription.subscribe(bus, VoteForViewChange, self.process_vote_for_view_change)
        self._subscription.subscribe(bus, NewViewAccepted, self.process_new_view_accepted)
        self._subscription.subscribe(stasher, InstanceChange, self.process_instance_change)
Code example #10
    def __init__(self,
                 owner,
                 ownedByNode: bool = True,
                 postAllLedgersCaughtUp: Optional[Callable] = None,
                 preCatchupClbk: Optional[Callable] = None,
                 ledger_sync_order: Optional[List] = None):
        # If ledger_sync_order is not provided (is None), it is assumed that
        # `postCatchupCompleteClbk` of the LedgerInfo will be used
        self.owner = owner
        self.ownedByNode = ownedByNode
        self.postAllLedgersCaughtUp = postAllLedgersCaughtUp
        self.preCatchupClbk = preCatchupClbk
        self.ledger_sync_order = ledger_sync_order

        self.config = getConfig()
        # Needs to schedule actions. The owner of the manager has the
        # responsibility of calling its `_serviceActions` method periodically
        HasActionQueue.__init__(self)

        # Holds ledgers of different types with
        # their info like callbacks, state, etc
        self.ledgerRegistry = {}  # type: Dict[int, LedgerInfo]

        # Largest 3 phase key received during catchup.
        # This field is needed to discard any stashed 3PC messages or
        # ordered messages since the transactions part of those messages
        # will be applied when they are received through the catchup process
        self.last_caught_up_3PC = (0, 0)
Code example #11
def test_msg_len_limit_large_enough_for_preprepare():
    config = getConfig()

    batch_size = config.Max3PCBatchSize
    requests = [Request(signatures={})] * batch_size
    req_idr = [req.digest for req in requests]
    digest = Replica.batchDigest(requests)
    state_root = Base58Serializer().serialize(BLANK_ROOT)
    txn_root = Ledger.hashToStr(CompactMerkleTree().root_hash)

    pp = PrePrepare(
        0,
        0,
        0,
        get_utc_epoch(),
        req_idr,
        init_discarded(),
        digest,
        0,
        state_root,
        txn_root,
        0,
        True)

    assert len(ZStack.serializeMsg(pp)) <= config.MSG_LEN_LIMIT
Code example #12
File: load.py  Project: aigoncharov/plenum
def load():
    port = genHa()[1]
    ha = HA('0.0.0.0', port)
    name = "hello"
    wallet = Wallet(name)
    wallet.addIdentifier(signer=SimpleSigner(
        seed=b'000000000000000000000000Steward1'))
    client = Client(name, ha=ha)
    with Looper(debug=getConfig().LOOPER_DEBUG) as looper:
        looper.add(client)
        print('Will send {} reqs in all'.format(numReqs))
        requests = sendRandomRequests(wallet, client, numReqs)
        start = perf_counter()
        for i in range(0, numReqs, numReqs // splits):
            print('Will wait for {} now'.format(numReqs // splits))
            s = perf_counter()
            reqs = requests[i:i + numReqs // splits + 1]
            waitForSufficientRepliesForRequests(looper,
                                                client,
                                                requests=reqs,
                                                fVal=2,
                                                customTimeoutPerReq=3)
            print('>>> Got replies for {} requests << in {}'.format(
                numReqs // splits,
                perf_counter() - s))
        end = perf_counter()
        print('>>>{}<<<'.format(end - start))
        exit(0)
Code example #13
    def __init__(self, data: ConsensusSharedData, timer: TimerService,
                 bus: InternalBus, network: ExternalBus,
                 stasher: StashingRouter):
        self._config = getConfig()
        self._logger = getlogger()

        self._data = data
        self._new_view_builder = NewViewBuilder(self._data)
        self._timer = timer
        self._bus = bus
        self._network = network
        self._router = stasher
        self._votes = ViewChangeVotesForView(self._data.quorums)
        self._new_view = None  # type: Optional[NewView]

        self._router.subscribe(ViewChange, self.process_view_change_message)
        self._router.subscribe(ViewChangeAck,
                               self.process_view_change_ack_message)
        self._router.subscribe(NewView, self.process_new_view_message)

        self._old_prepared = {}  # type: Dict[int, BatchID]
        self._old_preprepared = {}  # type: Dict[int, List[BatchID]]
        self._primaries_selector = RoundRobinPrimariesSelector()

        self._subscription = Subscription()
        self._subscription.subscribe(self._bus, NeedViewChange,
                                     self.process_need_view_change)
Code example #14
    def __init__(self,
                 data: ConsensusSharedData,
                 timer: TimerService,
                 bus: InternalBus,
                 network: ExternalBus,
                 metrics: MetricsCollector = NullMetricsCollector()):
        self._data = data
        self._timer = timer
        self._bus = bus
        self._network = network
        self.metrics = metrics

        self._config = getConfig()

        self._primary_disconnection_time = timer.get_current_time()  # type: Optional[float]

        self._propose_view_change_timer = RepeatingTimer(timer=timer,
                                                         interval=self._config.NEW_VIEW_TIMEOUT,
                                                         callback=self._propose_view_change_if_needed,
                                                         active=False)

        self._subscription = Subscription()
        self._subscription.subscribe(network, ExternalBus.Connected, self.process_connected)
        self._subscription.subscribe(network, ExternalBus.Disconnected, self.process_disconnected)
        self._subscription.subscribe(bus, PrimarySelected, self.process_primary_selected)
        self._subscription.subscribe(bus, NodeStatusUpdated, self.process_node_status_updated)

        if self._data.is_master:
            self._schedule_primary_connection_check(delay=self._config.INITIAL_PROPOSE_VIEW_CHANGE_TIMEOUT)
Code example #15
    def __init__(self,
                 data: ConsensusSharedData,
                 timer: TimerService,
                 bus: InternalBus,
                 network: ExternalBus,
                 freshness_checker: FreshnessChecker,
                 get_time_for_3pc_batch: Optional[Callable[[], int]] = None,
                 metrics: MetricsCollector = NullMetricsCollector()):
        self._data = data
        self._timer = timer
        self._bus = bus
        self._network = network
        self.metrics = metrics

        self._freshness_checker = freshness_checker
        self._get_time_for_3pc_batch = get_time_for_3pc_batch if get_time_for_3pc_batch is not None else get_utc_epoch

        self._config = getConfig()

        # Start periodic freshness check
        state_freshness_update_interval = self._config.STATE_FRESHNESS_UPDATE_INTERVAL
        if state_freshness_update_interval > 0:
            self._check_freshness_timer = RepeatingTimer(
                self._timer, state_freshness_update_interval,
                self._check_freshness)
Code example #16
 def __init__(self, tmpdir):
     self.basedirpath = tmpdir
     self.name = 'Node1'
     self.f = 1
     self.replicas = []
     self.rank = None
     self.allNodeNames = [self.name, 'Node2', 'Node3', 'Node4']
     self.nodeReg = {name: HA("127.0.0.1", 0) for name in self.allNodeNames}
     self.totalNodes = len(self.allNodeNames)
     self.mode = Mode.starting
     self.replicas = [
         Replica(node=self, instId=0, isMaster=True),
         Replica(node=self, instId=1, isMaster=False),
         Replica(node=self, instId=2, isMaster=False),
     ]
     self._found = False
     self.ledgerManager = LedgerManager(self, ownedByNode=True)
     ledger0 = FakeLedger(0, 10)
     ledger1 = FakeLedger(1, 5)
     self.ledgerManager.addLedger(0, ledger0)
     self.ledgerManager.addLedger(1, ledger1)
     self.quorums = Quorums(self.totalNodes)
     self.config = getConfig()  # TODO do we need fake object here?
     self.view_changer = ViewChanger(self)
     self.elector = PrimarySelector(self)
Code example #17
File: conftest.py  Project: evernym/plenum
def logcapture(request, whitelist, concerningLogLevels):
    baseWhitelist = ['seconds to run once nicely',
                     'Executing %s took %.3f seconds',
                     'is already stopped',
                     'Error while running coroutine',
                     # TODO: This is too specific, move it to the particular test
                     "Beta discarding message INSTANCE_CHANGE(viewNo='BAD') "
                     "because field viewNo has incorrect type: <class 'str'>"
                     ]
    wlfunc = inspect.isfunction(whitelist)

    def tester(record):
        isBenign = record.levelno not in concerningLogLevels
        # TODO is this sufficient to test if a log is from test or not?
        isTest = os.path.sep + 'test' in record.pathname

        if wlfunc:
            wl = whitelist()
        else:
            wl = whitelist

        whiteListedExceptions = baseWhitelist + wl
        isWhiteListed = bool([w for w in whiteListedExceptions
                              if w in str(record.msg)])
        if not (isBenign or isTest or isWhiteListed):
            raise BlowUp("{}: {} ".format(record.levelname, record.msg))

    ch = TestingHandler(tester)
    logging.getLogger().addHandler(ch)

    request.addfinalizer(lambda: logging.getLogger().removeHandler(ch))
    config = getConfig(tdir)
    for k, v in overriddenConfigValues.items():
        setattr(config, k, v)
Code example #18
File: stacked.py  Project: evernym/plenum
    def __init__(self, *args, **kwargs):
        checkPortAvailable(kwargs['ha'])
        basedirpath = kwargs.get('basedirpath')
        keep = RoadKeep(basedirpath=basedirpath,
                        stackname=kwargs['name'],
                        auto=kwargs.get('auto'),
                        baseroledirpath=basedirpath)  # type: RoadKeep
        kwargs['keep'] = keep
        localRoleData = keep.loadLocalRoleData()

        sighex = kwargs.pop('sighex', None) or localRoleData['sighex']
        if not sighex:
            (sighex, _), (prihex, _) = getEd25519AndCurve25519Keys()
        else:
            prihex = ed25519SkToCurve25519(sighex, toHex=True)
        kwargs['sigkey'] = sighex
        kwargs['prikey'] = prihex
        self.msgHandler = kwargs.pop('msgHandler', None)  # type: Callable
        super().__init__(*args, **kwargs)
        if self.ha[1] != kwargs['ha'].port:
            error("the stack port number has changed, likely due to "
                  "information in the keep. {} passed {}, actual {}".
                  format(kwargs['name'], kwargs['ha'].port, self.ha[1]))
        self.created = time.perf_counter()
        self.coro = None
        config = getConfig()
        try:
            self.messageTimeout = config.RAETMessageTimeout
        except AttributeError:
            # if no timeout is set then message will never timeout
            self.messageTimeout = 0
Code example #19
    def __init__(self, name: str, validators: List[str], inst_id: int):
        self._name = name
        self.inst_id = inst_id
        self.view_no = 0
        self.waiting_for_new_view = False
        self.primaries = []

        self.legacy_vc_in_progress = False
        self.requests = Requests()
        self.last_ordered_3pc = (0, 0)
        self.primary_name = None
        # seqNoEnd of the last stabilized checkpoint
        self.stable_checkpoint = 0
        # Checkpoint messages which the current node sent.
        self.checkpoints = SortedListWithKey(
            key=lambda checkpoint: checkpoint.seqNoEnd)
        # List of PrePrepare messages, for which quorum of Prepare messages is not reached yet
        self.preprepared = []  # type:  List[PrePrepare]
        # List of PrePrepare messages, for which quorum of Prepare messages is reached
        self.prepared = []  # type:  List[PrePrepare]
        self._validators = None
        self._quorums = None
        self.set_validators(validators)
        self._low_watermark = 0
        self.log_size = getConfig().LOG_SIZE
        self.high_watermark = self.low_watermark + self.log_size
        self.pp_seq_no = 0
        self.node_mode = Mode.starting
        # ToDo: it should be set in view_change_service before view_change starting
        self.legacy_last_prepared_before_view_change = None
Code example #20
def logcapture(request, whitelist, concerningLogLevels):
    baseWhitelist = ['seconds to run once nicely',
                     'Executing %s took %.3f seconds',
                     'is already stopped',
                     'Error while running coroutine',
                     'not trying any more because',
                     # TODO: This is too specific, move it to the particular
                     # test
                     "Beta discarding message INSTANCE_CHANGE(viewNo='BAD') "
                     "because field viewNo has incorrect type: <class 'str'>",
                     'got exception while closing hash store',
                     # TODO: Remove these once the relevant bugs are fixed
                     '.+ failed to ping .+ at',
                     'discarding message (NOMINATE|PRIMARY)',
                     '.+ rid .+ has been removed',
                     'last try...',
                     'has uninitialised socket',
                     'to have incorrect time',
                     'time not acceptable'
                     ]
    wlfunc = inspect.isfunction(whitelist)

    def tester(record):
        isBenign = record.levelno not in concerningLogLevels
        # TODO is this sufficient to test if a log is from test or not?
        isTest = os.path.sep + 'test' in record.pathname

        if wlfunc:
            wl = whitelist()
        else:
            wl = whitelist

        whiteListedExceptions = baseWhitelist + wl

        # Converting the log message to its string representation, the log
        # message can be an arbitrary object
        if not (isBenign or isTest):
            msg = str(record.msg)
            isWhiteListed = any(re.search(w, msg)
                                for w in whiteListedExceptions)
            if not isWhiteListed:
                # Stopping all loopers, so prodables like nodes, clients, etc stop.
                #  This helps in freeing ports
                for fv in request._fixture_values.values():
                    if isinstance(fv, Looper):
                        fv.stopall()
                    if isinstance(fv, Prodable):
                        fv.stop()
                raise BlowUp("{}: {} ".format(record.levelname, record.msg))

    ch = TestingHandler(tester)
    logging.getLogger().addHandler(ch)

    def cleanup():
        logging.getLogger().removeHandler(ch)

    request.addfinalizer(cleanup)
    config = getConfig(tdir)
    for k, v in overriddenConfigValues.items():
        setattr(config, k, v)
Code example #21
 def __init__(self, stackParams: dict, msgHandler: Callable, seed=None,
              config=None):
     config = config or getConfig()
     SimpleZStack.__init__(self, stackParams, msgHandler, seed=seed,
                           onlyListener=True, config=config)
     MessageProcessor.__init__(self, allowDictOnly=False)
     self.connectedClients = set()
Code example #22
 def getPluginPath(name):
     config = getConfig()
     if PLUGIN_BASE_DIR_PATH in config.DefaultPluginPath:
         return os.path.join(config.DefaultPluginPath.get(
             PLUGIN_BASE_DIR_PATH), name)
     else:
         curPath = os.path.dirname(os.path.abspath(__file__))
         return os.path.join(curPath, name)
Code example #23
def expectedClientToPoolConnectionTimeout(nodeCount):
    """
    From: the Client is not connected to the Pool
    To: the Client is connected to the Pool
    """
    config = getConfig()
    return config.ExpectedConnectTime * nodeCount + \
           config.RETRY_TIMEOUT_RESTRICTED
Code example #24
File: ledger.py  Project: zhigunenko-dsr/indy-plenum
 def _defaultStore(dataDir,
                   logName,
                   ensureDurability,
                   open=True,
                   config=None) -> KeyValueStorage:
     config = config or getConfig()
     return initKeyValueStorageIntKeys(config.transactionLogDefaultStorage,
                                       dataDir, logName, open)
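Several of the examples here (for instance #21, #24, and #25) rely on the same fallback idiom: the caller may inject a config object, and getConfig() is consulted only when none is given. A minimal sketch of that idiom, assuming plenum is importable; the helper name and its return value are illustrative, not taken from the project.

from plenum.common.config_util import getConfig

def open_default_store(data_dir, name, config=None):
    # Fall back to the global plenum config only when the caller did not
    # inject one; tests can pass a lightweight stub config instead.
    config = config or getConfig()
    # Mirrors example #24: the key-value backend is selected by configuration
    # (config.transactionLogDefaultStorage) rather than hard-coded here.
    return config.transactionLogDefaultStorage, data_dir, name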
Code example #25
 def __init__(self, stackParams: dict, msgHandler: Callable,
              registry: Dict[str, HA], seed=None, sighex: str=None,
              config=None):
     config = config or getConfig()
     Batched.__init__(self)
     KITZStack.__init__(self, stackParams, msgHandler, registry=registry,
                        seed=seed, sighex=sighex, config=config)
     MessageProcessor.__init__(self, allowDictOnly=False)
Code example #26
    def __init__(self,
                 name: str,
                 validators: List[str],
                 primary_name: str,
                 timer: TimerService,
                 bus: InternalBus,
                 network: ExternalBus,
                 write_manager: WriteRequestManager,
                 bls_bft_replica: BlsBftReplica = None):
        # ToDo: Maybe ConsensusSharedData should be initiated before and passed already prepared?
        self._internal_bus = bus
        self._data = ConsensusSharedData(name, validators, 0)
        self._data.primary_name = generateName(primary_name,
                                               self._data.inst_id)
        self.config = getConfig()
        self.stasher = StashingRouter(self.config.REPLICA_STASH_LIMIT,
                                      buses=[bus, network])
        self._write_manager = write_manager
        self._primaries_selector = RoundRobinNodeRegPrimariesSelector(
            self._write_manager.node_reg_handler)
        self._orderer = OrderingService(
            data=self._data,
            timer=timer,
            bus=bus,
            network=network,
            write_manager=self._write_manager,
            bls_bft_replica=bls_bft_replica,
            freshness_checker=FreshnessChecker(
                freshness_timeout=self.config.STATE_FRESHNESS_UPDATE_INTERVAL),
            primaries_selector=self._primaries_selector,
            stasher=self.stasher)
        self._checkpointer = CheckpointService(self._data, bus, network,
                                               self.stasher,
                                               write_manager.database_manager)
        self._view_changer = ViewChangeService(self._data, timer, bus, network,
                                               self.stasher,
                                               self._primaries_selector)
        self._message_requestor = MessageReqService(self._data, bus, network)

        self._add_ledgers()

        # TODO: This is just for testing purposes only
        self._data.checkpoints.append(
            Checkpoint(instId=0,
                       viewNo=0,
                       seqNoStart=0,
                       seqNoEnd=0,
                       digest='4F7BsTMVPKFshM1MwLf6y23cid6fL3xMpazVoF9krzUw'))

        # ToDo: it should be done in Zero-view stage.
        write_manager.on_catchup_finished()
        self._data.primaries = self._view_changer._primaries_selector.select_primaries(
            self._data.view_no)

        # ToDo: ugly way to understand node_reg changing
        self._previous_node_reg = self._write_manager.node_reg_handler.committed_node_reg

        bus.subscribe(Ordered, self.emulate_ordered_processing)
Code example #27
File: waits.py  Project: michaeldboyd/indy-plenum
def expectedClientCatchupTime(nodeCount):
    """
    From: the Client finished the consistency proof procedure
    To: the Client finished the catchup procedure
    """
    config = getConfig()
    qN = Quorums(nodeCount).commit.value
    return qN * 2 * __Peer2PeerRequestExchangeTime + \
           config.CatchupTransactionsTimeout
Code example #28
File: waits.py  Project: michaeldboyd/indy-plenum
def expectedClientConsistencyProof(nodeCount):
    """
    From: the Client is connected to the Pool
    To: the Client finished the consistency proof procedure
    """
    config = getConfig()
    qN = Quorums(nodeCount).commit.value
    return qN * __Peer2PeerRequestExchangeTime + \
           config.ConsistencyProofsTimeout
Code example #29
File: waits.py  Project: tbohsali/indy-plenum
def expectedClientCatchupTime(nodeCount):
    """
    From: the Client finished the consistency proof procedure
    To: the Client finished the catchup procedure
    """
    config = getConfig()
    qN = Quorums(nodeCount).commit.value
    return qN * 2 * __Peer2PeerRequestExchangeTime + \
        config.CatchupTransactionsTimeout
Code example #30
File: waits.py  Project: tbohsali/indy-plenum
def expectedClientConsistencyProof(nodeCount):
    """
    From: the Client is connected to the Pool
    To: the Client finished the consistency proof procedure
    """
    config = getConfig()
    qN = Quorums(nodeCount).commit.value
    return qN * __Peer2PeerRequestExchangeTime + \
        config.ConsistencyProofsTimeout
Code example #31
File: waits.py  Project: tbohsali/indy-plenum
def expectedPoolCatchupTime(nodeCount):
    """
    From: the consistency proof procedure is finished
    To: each of the Nodes finished the catchup procedure
    """
    config = getConfig()
    nodeCatchupTimeout = __Peer2PeerRequestExchangeTime + \
        config.CatchupTransactionsTimeout
    return nodeCount * nodeCatchupTimeout
Code example #32
File: plugin_helper.py  Project: evernym/plenum
def loadPlugins(baseDir):
    global pluginsLoaded

    alreadyLoadedPlugins = pluginsLoaded.get(baseDir)
    i = 0
    if alreadyLoadedPlugins:
        logger.debug("Plugins {} are already loaded from basedir: {}".format(
            alreadyLoadedPlugins, baseDir))
    else:
        logger.debug(
            "Plugin loading started to load plugins from basedir: {}".format(
                baseDir))

        config = getConfig()
        pluginsDirPath = os.path.expanduser(os.path.join(
            baseDir, config.PluginsDir))

        if not os.path.exists(pluginsDirPath):
            os.makedirs(pluginsDirPath)
            logger.debug("Plugin directory created at: {}".format(
                pluginsDirPath))

        if hasattr(config, "PluginsToLoad"):
            for pluginName in config.PluginsToLoad:
                try:
                    pluginPath = os.path.expanduser(os.path.join(pluginsDirPath,
                                              pluginName + ".py"))
                    if os.path.exists(pluginPath):
                        spec = spec_from_file_location(
                            pluginName,
                            pluginPath)
                        plugin = module_from_spec(spec)
                        spec.loader.exec_module(plugin)
                        if baseDir in pluginsLoaded:
                            pluginsLoaded[baseDir].add(pluginName)
                        else:
                            pluginsLoaded[baseDir] = {pluginName}
                        i += 1
                    else:
                        if not pluginsNotFound.get(pluginPath):
                            logger.warn("Note: Plugin file does not exists: {}. "
                                        "Create plugin file if you want to load it"
                                        .format(pluginPath), extra={"cli": False})
                            pluginsNotFound[pluginPath] = "Notified"

                except Exception as ex:
                    # TODO: Is this strategy ok to catch any exception and
                    # just print the error and continue,
                    # or it should fail if there is error in plugin loading
                    logger.warn(
                        "** Error occurred during loading plugin {}: {}"
                            .format(pluginPath, str(ex)))

    logger.debug(
        "Total plugins loaded from basedir {} are : {}".format(baseDir, i))
    return i
Code example #33
File: plugin_helper.py  Project: aigoncharov/plenum
def loadPlugins(baseDir):
    global pluginsLoaded

    alreadyLoadedPlugins = pluginsLoaded.get(baseDir)
    i = 0
    if alreadyLoadedPlugins:
        logger.debug("Plugins {} are already loaded from basedir: {}".format(
            alreadyLoadedPlugins, baseDir))
    else:
        logger.debug(
            "Plugin loading started to load plugins from basedir: {}".format(
                baseDir))

        config = getConfig()
        pluginsDirPath = os.path.expanduser(
            os.path.join(baseDir, config.PluginsDir))

        if not os.path.exists(pluginsDirPath):
            os.makedirs(pluginsDirPath)
            logger.debug(
                "Plugin directory created at: {}".format(pluginsDirPath))

        if hasattr(config, "PluginsToLoad"):
            for pluginName in config.PluginsToLoad:
                try:
                    pluginPath = os.path.expanduser(
                        os.path.join(pluginsDirPath, pluginName + ".py"))
                    if os.path.exists(pluginPath):
                        spec = spec_from_file_location(pluginName, pluginPath)
                        plugin = module_from_spec(spec)
                        spec.loader.exec_module(plugin)
                        if baseDir in pluginsLoaded:
                            pluginsLoaded[baseDir].add(pluginName)
                        else:
                            pluginsLoaded[baseDir] = {pluginName}
                        i += 1
                    else:
                        if not pluginsNotFound.get(pluginPath):
                            logger.warning(
                                "Note: Plugin file does not exists: {}. "
                                "Create plugin file if you want to load it".
                                format(pluginPath),
                                extra={"cli": False})
                            pluginsNotFound[pluginPath] = "Notified"

                except Exception as ex:
                    # TODO: Is this strategy ok to catch any exception and
                    # just print the error and continue,
                    # or it should fail if there is error in plugin loading
                    logger.warning(
                        "** Error occurred during loading plugin {}: {}".
                        format(pluginPath, str(ex)))

    logger.debug("Total plugins loaded from basedir {} are : {}".format(
        baseDir, i))
    return i
Code example #34
File: waits.py  Project: tbohsali/indy-plenum
def expectedPoolConsistencyProof(nodeCount):
    """
    From: any time the Pool ready for the consistency proof procedure
    To: each of the Nodes finish the consistency proof procedure
        (ready for catchup if it is needed)
    """
    config = getConfig()
    nodeCPTimeout = __Peer2PeerRequestExchangeTime + \
        config.ConsistencyProofsTimeout
    return nodeCount * nodeCPTimeout
Code example #35
File: waits.py  Project: michaeldboyd/indy-plenum
def expectedPoolConsistencyProof(nodeCount):
    """
    From: any time the Pool ready for the consistency proof procedure
    To: each of the Nodes finish the consistency proof procedure
        (ready for catchup if it is needed)
    """
    config = getConfig()
    nodeCPTimeout = __Peer2PeerRequestExchangeTime + \
                    config.ConsistencyProofsTimeout
    return nodeCount * nodeCPTimeout
Code example #36
 def __init__(self, database_manager: DatabaseManager):
     super().__init__()
     self.database_manager = database_manager
     self.batch_handlers = {}  # type: Dict[int,List[BatchRequestHandler]]
     self.state_serializer = pool_state_serializer
     self.audit_b_handler = None
     self.node_reg_handler = None
     self.config = getConfig()
     self._request_handlers_with_version = {
     }  # type: Dict[Tuple[int, str], List[WriteRequestHandler]]
Code example #37
    def _getDefaultPluginsByType(typ):
        config = getConfig()
        allPluginsPath = []

        if typ in config.DefaultPluginPath:
            allPluginsPath.append(PluginLoaderHelper.getPluginPath(
                config.DefaultPluginPath.get(typ)))
            allPlugins = PluginLoaderHelper._getAllPlugins(allPluginsPath)
            return allPlugins.get(typ, [])
        else:
            return []
Code example #38
 def __init__(self, stackParams: dict, msgHandler: Callable,
              registry: Dict[str, HA], seed=None, sighex: str=None,
              config=None, metrics=NullMetricsCollector()):
     config = config or getConfig()
     Batched.__init__(self, config=config, metrics=metrics)
     KITZStack.__init__(self, stackParams, msgHandler, registry=registry,
                        seed=seed, sighex=sighex, config=config,
                        metrics=metrics,
                        mt_incoming_size=MetricType.INCOMING_NODE_MESSAGE_SIZE,
                        mt_outgoing_size=MetricType.OUTGOING_NODE_MESSAGE_SIZE)
     MessageProcessor.__init__(self, allowDictOnly=False)
Code example #39
 def _defaultStore(dataDir,
                   logName,
                   ensureDurability,
                   open=True,
                   config=None,
                   read_only=False) -> KeyValueStorage:
     config = config or getConfig()
     return initKeyValueStorageIntKeys(config.transactionLogDefaultStorage,
                                       dataDir, logName, open, read_only=read_only,
                                       db_config=config.db_transactions_config,
                                       txn_serializer=ledger_txn_serializer)
Code example #40
 def __init__(self, node, config=None):
     self._node = node
     self._config = config or getConfig()
     self._db = None
     self._use_db = self._config.VALIDATOR_INFO_USE_DB
     self.__name = self._node.name
     self.__node_info_dir = self._node.node_info_dir
     self.dump_version_info()
     if self._use_db:
         self._db = KeyValueStorageRocksdbIntKeys(self.__node_info_dir,
                                                  self.GENERAL_DB_NAME_TEMPLATE.format(
                                                      node_name=self.__name.lower()))
Code example #41
File: ledger_manager.py  Project: evernym/plenum
    def __init__(self, owner, ownedByNode: bool=True):
        self.owner = owner
        self.ownedByNode = ownedByNode
        self.config = getConfig()
        # Needs to schedule actions. The owner of the manager has the
        # responsibility of calling its `_serviceActions` method periodically.
        HasActionQueue.__init__(self)

        # Holds ledgers of different types with their info like the ledger
        # object, various callbacks, state (can be synced, is already synced,
        # etc).
        self.ledgers = {}   # type: Dict[int, Dict[str, Any]]

        # Ledger statuses received while the ledger was not ready to be synced
        # (`canSync` was set to False)
        self.stashedLedgerStatuses = {}  # type: Dict[int, deque]

        # Dict of sets with each set corresponding to a ledger
        # Each set tracks which nodes claim that this node's ledger status is
        # ok; if a quorum of nodes (2f+1) says it is up to date then mark the
        # catchup process as completed
        self.ledgerStatusOk = {}        # type: Dict[int, Set]

        # Consistency proofs received in process of catching up.
        # Each element of the dict is the dictionary of consistency proofs
        # received for the ledger. For each dictionary key is the node name and
        # value is a consistency proof.
        self.recvdConsistencyProofs = {}  # type: Dict[int, Dict[str,
        # ConsistencyProof]]

        self.catchUpTill = {}

        # Catchup replies that need to be applied to the ledger. First element
        # of the list is a list of transactions that need to be applied to the
        # pool transaction ledger and the second element is the list of
        # transactions that need to be applied to the domain transaction ledger
        self.receivedCatchUpReplies = {}    # type: Dict[int, List]

        self.recvdCatchupRepliesFrm = {}
        # type: Dict[int, Dict[str, List[CatchupRep]]]

        # Tracks the beginning of consistency proof timer. Timer starts when the
        #  node gets f+1 consistency proofs. If the node is not able to begin
        # the catchup process even after the timer expires then it requests
        # consistency proofs.
        self.consistencyProofsTimers = {}
        # type: Dict[int, Optional[float]]

        # Tracks the beginning of catchup reply timer. Timer starts after the
        #  node sends catchup requests. If the node is not able to finish the
        # catchup process even after the timer expires then it requests
        # missing transactions.
        self.catchupReplyTimers = {}
Code example #42
File: log.py  Project: evernym/plenum
def setupLogging(log_level, raet_log_level=None, filename=None,
                 raet_log_file=None):
    """
    Setup for logging.
    log level is TRACE by default.
    """

    from plenum.common.config_util import getConfig
    # TODO: This should take directory
    config = getConfig()
    addTraceToLogging()
    addDisplayToLogging()

    logHandlers = []
    if filename:
        d = os.path.dirname(filename)
        if not os.path.exists(d):
            os.makedirs(d)
        fileHandler = TimeAndSizeRotatingFileHandler(
            filename,
            when=config.logRotationWhen,
            interval=config.logRotationInterval,
            backupCount=config.logRotationBackupCount,
            utc=True,
            maxBytes=config.logRotationMaxBytes)
        logHandlers.append(fileHandler)
    else:
        logHandlers.append(logging.StreamHandler(sys.stdout))

    fmt = logging.Formatter(fmt=config.logFormat, style=config.logFormatStyle)

    for h in logHandlers:
        if h.formatter is None:
            h.setFormatter(fmt)
        logging.root.addHandler(h)

    logging.root.setLevel(log_level)

    console = getConsole()

    defaultVerbosity = getRAETLogLevelFromConfig("RAETLogLevel",
                                                 Console.Wordage.terse, config)
    logging.info("Choosing RAET log level {}".format(defaultVerbosity),
                 extra={"cli": False})
    verbosity = raet_log_level \
        if raet_log_level is not None \
        else defaultVerbosity
    raetLogFilePath = raet_log_file or getRAETLogFilePath("RAETLogFilePath",
                                                          config)
    console.reinit(verbosity=verbosity, path=raetLogFilePath, flushy=True)
    global loggingConfigured
    loggingConfigured = True
コード例 #43
0
ファイル: waits.py プロジェクト: michaeldboyd/indy-plenum
def expectedClientToPoolConnectionTimeout(nodeCount):
    """
    From: the Client is not connected to the Pool
    To: the Client is connected to the Pool
    """
    # '+KITZStack.RETRY_TIMEOUT_RESTRICTED' is a workaround for
    # bug (`'str' object has no attribute 'keys'`) which supposed to be
    # fixed in the 3pcbatch feature
    # https://evernym.atlassian.net/browse/SOV-995
    # TODO check actual state
    config = getConfig()
    return config.ExpectedConnectTime * nodeCount + \
           config.RETRY_TIMEOUT_RESTRICTED
Code example #44
 def __init__(self, dataDir, fileNamePrefix="", db_type=HS_LEVELDB, read_only=False, config=None):
     self.dataDir = dataDir
     if db_type not in (HS_ROCKSDB, HS_LEVELDB):
         raise PlenumValueError(
             'db_type', db_type, "one of {}".format((HS_ROCKSDB, HS_LEVELDB))
         )
     self.db_type = KeyValueStorageType.Leveldb if db_type == HS_LEVELDB \
         else KeyValueStorageType.Rocksdb
     self.config = config or getConfig()
     self.nodesDb = None
     self.leavesDb = None
     self._leafCount = 0
     self._read_only = read_only
     self.nodes_db_name = fileNamePrefix + '_merkleNodes'
     self.leaves_db_name = fileNamePrefix + '_merkleLeaves'
     self.open()
Code example #45
File: helper.py  Project: michaeldboyd/indy-plenum
def initHashStore(data_dir, name, config=None, read_only=False) -> HashStore:
    """
    Create and return a hashStore implementation based on configuration
    """
    config = config or getConfig()
    hsConfig = config.hashStore['type'].lower()
    if hsConfig == HS_FILE:
        return FileHashStore(dataDir=data_dir,
                             fileNamePrefix=name)
    elif hsConfig == HS_LEVELDB or hsConfig == HS_ROCKSDB:
        return DbHashStore(dataDir=data_dir,
                           fileNamePrefix=name,
                           db_type=hsConfig,
                           read_only=read_only,
                           config=config)
    else:
        return MemoryHashStore()
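For context, a usage sketch of the factory above; the data directory and store prefix are hypothetical.

# config.hashStore['type'] selects the backend: 'file' yields a FileHashStore,
# 'leveldb' or 'rocksdb' a DbHashStore, and anything else a MemoryHashStore.
hash_store = initHashStore('/tmp/node1/data', 'domain', read_only=False)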
Code example #46
File: waits.py  Project: michaeldboyd/indy-plenum
def expectedPoolInterconnectionTime(nodeCount):
    """
    From: the Pool up
    To: the Pool is fully connected
    """
    config = getConfig()
    interconnectionCount = totalConnections(nodeCount)
    nodeConnectionTimeout = config.ExpectedConnectTime
    # '+KITZStack.RETRY_TIMEOUT_RESTRICTED' is a workaround for
    # bug (`'str' object has no attribute 'keys'`) which supposed to be
    # fixed in the 3pcbatch feature
    # https://evernym.atlassian.net/browse/SOV-995
    # TODO check actual state
    # multiply by 2 because we need to re-create connections which can be done on a second re-try only
    # (we may send pings on some of the re-tries)
    return min(0.8 * config.TestRunningTimeLimitSec,
               interconnectionCount * nodeConnectionTimeout +
               2 * config.RETRY_TIMEOUT_RESTRICTED + 2)
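To make the formula above concrete, a worked sketch with assumed numbers; the three config values below are placeholders rather than the project defaults, and treating totalConnections() as the count of pairwise links is also an assumption.

def example_pool_interconnection_time():
    # Placeholder values, assumed only for this illustration.
    expected_connect_time = 3.3       # stands in for config.ExpectedConnectTime
    retry_timeout_restricted = 15     # stands in for config.RETRY_TIMEOUT_RESTRICTED
    test_running_time_limit = 100     # stands in for config.TestRunningTimeLimitSec
    node_count = 4
    # Pairwise links between 4 nodes: 4 * 3 / 2 = 6.
    interconnection_count = node_count * (node_count - 1) // 2
    # min(0.8 * 100, 6 * 3.3 + 2 * 15 + 2) = min(80, 51.8) -> 51.8 seconds
    return min(0.8 * test_running_time_limit,
               interconnection_count * expected_connect_time +
               2 * retry_timeout_restricted + 2)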
Code example #47
File: new_client.py  Project: evernym/plenum
def run_node():

    with Looper(debug=False) as looper:
        # Nodes persist keys when bootstrapping to other nodes and reconnecting;
        # using an ephemeral temporary directory when proving a concept is a
        # nice way to keep things clean.
        config = getConfig()
        basedirpath = config.baseDir
        cliNodeReg = {k: v[0] for k, v in config.cliNodeReg.items()}
        clientName = 'Alice'

        # this seed is used by the signer to deterministically generate
        # a signature verification key that is shared out of band with the
        # consensus pool
        seed = b'22222222222222222222222222222222'
        assert len(seed) == 32
        signer = SimpleSigner(clientName, seed)

        client_address = ('0.0.0.0', 9700)

        client = Client(clientName,
                        cliNodeReg,
                        ha=client_address,
                        signer=signer,
                        basedirpath=basedirpath)
        looper.add(client)

        # give the client time to connect
        looper.runFor(3)

        # a simple message
        msg = {'life_answer': 42}

        # submit the request to the pool
        request, = client.submit_DEPRECATED(msg)

        # allow time for the request to be executed
        looper.runFor(3)

        reply, status = client.getReply(request.reqId)
        print('')
        print('Reply: {}\n'.format(reply))
        print('Status: {}\n'.format(status))
Code example #48
File: __main__.py  Project: evernym/plenum
def main(logfile: str=None, debug=None, cliClass=None):
    config = getConfig()
    nodeReg = config.nodeReg
    cliNodeReg = config.cliNodeReg
    basedirpath = config.baseDir

    if not cliClass:
        cliClass = Cli

    with Looper(debug=False) as looper:
        cli = cliClass(looper=looper,
                       basedirpath=basedirpath,
                       nodeReg=nodeReg,
                       cliNodeReg=cliNodeReg,
                       logFileName=logfile,
                       debug=debug)

        if not debug:
            looper.run(cli.shell(*sys.argv[1:]))
            print('Goodbye.')
        return cli
Code example #49
File: ledger.py  Project: michaeldboyd/indy-plenum
    def __init__(self,
                 tree: MerkleTree,
                 dataDir: str,
                 txn_serializer: MappingSerializer = None,
                 hash_serializer: MappingSerializer = None,
                 fileName: str = None,
                 ensureDurability: bool = True,
                 transactionLogStore: KeyValueStorage = None,
                 genesis_txn_initiator: GenesisTxnInitiator = None,
                 config=None,
                 read_only=False):
        """
        :param tree: an implementation of MerkleTree
        :param dataDir: the directory where the transaction log is stored
        :param serializer: an object that can serialize the data before hashing
        it and storing it in the MerkleTree
        :param fileName: the name of the transaction log file
        :param genesis_txn_initiator: file or dir to use for initialization of transaction log store
        """
        self.genesis_txn_initiator = genesis_txn_initiator

        self.dataDir = dataDir
        self.tree = tree
        self.config = config or getConfig()
        self._read_only = read_only
        self.txn_serializer = txn_serializer or ledger_txn_serializer  # type: MappingSerializer
        # type: MappingSerializer
        self.hash_serializer = hash_serializer or ledger_hash_serializer
        self.hasher = TreeHasher()
        self._transactionLog = None  # type: KeyValueStorage
        self._transactionLogName = fileName or "transactions"
        self.ensureDurability = ensureDurability
        self._customTransactionLogStore = transactionLogStore
        self.seqNo = 0
        self.start()
        self.recoverTree()
        if self.genesis_txn_initiator and self.size == 0:
            self.genesis_txn_initiator.init_ledger_from_genesis_txn(self)
Code example #50
File: conftest.py  Project: michaeldboyd/indy-plenum
def _tconf(general_config):
    config = getConfig(general_config)
    for k, v in overriddenConfigValues.items():
        setattr(config, k, v)

    # Reduce memory amplification during running tests in case of RocksDB used
    config.rocksdb_default_config['write_buffer_size'] = ROCKSDB_WRITE_BUFFER_SIZE
    config.rocksdb_default_config['db_log_dir'] = DEV_NULL_PATH

    # FIXME: much more clear solution is to check which key-value storage type is
    # used for each storage and set corresponding config, but for now only RocksDB
    # tuning is supported (now other storage implementations ignore this parameter)
    # so here we set RocksDB configs unconditionally for simplicity.
    config.db_merkle_leaves_config = config.rocksdb_default_config.copy()
    config.db_merkle_nodes_config = config.rocksdb_default_config.copy()
    config.db_state_config = config.rocksdb_default_config.copy()
    config.db_transactions_config = config.rocksdb_default_config.copy()
    config.db_seq_no_db_config = config.rocksdb_default_config.copy()
    config.db_node_status_db_config = config.rocksdb_default_config.copy()
    config.db_state_signature_config = config.rocksdb_default_config.copy()
    config.db_state_ts_db_config = config.rocksdb_default_config.copy()

    return config
Code example #51
File: client.py  Project: evernym/plenum
    def __init__(self,
                 name: str,
                 nodeReg: Dict[str, HA]=None,
                 ha: Union[HA, Tuple[str, int]]=None,
                 basedirpath: str=None,
                 config=None,
                 sighex: str=None):
        """
        Creates a new client.

        :param name: unique identifier for the client
        :param nodeReg: names and host addresses of all nodes in the pool
        :param ha: tuple of host and port
        """
        self.config = config or getConfig()
        basedirpath = self.config.baseDir if not basedirpath else basedirpath
        self.basedirpath = basedirpath

        signer = Signer(sighex)
        sighex = signer.keyhex
        verkey = rawToFriendly(signer.verraw)

        self.name = name
        self.stackName = verkey

        cha = None
        # If client information already exists in RAET then use that
        if self.exists(self.stackName, basedirpath):
            cha = getHaFromLocalEstate(self.stackName, basedirpath)
            if cha:
                cha = HA(*cha)
                logger.debug("Client {} ignoring given ha {} and using {}".
                             format(self.name, ha, cha))
        if not cha:
            cha = ha if isinstance(ha, HA) else HA(*ha)

        self.reqRepStore = self.getReqRepStore()
        self.txnLog = self.getTxnLogStore()

        self.dataDir = self.config.clientDataDir or "data/clients"
        HasFileStorage.__init__(self, self.name, baseDir=self.basedirpath,
                                dataDir=self.dataDir)

        self._ledger = None

        if not nodeReg:
            self.mode = None
            HasPoolManager.__init__(self)
            self.ledgerManager = LedgerManager(self, ownedByNode=False)
            self.ledgerManager.addLedger(0, self.ledger,
                postCatchupCompleteClbk=self.postPoolLedgerCaughtUp,
                postTxnAddedToLedgerClbk=self.postTxnFromCatchupAddedToLedger)
        else:
            cliNodeReg = OrderedDict()
            for nm, (ip, port) in nodeReg.items():
                cliNodeReg[nm] = HA(ip, port)
            self.nodeReg = cliNodeReg
            self.mode = Mode.discovered

        HasActionQueue.__init__(self)

        self.setF()

        stackargs = dict(name=self.stackName,
                         ha=cha,
                         main=False,  # stops incoming vacuous joins
                         auto=AutoMode.always)
        stackargs['basedirpath'] = basedirpath
        self.created = time.perf_counter()

        # noinspection PyCallingNonCallable
        self.nodestack = self.nodeStackClass(stackargs,
                                             self.handleOneNodeMsg,
                                             self.nodeReg,
                                             sighex)
        self.nodestack.onConnsChanged = self.onConnsChanged

        if self.nodeReg:
            logger.info("Client {} initialized with the following node registry:"
                        .format(self.name))
            lengths = [max(x) for x in zip(*[
                (len(name), len(host), len(str(port)))
                for name, (host, port) in self.nodeReg.items()])]
            fmt = "    {{:<{}}} listens at {{:<{}}} on port {{:>{}}}".format(
                *lengths)
            for name, (host, port) in self.nodeReg.items():
                logger.info(fmt.format(name, host, port))
        else:
            logger.info(
                "Client {} found an empty node registry:".format(self.name))

        Motor.__init__(self)

        self.inBox = deque()

        self.nodestack.connectNicelyUntil = 0  # don't need to connect
        # nicely as a client

        # TODO: Need to have couple of tests around `reqsPendingConnection`
        # where we check with and without pool ledger

        # Stores the requests that need to be sent to the nodes when the client
        # has made sufficient connections to the nodes.
        self.reqsPendingConnection = deque()

        # Key is a tuple of identifier and reqId; value is a tuple containing
        # the set of nodes which are expected to send REQACK
        self.expectingAcksFor = {}

        # Key is a tuple of identifier and reqId; value is a tuple containing
        # the set of nodes which are expected to send REPLY
        self.expectingRepliesFor = {}

        tp = loadPlugins(self.basedirpath)
        logger.debug("total plugins loaded in client: {}".format(tp))
Code example #52
File: monitor.py Project: michaeldboyd/indy-plenum
    def __init__(self, name: str, Delta: float, Lambda: float, Omega: float,
                 instances: Instances, nodestack,
                 blacklister: Blacklister, nodeInfo: Dict,
                 notifierEventTriggeringConfig: Dict,
                 pluginPaths: Iterable[str] = None,
                 notifierEventsEnabled: bool = True):
        self.name = name
        self.instances = instances
        self.nodestack = nodestack
        self.blacklister = blacklister
        self.nodeInfo = nodeInfo
        self.notifierEventTriggeringConfig = notifierEventTriggeringConfig
        self.notifierEventsEnabled = notifierEventsEnabled

        self.Delta = Delta
        self.Lambda = Lambda
        self.Omega = Omega
        self.statsConsumers = self.getPluginsByType(pluginPaths,
                                                    PLUGIN_TYPE_STATS_CONSUMER)

        self.config = getConfig()

        # Number of ordered requests by each replica. The value at key `i` in
        # the dict is a tuple of the number of requests ordered by the replica
        # of the `i`th protocol instance and the time it took to order them
        self.numOrderedRequests = dict()  # type: Dict[int, Tuple[int, int]]

        # Throughputs for replicas. Key is an instId and value is an instance
        # of the ThroughputMeasurement class, which provides the throughput
        # evaluation mechanism
        self.throughputs = dict()   # type: Dict[int, ThroughputMeasurement]

        # Utility object for tracking requests order start and end
        # TODO: Has very similar cleanup logic to propagator.Requests
        self.requestTracker = RequestTimeTracker(instances.ids)

        # Request latencies for the master protocol instance. Key of the
        # dictionary is a tuple of client id and request id and the value is
        # the time the master instance took to order that request
        self.masterReqLatencies = {}  # type: Dict[Tuple[str, int], float]

        # Indicates that request latency in previous snapshot of master req
        # latencies was too high
        self.masterReqLatencyTooHigh = False

        # Request latency (time taken to be ordered) per client. The value
        # at key `i` in the dict is a LatencyMeasurement object which
        # accumulates the average latency and total number of requests for
        # each client.
        self.clientAvgReqLatencies = dict()  # type: Dict[int, LatencyMeasurement]

        # TODO: Set this if this monitor belongs to a node which has primary
        # of master. Will be used to set `totalRequests`
        self.hasMasterPrimary = None

        # Total requests that have been ordered since the node started
        self.totalRequests = 0

        self.started = datetime.utcnow().isoformat()

        # Note: handlers are invoked for an unordered request only once
        self.unordered_requests_handlers = []  # type: List[Callable]

        # Monitoring suspicious spikes in cluster throughput
        self.clusterThroughputSpikeMonitorData = {
            'value': 0,
            'cnt': 0,
            'accum': []
        }

        psutil.cpu_percent(interval=None)
        self.lastKnownTraffic = self.calculateTraffic()

        self.totalViewChanges = 0
        self._lastPostedViewChange = 0
        HasActionQueue.__init__(self)

        if self.config.SendMonitorStats:
            self.startRepeating(self.sendPeriodicStats,
                                self.config.DashboardUpdateFreq)

        self.startRepeating(
            self.checkPerformance,
            self.config.notifierEventTriggeringConfig['clusterThroughputSpike']['freq'])

        self.startRepeating(self.check_unordered, self.config.UnorderedCheckFreq)

        if 'disable_view_change' in self.config.unsafe:
            self.isMasterDegraded = lambda: False
        if 'disable_monitor' in self.config.unsafe:
            self.requestOrdered = lambda *args, **kwargs: {}
            self.sendPeriodicStats = lambda: None
            self.checkPerformance = lambda: None

        self.latency_avg_for_backup_cls = self.config.LatencyAveragingStrategyClass
        self.latency_measurement_cls = self.config.LatencyMeasurementCls
        self.throughput_avg_strategy_cls = self.config.throughput_averaging_strategy_class

        self.acc_monitor = None

        if self.config.ACC_MONITOR_ENABLED:
            self.acc_monitor = AccumulatingMonitorStrategy(
                start_time=time.perf_counter(),
                instances=instances.ids,
                txn_delta_k=self.config.ACC_MONITOR_TXN_DELTA_K,
                timeout=self.config.ACC_MONITOR_TIMEOUT,
                input_rate_reaction_half_time=self.config.ACC_MONITOR_INPUT_RATE_REACTION_HALF_TIME)
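
The `unsafe` checks near the end show that monitoring behaviour is switched off purely through configuration. A hedged sketch (assuming, as the membership tests above suggest, that config.unsafe is a mutable set of flag strings):

from plenum.common.config_util import getConfig

config = getConfig()
config.unsafe.add('disable_monitor')      # requestOrdered, sendPeriodicStats and checkPerformance become no-ops
config.unsafe.add('disable_view_change')  # isMasterDegraded() will always report False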
Code example #53
import asyncio
from unittest.mock import Mock

import pytest

from plenum.common.config_util import getConfig
from plenum.config import STATS_SERVER_PORT, STATS_SERVER_IP
from plenum.server.plugin.stats_consumer.stats_publisher import StatsPublisher

config = getConfig()


@pytest.fixture(scope="function")
def listener():
    def _acceptClient(clientReader, clientWriter):
        pass

    loop = asyncio.get_event_loop()
    server = loop.run_until_complete(
        asyncio.start_server(_acceptClient,
                             host=STATS_SERVER_IP, port=STATS_SERVER_PORT,
                             loop=loop))
    yield server
    server.close()
    loop.run_until_complete(server.wait_closed())


def testSendOneMessageNoOneListens(postingStatsEnabled):
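    # TestStatsPublisher is presumably a small test helper built on the
    # StatsPublisher imported above (its definition is not part of this
    # snippet); the test only checks that sending when nobody listens does
    # not raise.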
    statsPublisher = TestStatsPublisher()
    statsPublisher.send(message="testMessage")
Code example #54
File: waits.py Project: evernym/plenum
def expectedWaitDirect(count):
    conf = getConfig()
    return count * conf.ExpectedConnectTime + 1
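
The returned wait grows linearly with the number of connections plus a one-second cushion; for example (symbolically, since ExpectedConnectTime comes from the loaded config):

# timeout for a 4-node pool; equals 4 * conf.ExpectedConnectTime + 1 seconds
timeout = expectedWaitDirect(4)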
Code example #55
File: replica.py Project: evernym/plenum
    def __init__(self, node: 'plenum.server.node.Node', instId: int,
                 isMaster: bool = False):
        """
        Create a new replica.

        :param node: Node on which this replica is located
        :param instId: the id of the protocol instance the replica belongs to
        :param isMaster: is this a replica of the master protocol instance
        """
        super().__init__()
        self.stats = Stats(TPCStat)

        self.config = getConfig()

        routerArgs = [(ReqDigest, self._preProcessReqDigest)]

        for r in [PrePrepare, Prepare, Commit]:
            routerArgs.append((r, self.processThreePhaseMsg))

        routerArgs.append((Checkpoint, self.processCheckpoint))
        routerArgs.append((ThreePCState, self.process3PhaseState))

        self.inBoxRouter = Router(*routerArgs)

        self.threePhaseRouter = Router(
                (PrePrepare, self.processPrePrepare),
                (Prepare, self.processPrepare),
                (Commit, self.processCommit)
        )

        self.node = node
        self.instId = instId

        self.name = self.generateName(node.name, self.instId)

        self.outBox = deque()
        """
        This queue is used by the replica to send messages to its node. Replica
        puts messages that are consumed by its node
        """

        self.inBox = deque()
        """
        This queue is used by the replica to receive messages from its node.
        Node puts messages that are consumed by the replica
        """

        self.inBoxStash = deque()
        """
        If messages need to go back on the queue, they go here temporarily and
        are put back on the queue on a state change
        """

        self.isMaster = isMaster

        # Indicates name of the primary replica of this protocol instance.
        # None in case the replica does not know who the primary of the
        # instance is
        self._primaryName = None    # type: Optional[str]

        # Requests waiting to be processed once the replica is able to decide
        # whether it is primary or not
        self.postElectionMsgs = deque()

        # PRE-PREPAREs that are waiting to be processed but do not have the
        # corresponding request digest. Happens when the replica has not been
        # forwarded the request by the node but is getting 3-phase messages.
        # The value is a list since a malicious entity might send a PRE-PREPARE
        # with a different digest and, since we don't have the request
        # finalised, we store all PRE-PREPAREs
        self.prePreparesPendingReqDigest = {}   # type: Dict[Tuple[str, int], List]

        # PREPAREs stored by a non-primary replica for which it has not yet
        # received the corresponding PRE-PREPARE. Dictionary that stores a
        # tuple of view no and prepare sequence number as key and a deque of
        # PREPAREs as value. An attempt is made to flush this deque on
        # receiving every PRE-PREPARE request.
        self.preparesWaitingForPrePrepare = {}
        # type: Dict[Tuple[int, int], deque]

        # COMMITs that are stored for which no PRE-PREPARE or PREPARE has
        # been received yet
        self.commitsWaitingForPrepare = {}
        # type: Dict[Tuple[int, int], deque]

        # Dictionary of sent PRE-PREPAREs stored by the primary replica,
        # which it has broadcast to all other non-primary replicas.
        # Key of the dictionary is a 2-element tuple (viewNo, pre-prepare
        # seqNo) and the value is a tuple of request digest and time
        self.sentPrePrepares = {}
        # type: Dict[Tuple[int, int], Tuple[Tuple[str, int], float]]

        # Dictionary of received PRE-PREPAREs. Key of dictionary is a 2
        # element tuple with elements viewNo, pre-prepare seqNo and value is
        # a tuple of Request Digest and time
        self.prePrepares = {}
        # type: Dict[Tuple[int, int], Tuple[Tuple[str, int], float]]

        # Dictionary of received PREPARE requests. Key of the dictionary is a
        # 2-element tuple (viewNo, seqNo) and the value is a 2-element tuple
        # containing the request digest and the set of sender node names
        # (sender replica names in case of multiple protocol instances)
        # (viewNo, seqNo) -> ((identifier, reqId), {senders})
        self.prepares = Prepares()
        # type: Dict[Tuple[int, int], Tuple[Tuple[str, int], Set[str]]]

        self.commits = Commits()    # type: Dict[Tuple[int, int],
        # Tuple[Tuple[str, int], Set[str]]]

        # Set of tuples to keep track of ordered requests. Each tuple is
        # (viewNo, ppSeqNo)
        self.ordered = OrderedSet()        # type: OrderedSet[Tuple[int, int]]

        # Dictionary to keep track of which replica was primary during each
        # view. Key is the view no and value is the name of the primary
        # replica during that view
        self.primaryNames = {}  # type: Dict[int, str]

        # Holds msgs that are for later views
        self.threePhaseMsgsForLaterView = deque()
        # type: deque[(ThreePhaseMsg, str)]

        # Holds tuples of view no and prepare seq no of 3-phase messages
        # received while the replica was not participating
        self.stashingWhileCatchingUp = set()       # type: Set[Tuple]

        # Commits which are not being ordered since commits with lower view
        # numbers and sequence numbers have not been ordered yet. Key is the
        # viewNo and value is a map of pre-prepare sequence number to commit
        self.stashedCommitsForOrdering = {}         # type: Dict[int,
        # Dict[int, Commit]]

        self.checkpoints = SortedDict(lambda k: k[0])

        self.stashingWhileOutsideWaterMarks = deque()

        # Low water mark
        self._h = 0              # type: int

        # High water mark
        self.H = self._h + self.config.LOG_SIZE   # type: int

        self.lastPrePrepareSeqNo = self.h  # type: int
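
The final assignments define the watermark window: _h is the low water mark and H = _h + LOG_SIZE the high water mark. A hedged sketch of how such a window is typically consulted (the helper below is illustrative, not the actual Replica API):

def is_between_watermarks(replica, pp_seq_no: int) -> bool:
    # 3-phase messages are processed only while their sequence number lies in
    # (h, H]; otherwise they would be stashed (see stashingWhileOutsideWaterMarks).
    return replica.h < pp_seq_no <= replica.H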
Code example #56
File: conftest.py Project: evernym/plenum
def postingStatsEnabled(request):
    config = getConfig()
    config.SendMonitorStats = True
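
As quoted, the fixture flips SendMonitorStats on the shared config and never restores it, so the change can leak into later tests. A common pytest pattern (sketch only, assuming the same getConfig import) restores the previous value on teardown:

import pytest

from plenum.common.config_util import getConfig


@pytest.fixture
def postingStatsEnabled():
    config = getConfig()
    old_value = config.SendMonitorStats
    config.SendMonitorStats = True
    yield
    config.SendMonitorStats = old_value   # restore for subsequent tests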
Code example #57
File: conftest.py Project: evernym/plenum
def conf(tdir):
    return getConfig(tdir)
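
A hedged usage sketch for the fixture above (assuming it is registered as a pytest fixture, as the conftest.py filename suggests; baseDir is the attribute the Client example earlier reads from the config):

def test_uses_tmp_rooted_config(conf):
    # The config object is loaded via getConfig(tdir), presumably rooting its
    # paths in the temporary test directory.
    assert conf.baseDir is not None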