def setupLogging(log_level, raet_log_level=None, filename=None,
                 raet_log_file=None):
    """
    Set up logging. The log level is TRACE by default.
    """
    config = util.getConfig()
    addTraceToLogging()
    addDisplayToLogging()

    logHandlers = []
    if filename:
        d = os.path.dirname(filename)
        if not os.path.exists(d):
            os.makedirs(d)
        fileHandler = TimedRotatingFileHandler(
            filename,
            when=config.logRotationWhen,
            interval=config.logRotationInterval,
            backupCount=config.logRotationBackupCount,
            utc=True)
        logHandlers.append(fileHandler)
    else:
        logHandlers.append(logging.StreamHandler(sys.stdout))

    fmt = logging.Formatter(fmt=config.logFormat,
                            style=config.logFormatStyle)
    for h in logHandlers:
        if h.formatter is None:
            h.setFormatter(fmt)
        logging.root.addHandler(h)

    logging.root.setLevel(log_level)

    console = getConsole()
    # TODO: This should take directory
    defaultVerbosity = getRAETLogLevelFromConfig("RAETLogLevel",
                                                 Console.Wordage.terse,
                                                 config)
    logging.info("Choosing RAET log level {}".format(defaultVerbosity),
                 extra={"cli": False})
    verbosity = raet_log_level \
        if raet_log_level is not None \
        else defaultVerbosity
    raetLogFilePath = raet_log_file or getRAETLogFilePath("RAETLogFilePath",
                                                          config)
    console.reinit(verbosity=verbosity, path=raetLogFilePath, flushy=True)

    global loggingConfigured
    loggingConfigured = True
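
# A minimal usage sketch for setupLogging (not part of the original source).
# With a filename, output goes to a TimedRotatingFileHandler configured from
# the config's rotation settings; without one, to stdout. The log file path
# below is illustrative.
import logging

setupLogging(logging.DEBUG, filename='/tmp/plenum/node.log')
logging.getLogger().info('logging configured', extra={'cli': False})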
def logcapture(request, whitelist, concerningLogLevels):
    baseWhitelist = ['seconds to run once nicely',
                     'Executing %s took %.3f seconds',
                     'is already stopped',
                     'Error while running coroutine',
                     # TODO: This is too specific, move it to the
                     # particular test
                     "Beta discarding message INSTANCE_CHANGE(viewNo='BAD') "
                     "because field viewNo has incorrect type: <class 'str'>"
                     ]
    wlfunc = inspect.isfunction(whitelist)

    def tester(record):
        isBenign = record.levelno not in concerningLogLevels
        # TODO is this sufficient to test if a log is from test or not?
        isTest = os.path.sep + 'test' in record.pathname

        if wlfunc:
            wl = whitelist()
        else:
            wl = whitelist

        whiteListedExceptions = baseWhitelist + wl
        isWhiteListed = any(w in record.msg for w in whiteListedExceptions)
        if not (isBenign or isTest or isWhiteListed):
            raise BlowUp("{}: {}".format(record.levelname, record.msg))

    ch = TestingHandler(tester)
    logging.getLogger().addHandler(ch)

    request.addfinalizer(lambda: logging.getLogger().removeHandler(ch))


def tconf(tdir):
    # Applies test-specific overrides on top of the base config. (The
    # wrapper name is reconstructed; only the body survived extraction.)
    config = getConfig(tdir)
    for k, v in overriddenConfigValues.items():
        setattr(config, k, v)
    return config
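
# A hedged sketch (not from the original source) of values that could feed
# the logcapture fixture above: `whitelist` may be either a plain list of
# benign message substrings or a zero-argument callable returning one (the
# fixture checks with inspect.isfunction). The substrings are illustrative.
import logging

concerningLogLevels = [logging.WARNING, logging.ERROR, logging.CRITICAL]


def whitelist():
    return ['connection refused', 'unable to send message']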
def __init__(self, *args, **kwargs):
    checkPortAvailable(kwargs['ha'])
    basedirpath = kwargs.get('basedirpath')
    keep = RoadKeep(basedirpath=basedirpath,
                    stackname=kwargs['name'],
                    auto=kwargs.get('auto'),
                    baseroledirpath=basedirpath)  # type: RoadKeep
    kwargs['keep'] = keep
    localRoleData = keep.loadLocalRoleData()

    sighex = localRoleData['sighex']
    if not sighex:
        (sighex, _), (prihex, _) = getEd25519AndCurve25519Keys()
    else:
        prihex = ed25519SkToCurve25519(sighex, toHex=True)
    kwargs['sigkey'] = sighex
    kwargs['prikey'] = prihex
    self.msgHandler = kwargs.pop('msgHandler', None)  # type: Callable
    super().__init__(*args, **kwargs)
    if self.ha[1] != kwargs['ha'].port:
        error("the stack port number has changed, likely due to "
              "information in the keep")
    self.created = time.perf_counter()
    self.coro = None
    config = getConfig()
    try:
        self.messageTimeout = config.RAETMessageTimeout
    except AttributeError:
        # if no timeout is set then messages never time out
        self.messageTimeout = 0
def postingStatsEnabled(request):
    config = getConfig()
    config.SendMonitorStats = True

    def reset():
        config.SendMonitorStats = False

    request.addfinalizer(reset)
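
# An illustrative consumer of the postingStatsEnabled fixture (not from the
# original source): pytest injects it by name, SendMonitorStats stays True
# for the duration of the test, and the finalizer resets it afterwards.
def testStatsArePostedWhileEnabled(postingStatsEnabled):
    config = getConfig()
    assert config.SendMonitorStats is True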
def loadPlugins(baseDir):
    global pluginsLoaded

    alreadyLoadedPlugins = pluginsLoaded.get(baseDir)
    i = 0
    if alreadyLoadedPlugins:
        logger.debug("Plugins {} are already loaded from basedir: {}".format(
            alreadyLoadedPlugins, baseDir))
    else:
        logger.debug(
            "Plugin loading started to load plugins from basedir: {}".format(
                baseDir))

        config = getConfig()
        pluginsDirPath = os.path.expanduser(
            os.path.join(baseDir, config.PluginsDir))

        if not os.path.exists(pluginsDirPath):
            os.makedirs(pluginsDirPath)
            logger.debug(
                "Plugin directory created at: {}".format(pluginsDirPath))

        if hasattr(config, "PluginsToLoad"):
            for pluginName in config.PluginsToLoad:
                try:
                    pluginPath = os.path.expanduser(
                        os.path.join(pluginsDirPath, pluginName + ".py"))
                    if os.path.exists(pluginPath):
                        spec = importlib.util.spec_from_file_location(
                            pluginName, pluginPath)
                        plugin = importlib.util.module_from_spec(spec)
                        spec.loader.exec_module(plugin)
                        if baseDir in pluginsLoaded:
                            pluginsLoaded[baseDir].add(pluginName)
                        else:
                            pluginsLoaded[baseDir] = {pluginName}
                        i += 1
                    else:
                        if not pluginsNotFound.get(pluginPath):
                            logger.warning(
                                "Note: Plugin file does not exist: {}. "
                                "Create the plugin file if you want to "
                                "load it".format(pluginPath),
                                extra={"cli": False})
                            pluginsNotFound[pluginPath] = "Notified"

                except Exception as ex:
                    # TODO: Is it ok to catch any exception here, log the
                    # error and continue, or should loading fail when a
                    # plugin cannot be loaded?
                    logger.warning(
                        "** Error occurred during loading plugin {}: {}".
                        format(pluginPath, str(ex)))

    logger.debug("Total plugins loaded from basedir {}: {}".format(
        baseDir, i))
    return i
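
# A minimal sketch of calling loadPlugins directly (not from the original
# source); the base directory is illustrative. The return value is the
# number of plugins imported on this call (0 if the basedir was already
# loaded or no plugins are configured).
count = loadPlugins(os.path.expanduser('~/.plenum'))
logger.debug("plugins newly loaded: {}".format(count))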
def __init__(self, owner, ownedByNode: bool = True):
    self.owner = owner
    self.ownedByNode = ownedByNode
    self.config = getConfig()
    # Needs to schedule actions. The owner of the manager has the
    # responsibility of calling its `_serviceActions` method periodically.
    HasActionQueue.__init__(self)

    # Holds ledgers of different types with their info like the ledger
    # object, various callbacks, state (can be synced, is already synced,
    # etc).
    self.ledgers = {}  # type: Dict[int, Dict[str, Any]]

    # Ledger statuses received while the ledger was not ready to be synced
    # (`canSync` was set to False)
    self.stashedLedgerStatuses = {}  # type: Dict[int, deque]

    # Dict of sets with each set corresponding to a ledger. Each set
    # tracks which nodes claim that this node's ledger status is ok; if a
    # quorum of nodes (2f+1) says it is up to date, the catchup process is
    # marked as completed.
    self.ledgerStatusOk = {}  # type: Dict[int, Set]

    # Consistency proofs received in the process of catching up. Each
    # element of the dict is the dictionary of consistency proofs received
    # for the ledger; in that dictionary the key is the node name and the
    # value is a consistency proof.
    self.recvdConsistencyProofs = {}
    # type: Dict[int, Dict[str, ConsistencyProof]]

    self.catchUpTill = {}

    # Catchup replies that need to be applied to the ledger. The first
    # element of the list is a list of transactions that need to be
    # applied to the pool transaction ledger and the second element is the
    # list of transactions that need to be applied to the domain
    # transaction ledger.
    self.receivedCatchUpReplies = {}  # type: Dict[int, List]

    self.recvdCatchupRepliesFrm = {}
    # type: Dict[int, Dict[str, List[CatchupRep]]]

    # Tracks the beginning of the consistency proof timer. The timer
    # starts when the node gets f+1 consistency proofs. If the node is not
    # able to begin the catchup process even after the timer expires, it
    # requests consistency proofs.
    self.consistencyProofsTimers = {}  # type: Dict[int, Optional[float]]

    # Tracks the beginning of the catchup reply timer. The timer starts
    # after the node sends catchup requests. If the node is not able to
    # finish the catchup process even after the timer expires, it requests
    # the missing transactions.
    self.catchupReplyTimers = {}
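
# A wiring sketch (not from the original source) mirroring how the Client
# constructor later in this section registers the pool ledger (ledger type
# 0) with the manager; `client` and its callback names are assumed to exist
# as used in that constructor.
ledgerManager = LedgerManager(client, ownedByNode=False)
ledgerManager.addLedger(
    0, client.ledger,
    postCatchupCompleteClbk=client.postPoolLedgerCaughtUp,
    postTxnAddedToLedgerClbk=client.postTxnFromCatchupAddedToLedger)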
def run_node():
    with Looper(debug=False) as looper:
        # Nodes persist keys when bootstrapping to other nodes and
        # reconnecting; using an ephemeral temporary directory when proving
        # a concept is a nice way to keep things clean.
        config = getConfig()
        basedirpath = config.baseDir
        cliNodeReg = {k: v[0] for k, v in config.cliNodeReg.items()}

        clientName = 'Alice'

        # this seed is used by the signer to deterministically generate
        # a signature verification key that is shared out of band with the
        # consensus pool
        seed = b'22222222222222222222222222222222'
        assert len(seed) == 32
        signer = SimpleSigner(clientName, seed)

        client_address = ('0.0.0.0', 9700)

        client = Client(clientName,
                        cliNodeReg,
                        ha=client_address,
                        signer=signer,
                        basedirpath=basedirpath)
        looper.add(client)

        # give the client time to connect
        looper.runFor(3)

        # a simple message
        msg = {'life_answer': 42}

        # submit the request to the pool
        request, = client.submit_DEPRECATED(msg)

        # allow time for the request to be executed
        looper.runFor(3)

        reply, status = client.getReply(request.reqId)
        print('')
        print('Reply: {}\n'.format(reply))
        print('Status: {}\n'.format(status))
def run_node():
    with Looper(debug=False) as looper:
        # Nodes persist keys when bootstrapping to other nodes and
        # reconnecting; using an ephemeral temporary directory when proving
        # a concept is a nice way to keep things clean.
        config = getConfig()
        basedirpath = os.path.expanduser(config.baseDir)
        cliNodeReg = {k: v[0] for k, v in config.cliNodeReg.items()}

        clientName = 'Alice'

        # this seed is used by the signer to deterministically generate
        # a signature verification key that is shared out of band with the
        # consensus pool
        seed = b'22222222222222222222222222222222'
        assert len(seed) == 32
        signer = SimpleSigner(clientName, seed)

        client_address = ('0.0.0.0', 9700)

        client = Client(clientName,
                        cliNodeReg,
                        ha=client_address,
                        signer=signer,
                        basedirpath=basedirpath)
        looper.add(client)

        # give the client time to connect
        looper.runFor(3)

        # a simple message
        msg = {'life_answer': 42}

        # submit the request to the pool
        request, = client.submit(msg)

        # allow time for the request to be executed
        looper.runFor(3)

        reply, status = client.getReply(request.reqId)
        print('')
        print('Reply: {}\n'.format(reply))
        print('Status: {}\n'.format(status))
def main(logfile: str = None, debug=None, cliClass=None):
    config = getConfig()
    nodeReg = config.nodeReg
    cliNodeReg = config.cliNodeReg
    basedirpath = config.baseDir

    if not cliClass:
        cliClass = Cli

    with Looper(debug=False) as looper:
        cli = cliClass(looper=looper,
                       basedirpath=basedirpath,
                       nodeReg=nodeReg,
                       cliNodeReg=cliNodeReg,
                       logFileName=logfile,
                       debug=debug)

        if not debug:
            looper.run(cli.shell(*sys.argv[1:]))
            print('Goodbye.')
        return cli
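
# A hedged invocation sketch (not from the original source): with debug
# set, the shell is not entered and the Cli instance is returned so a
# caller, such as a test, can drive it programmatically. The log file name
# is illustrative.
cli = main(logfile='cli.log', debug=True)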
def __init__(self,
             name: str,
             nodeReg: Dict[str, HA] = None,
             ha: Union[HA, Tuple[str, int]] = None,
             basedirpath: str = None,
             config=None):
    """
    Creates a new client.

    :param name: unique identifier for the client
    :param nodeReg: names and host addresses of all nodes in the pool
    :param ha: tuple of host and port
    """
    self.config = config or getConfig()
    basedirpath = self.config.baseDir if not basedirpath else basedirpath
    self.basedirpath = basedirpath

    cha = None
    # If client information already exists in RAET then use it
    if self.exists(name, basedirpath):
        logger.debug("Client {} ignoring given ha".format(ha))
        cha = getHaFromLocalEstate(name, basedirpath)
        if cha:
            cha = HA(*cha)
    if not cha:
        cha = ha if isinstance(ha, HA) else HA(*ha)

    self.name = name
    self.reqRepStore = self.getReqRepStore()
    self.txnLog = self.getTxnLogStore()

    self.dataDir = self.config.clientDataDir or "data/clients"
    HasFileStorage.__init__(self, self.name, baseDir=self.basedirpath,
                            dataDir=self.dataDir)

    self._ledger = None
    if not nodeReg:
        self.mode = None
        HasPoolManager.__init__(self)
        self.ledgerManager = LedgerManager(self, ownedByNode=False)
        self.ledgerManager.addLedger(
            0, self.ledger,
            postCatchupCompleteClbk=self.postPoolLedgerCaughtUp,
            postTxnAddedToLedgerClbk=self.postTxnFromCatchupAddedToLedger)
    else:
        cliNodeReg = OrderedDict()
        for nm, (ip, port) in nodeReg.items():
            cliNodeReg[nm] = HA(ip, port)
        self.nodeReg = cliNodeReg
        self.mode = Mode.discovered

    self.setF()

    stackargs = dict(name=name,
                     ha=cha,
                     main=False,  # stops incoming vacuous joins
                     auto=AutoMode.always)
    stackargs['basedirpath'] = basedirpath
    self.created = time.perf_counter()

    # noinspection PyCallingNonCallable
    self.nodestack = self.nodeStackClass(stackargs,
                                         self.handleOneNodeMsg,
                                         self.nodeReg)
    self.nodestack.onConnsChanged = self.onConnsChanged

    logger.info("Client {} initialized with the following node registry:"
                .format(name))
    lengths = [max(x) for x in zip(*[
        (len(name), len(host), len(str(port)))
        for name, (host, port) in self.nodeReg.items()])]
    fmt = "    {{:<{}}} listens at {{:<{}}} on port {{:>{}}}".format(
        *lengths)
    for name, (host, port) in self.nodeReg.items():
        logger.info(fmt.format(name, host, port))

    Motor.__init__(self)

    self.inBox = deque()

    # don't need to connect nicely as a client
    self.nodestack.connectNicelyUntil = 0

    # TODO: Need to have a couple of tests around `reqsPendingConnection`
    # where we check with and without the pool ledger

    # Stores the requests that need to be sent to the nodes when the
    # client has made sufficient connections to the nodes.
    self.reqsPendingConnection = deque()

    tp = loadPlugins(self.basedirpath)
    logger.debug("total plugins loaded in client: {}".format(tp))
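
# A construction sketch (not from the original source). When a nodeReg is
# given, the client skips pool-ledger discovery and starts in
# Mode.discovered; node names, hosts, and ports below are illustrative.
nodeReg = {
    'Alpha': ('127.0.0.1', 9701),
    'Beta': ('127.0.0.1', 9703),
}
client = Client('Alice', nodeReg=nodeReg, ha=('0.0.0.0', 9700))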
from typing import Iterable, Tuple

import json

from plenum.common.types import EVENT_REQ_ORDERED, EVENT_NODE_STARTED, \
    EVENT_PERIODIC_STATS_THROUGHPUT, PLUGIN_TYPE_STATS_CONSUMER, \
    EVENT_VIEW_CHANGE, EVENT_PERIODIC_STATS_LATENCIES, \
    EVENT_PERIODIC_STATS_NODES, EVENT_PERIODIC_STATS_TOTAL_REQUESTS
from plenum.common.stacked import NodeStack
from plenum.server.blacklister import SimpleBlacklister
from plenum.common.util import getConfig
from plenum.common.log import getlogger
from plenum.server.has_action_queue import HasActionQueue
from plenum.server.instances import Instances
from plenum.server.plugin.has_plugin_loader_helper import PluginLoaderHelper

logger = getlogger()
config = getConfig()


class Monitor(HasActionQueue, PluginLoaderHelper):
    """
    Implementation of RBFT's monitoring mechanism. The monitoring metrics
    are collected at the level of a node. Each node monitors the
    performance of each instance. Throughput of requests and latency per
    client request are measured.
    """

    def __init__(self, name: str, Delta: float, Lambda: float, Omega: float,
                 instances: Instances, nodestack: NodeStack,
                 blacklister: SimpleBlacklister,
                 pluginPaths: Iterable[str] = None):
        self.name = name
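
# An instantiation sketch (not from the original source). Delta, Lambda,
# and Omega are the RBFT monitoring thresholds and are assumed here to be
# available as config attributes with these names; `node` and the
# Instances() call are placeholders.
monitor = Monitor(node.name,
                  Delta=config.DELTA,
                  Lambda=config.LAMBDA,
                  Omega=config.OMEGA,
                  instances=Instances(),
                  nodestack=node.nodestack,
                  blacklister=node.blacklister)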
def conf(tdir):
    return getConfig(tdir)
def conf():
    return getConfig()