def __init__(self,
             name: str,
             basedirpath: str,
             client: Client = None,
             wallet: Wallet = None,
             port: int = None,
             loop=None,
             config=None,
             endpointArgs=None):
    config = config or getConfig()
    basedirpath = basedirpath or os.path.expanduser(config.baseDir)

    portParam, _ = self.getPassedArgs()

    self.logger = getlogger()

    super().__init__(name, basedirpath, client, wallet,
                     portParam or port, loop=loop, config=config,
                     endpointArgs=endpointArgs)

    self.claimVersionNumber = 0.01
    self._invites = {}

    self.updateClaimVersionFile(self.getClaimVersionFileName())

    signal.signal(signal.SIGTERM, self.exit_gracefully)

    self.setupLogging(self.getLoggerFilePath())
def run_node(config, name, node_ip, node_port, client_ip, client_port):
    node_ha = HA(node_ip, node_port)
    client_ha = HA(client_ip, client_port)

    node_config_helper = NodeConfigHelper(name, config)

    logFileName = os.path.join(node_config_helper.log_dir, name + ".log")

    logger = getlogger()
    Logger().apply_config(config)
    Logger().enableFileLogging(logFileName)
    logger.setLevel(config.logLevel)
    logger.debug("You can find logs in {}".format(logFileName))

    vars = [var for var in os.environ.keys() if var.startswith("INDY")]
    logger.debug("Indy related env vars: {}".format(vars))

    with Looper(debug=config.LOOPER_DEBUG) as looper:
        node = Node(name,
                    config_helper=node_config_helper,
                    ha=node_ha, cliha=client_ha)
        node = integrate(node_config_helper, node, logger)
        looper.add(node)
        looper.run()
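# A hypothetical invocation sketch (the node name and port values are
# illustrative, not from the source): start a node named "Node1" that
# listens for other nodes on 9701 and for clients on 9702.
from plenum.common.config_util import getConfig

if __name__ == '__main__':
    run_node(getConfig(), 'Node1', '0.0.0.0', 9701, '0.0.0.0', 9702)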
def _dump():
    logger = getlogger()
    cli = ctx['current_cli']
    nocli = {"cli": False}
    wrts = ''.join(cli.cli.output.writes)
    logger.info('=========================================', extra=nocli)
    logger.info('| OUTPUT DUMP |', extra=nocli)
    logger.info('-----------------------------------------', extra=nocli)
    for w in wrts.splitlines():
        logger.info('> ' + w, extra=nocli)
    logger.info('=========================================', extra=nocli)
import codecs

from ledger.genesis_txn.genesis_txn_file_util import genesis_txn_path
from plenum.common.config_util import getConfig
from storage.kv_store_rocksdb_int_keys import KeyValueStorageRocksdbIntKeys
from stp_core.common.constants import ZMQ_NETWORK_PROTOCOL
from stp_core.common.log import getlogger
from pympler import muppy, summary, asizeof


def decode_err_handler(error):
    # Replace each run of undecodable bytes with the same number of spaces
    # and resume decoding just past the bad run.
    length = error.end - error.start
    return length * ' ', error.end


codecs.register_error('decode_errors', decode_err_handler)

logger = getlogger()

MBs = 1024 * 1024
INDY_ENV_FILE = "indy.env"
NODE_CONTROL_CONFIG_FILE = "node_control.conf"
INDY_NODE_SERVICE_FILE_PATH = "/etc/systemd/system/indy-node.service"
NODE_CONTROL_SERVICE_FILE_PATH = "/etc/systemd/system/indy-node-control.service"
NUMBER_TXNS_FOR_DISPLAY = 10
LIMIT_OBJECTS_FOR_PROFILER = 10


def none_on_fail(func):
    def wrap(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as ex:
            # Swallow the error so callers get None instead of an exception,
            # as the decorator's name implies.
            return None

    return wrap
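# A minimal usage sketch (hypothetical values, not from the source): the
# registered 'decode_errors' handler replaces undecodable byte runs with
# spaces, and none_on_fail turns any raised exception into a None return.
text = b'abc\xff\xfedef'.decode('utf-8', errors='decode_errors')  # -> 'abc  def'

@none_on_fail
def read_first_line(path):
    with open(path) as f:
        return f.readline()

assert read_first_line('/no/such/file') is None  # exception swallowed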
def fault(ex: Exception, msg: str):
    from stp_core.common.log import getlogger
    getlogger().error(msg, exc_info=ex)
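# A hedged usage sketch: log a caught exception together with its traceback
# (exc_info accepts the exception instance itself in Python 3).
try:
    1 / 0
except Exception as ex:
    fault(ex, "division failed")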
import logging


def logger():
    logger = getlogger()
    old_value = logger.getEffectiveLevel()
    logger.root.setLevel(logging.DEBUG)
    yield logger
    logger.root.setLevel(old_value)
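# A sketch of how such a generator is typically consumed as a pytest fixture
# (the @pytest.fixture decorator is assumed here, not shown in the source):
# the root logger runs at DEBUG for the duration of the test, after which
# the previous level is restored.
def test_debug_output(logger):
    logger.debug("visible only while the fixture holds the level at DEBUG")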
from importlib.util import module_from_spec, spec_from_file_location
import os

from stp_core.common.log import getlogger

pluginsLoaded = {}  # Dict(plugins_dir, List[plugin names])
pluginsNotFound = {}  # Dict(plugins_dir, List[plugin names])

logger = getlogger("plugin-loader")


def loadPlugins(plugins_dir, plugins_to_load=None):
    global pluginsLoaded

    alreadyLoadedPlugins = pluginsLoaded.get(plugins_dir)
    i = 0
    if alreadyLoadedPlugins:
        logger.debug("Plugins {} are already loaded from plugins_dir: {}".format(
            alreadyLoadedPlugins, plugins_dir))
    else:
        logger.debug(
            "Plugin loading started to load plugins from plugins_dir: {}".format(
                plugins_dir))

        if not os.path.exists(plugins_dir):
            os.makedirs(plugins_dir)
            logger.debug("Plugin directory created at: {}".format(
                plugins_dir))

        if plugins_to_load is not None:
            for pluginName in plugins_to_load:
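# A hypothetical call (the directory path and plugin name are illustrative):
# load the named plugin modules from plugins_dir, creating the directory
# first if it does not exist yet.
loadPlugins('/var/lib/indy/plugins', plugins_to_load=['stats_consumer'])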
import logging


def set_info_log_level():
    logger = getlogger()
    lvl = logger.level
    logger.setLevel(logging.INFO)
    yield
    logger.setLevel(lvl)
import logging


def setTestLogLevel():
    logger = getlogger()
    logger.level = logging.NOTSET
import os
from tempfile import gettempdir, mkdtemp

from stp_core.loop.eventually import eventually
from stp_core.common.log import getlogger
from plenum.common.util import getMaxFailures
from plenum.test.cli.mock_output import MockOutput
from plenum.test.cli.test_keyring import createNewKeyring
from plenum.test.helper import waitForSufficientRepliesForRequests
from plenum.test.spy_helpers import getAllArgs
from plenum.test.test_client import TestClient
from plenum.test.test_node import TestNode, checkPoolReady
from plenum.test.testable import spyable
from pygments.token import Token
from functools import partial
from plenum.test import waits
from plenum.common import util

logger = getlogger()


class Recorder:
    """
    This class will write an interleaved log of the CLI session into a temp
    directory. The directory name will start with "cli_scripts_" and should
    contain files for each CLI that was created, e.g., earl, pool, etc.
    """

    def __init__(self, partition):
        basedir = os.path.join(gettempdir(), 'cli_scripts')
        try:
            os.mkdir(basedir)
        except FileExistsError:
            pass
        self.directory = mkdtemp(dir=basedir,
def __init__(self, node: 'plenum.server.node.Node', instId: int,
             config=None,
             isMaster: bool = False,
             bls_bft_replica: BlsBftReplica = None,
             metrics: MetricsCollector = NullMetricsCollector(),
             get_current_time=None,
             get_time_for_3pc_batch=None):
    """
    Create a new replica.

    :param node: Node on which this replica is located
    :param instId: the id of the protocol instance the replica belongs to
    :param isMaster: is this a replica of the master protocol instance
    """
    HasActionQueue.__init__(self)
    self.get_current_time = get_current_time or time.perf_counter
    self.get_time_for_3pc_batch = get_time_for_3pc_batch or node.utc_epoch
    # self.stats = Stats(TPCStat)
    self.config = config or getConfig()
    self.metrics = metrics
    self.node = node
    self.instId = instId
    self.name = self.generateName(node.name, self.instId)
    self.logger = getlogger(self.name)
    self.validator = ReplicaValidator(self)

    self.outBox = deque()
    """
    This queue is used by the replica to send messages to its node. Replica
    puts messages that are consumed by its node
    """

    self.inBox = deque()
    """
    This queue is used by the replica to receive messages from its node.
    Node puts messages that are consumed by the replica
    """

    self.inBoxStash = deque()
    """
    If messages need to go back on the queue, they go here temporarily and
    are put back on the queue on a state change
    """

    self._is_master = isMaster

    # Dictionary to keep track of which replica was primary during each
    # view. Key is the view no and value is the name of the primary
    # replica during that view
    self.primaryNames = OrderedDict()  # type: OrderedDict[int, str]

    # Flag being used for preterm exit from the loop in the method
    # `processStashedMsgsForNewWaterMarks`. See that method for details.
    self.consumedAllStashedMsgs = True

    self._freshness_checker = FreshnessChecker(
        freshness_timeout=self.config.STATE_FRESHNESS_UPDATE_INTERVAL)

    self._bls_bft_replica = bls_bft_replica
    self._state_root_serializer = state_roots_serializer

    # Did we log a message about getting a request while there is no primary
    self.warned_no_primary = False

    self._consensus_data = ConsensusSharedData(
        self.name,
        self.node.poolManager.node_names_ordered_by_rank(),
        self.instId,
        self.isMaster)
    self._internal_bus = InternalBus()
    self._external_bus = ExternalBus(send_handler=self.send)

    self.stasher = self._init_replica_stasher()
    self._subscription = Subscription()
    self._bootstrap_consensus_data()
    self._subscribe_to_external_msgs()
    self._subscribe_to_internal_msgs()
    self._checkpointer = self._init_checkpoint_service()
    self._ordering_service = self._init_ordering_service()
    self._message_req_service = self._init_message_req_service()
    self._view_change_service = self._init_view_change_service()
    for ledger_id in self.ledger_ids:
        self.register_ledger(ledger_id)