def test_setLevelShowngLogger(loggerLevel, isSuperiorTo, logRecordLevel):
    """
    Set gLogger level: check whether a log record should be displayed.

    :param loggerLevel: level name to set on gLogger
    :param isSuperiorTo: expected result of loggerLevel > logRecordLevel
    :param logRecordLevel: level name of the log record to create
    """
    capturedBackend, log, sublog = gLoggerReset()
    gLogger.setLevel(loggerLevel)

    # convert level name into its integer value
    logRecordLevelValue = LogLevels.getLevelValue(logRecordLevel)
    res = gLogger._createLogRecord(logRecordLevelValue, 'message', '')

    # clean the log to remove unnecessary information
    logstring = cleaningLog(capturedBackend.getvalue())

    # if loggerLevel is superior to logRecordLevel then:
    # - log record should not appear
    # - shown should return False as the log doesn't appear
    # - value returned by createLogRecord should be False too
    isLoggerLvlSupToLogRecordLvl = LogLevels.getLevelValue(loggerLevel) > logRecordLevelValue
    assert isLoggerLvlSupToLogRecordLvl == isSuperiorTo

    if isLoggerLvlSupToLogRecordLvl:
        assert not gLogger.shown(logRecordLevel)
        assert not res
        assert logstring == ""
    else:
        assert gLogger.shown(logRecordLevel)
        assert res
        assert logstring == "Framework %s: message\n" % logRecordLevel.upper()

    capturedBackend.truncate(0)
    capturedBackend.seek(0)
def setLevel(self, levelName):
    """
    Check if the level name exists and set it.

    :param levelName: string representing the level to give to the logger
    :return: boolean representing if the setting is done or not
    """
    # reject unknown level names without touching the logger
    if levelName.upper() not in LogLevels.getLevelNames():
        return False
    self._logger.setLevel(LogLevels.getLevelValue(levelName))
    return True
def setLevel(self, levelName):
    """
    Configure the level of the handler associated to the backend.
    Make sure the handler has been created before calling the method.

    :param levelName: string representing the level to give to the handler
    :return: boolean representing if the setting is done or not
    """
    # reject unknown level names: neither the handler nor the cached
    # level attribute is modified in that case
    if levelName.upper() not in LogLevels.getLevelNames():
        return False
    self._handler.setLevel(LogLevels.getLevelValue(levelName))
    # keep the level name around for later queries
    self._level = levelName
    return True
def setLevel(self, levelName):
    """
    Check if the level name exists and get the integer value before setting it.

    :param levelName: string representing the level to give to the logger
    :return: boolean representing if the setting is done or not
    """
    # unknown level names are ignored and reported via the return value
    if levelName.upper() not in LogLevels.getLevelNames():
        return False
    self._setLevel(LogLevels.getLevelValue(levelName))
    return True
def test_createLogRecord(sMsg, sVarMsg, exc_info, expected):
    """
    Create logs of different levels with multiple logs.
    """
    capturedBackend, log, sublog = gLoggerReset()
    # set the level to debug so that every record is emitted
    gLogger.setLevel("debug")

    # map each logger to the component path expected in its output
    logDict = {gLogger: "", log: "/log", sublog: "/log/sublog"}

    # for each existing level, one log record is created per logger
    for level in gLogger.getAllPossibleLevels():
        for logger, logInfo in logDict.items():
            # _createLogRecord is the method in charge of creating the log record:
            # the debug, ..., always methods all wrap it.
            # LogLevels converts the level name into its integer value
            logger._createLogRecord(LogLevels.getLevelValue(level), sMsg, sVarMsg, exc_info)

            # clean the log to remove unnecessary information
            logstring = cleaningLog(capturedBackend.getvalue())
            logExpected = "Framework%s %s: %s\n" % (logInfo, level, expected)
            assert logExpected == logstring

            capturedBackend.truncate(0)
            capturedBackend.seek(0)
def test_getLevel(value, logLevel):
    """
    Test getLevel.
    """
    # the expected name is upper-cased, except when it is falsy (e.g. None)
    expected = logLevel.upper() if logLevel else logLevel
    assert LogLevels.getLevel(value) == expected
def __init__(self, optionDict):
    """Construct the object, set the base LogLevel to DEBUG, and parse the options.

    :param optionDict: mapping of dot-separated module paths to level names
    """
    self._configDict = {"dirac": {LEVEL: LogLevels.DEBUG}}
    # "Plugin" is a meta option, not a module entry: drop it if present
    optionDict.pop("Plugin", None)
    for module, level in optionDict.items():
        self.__fillConfig(self._configDict, module.split(DOT), LogLevels.getLevelValue(level))
def __init__(self, father=None, fatherName='', name='', customName=''):
    """
    Initialization of the Logging object.
    By default, 'fatherName' and 'name' are empty, because getChild accepts only string and the
    first empty string corresponds to the root logger.

    Example:
    logging.getLogger('') == logging.getLogger('root') == root logger
    logging.getLogger('root').getChild('log') == root.log == log child of root

    :params father: Logging, father of this new Logging.
    :params fatherName: string representing the name of the father logger in the chain.
    :params name: string representing the name of the logger in the chain.
    :params customName: string representing the name of the logger in the chain:
                        - "root" does not appear at the beginning of the chain
                        - hierarchy "." are replaced by "\"
                        useful for the display of the Logging name
    """
    # Logging chain
    self._children = {}
    self._parent = father

    # initialize display options and level with the ones of the Logging parent
    if self._parent is not None:
        self._options = self._parent.getDisplayOptions()
        self._level = LogLevels.getLevelValue(father.getLevel())
    else:
        self._options = {'headerIsShown': True, 'threadIDIsShown': False, 'Color': False}
        # the native level is not used because it has to be to debug to send all
        # messages to the log central
        self._level = None

    # dictionary of the option state, modified by the user or not
    # this is to give to the options the same behaviour that the "logging" level:
    # - propagation from the parent to the children when their levels are not set by the developer himself
    # - stop the propagation when a developer set a level to a child
    self._optionsModified = {'headerIsShown': False, 'threadIDIsShown': False}
    self._levelModified = False

    # backends attached to this Logging
    self._backendsList = []

    # name of the Logging
    self.name = str(name)
    self._logger = logging.getLogger(fatherName).getChild(self.name)
    # update the custom name of the Logging adding the new Logging name in the
    # entire path
    self._customName = os.path.join("/", customName, self.name)

    # Locks to make Logging thread-safe
    # we use RLock to prevent blocking in the Logging
    # NOTE(review): self._lockRing is presumably a class-level attribute shared by all
    # Logging instances — its declaration is outside this view, confirm in the class body
    # lockInit to protect the initialization of a sublogger
    self._lockInit = self._lockRing.getLock("init")
    # lockOptions to protect the option modifications and the backendsList
    self._lockOptions = self._lockRing.getLock("options", recursive=True)
    # lockLevel to protect the level
    self._lockLevel = self._lockRing.getLock("level", recursive=True)
    # lockObjectLoader to protect the ObjectLoader singleton
    self._lockObjectLoader = self._lockRing.getLock("objectLoader")
def shown(self, levelName):
    """
    Determine if messages with a certain level will be displayed or not.

    :params levelName: string representing the level to analyse
    :return: boolean which give the answer
    """
    # hold the level lock so the level cannot change while we compare
    self._lockLevel.acquire()
    try:
        if levelName.upper() not in LogLevels.getLevelNames():
            return False
        return self._level <= LogLevels.getLevelValue(levelName)
    finally:
        self._lockLevel.release()
def test_getLevelNames():
    """
    Test getLevelNames.
    """
    # expected names, already in sorted order
    expected = ["ALWAYS", "DEBUG", "ERROR", "FATAL", "INFO", "NOTICE", "VERBOSE", "WARN"]
    assert sorted(LogLevels.getLevelNames()) == expected
def main():
    """Create and submit an archive request; exit 1 on any failure."""
    try:
        CreateArchiveRequest().run()
    except Exception as e:
        # with verbose (or more detailed) logging, include the stack trace
        if LogLevels.getLevelValue(sLog.getLevel()) <= LogLevels.VERBOSE:
            sLog.exception("Failed to create Archive Request")
        else:
            sLog.error("ERROR: Failed to create Archive Request:", str(e))
        exit(1)
    exit(0)
def main():
    """Create and submit a moving request; exit 1 on any failure."""
    try:
        CreateMovingRequest().run()
    except Exception as e:
        # with verbose (or more detailed) logging, include the stack trace
        if LogLevels.getLevelValue(sLog.getLevel()) <= LogLevels.VERBOSE:
            sLog.exception('Failed to create Moving Request')
        else:
            sLog.error('ERROR: Failed to create Moving Request:', str(e))
        exit(1)
    exit(0)
def __init__(self):
    """
    Initialization of the LoggingRoot object.
    LoggingRoot :
    - initialize the UTC time
    - set the correct level defines by the user, or the default
    - add the custom level to logging: verbose, notice, always
    - register a default backend: stdout : all messages will be displayed here
    - update the format according to the command line argument
    """
    super(LoggingRoot, self).__init__()

    # this line removes some useless information from log records and improves
    # the performances
    logging._srcfile = None  # pylint: disable=protected-access

    # initialize the root logger
    # actually a child of the root logger to avoid conflicts with other
    # libraries which used 'logging'
    self._logger = logging.getLogger('dirac')
    # prevent propagation to the root logger to avoid conflicts with external libraries
    # which want to use the root logger
    self._logger.propagate = False

    # here we redefine the custom name to the empty string to remove the "\"
    # in the display
    self._customName = ""

    # this level is not the Logging level, it is only used to send all log messages to the central logging system
    # to do such an operation, we need to let pass all log messages to the root logger, so all logger needs to be
    # at debug. Then, all the backends have a level associated to a Logging level, which can be changed with the
    # setLevel method of Logging, and these backends will choose to send the
    # log messages or not.
    self._logger.setLevel(LogLevels.DEBUG)

    # initialization of the UTC time
    # Actually, time.gmtime is equal to UTC time because it has its DST flag to 0
    # which means there is no clock advance
    logging.Formatter.converter = time.gmtime

    # initialization of levels: register the custom DIRAC level names with 'logging'
    levels = LogLevels.getLevels()
    for level in levels:
        logging.addLevelName(levels[level], level)

    # initialization of the default backend
    self._setLevel(LogLevels.NOTICE)
    # use the StdoutBackend directly to avoid dependancy loop with ObjectLoader
    self._addBackend(StdoutBackend())

    # configuration of the level and update of the format
    self.__configureLevel()
    self._generateBackendFormat()
def _createLogRecord(self, level, sMsg, sVarMsg, exc_info=False, local_context=None):
    """
    Create a log record according to the level of the message.

    - The log record is always sent to the different backends
    - Backends have their own levels and may manage the display of the log record

    :param int level: level of the log record
    :param str sMsg: message
    :param str sVarMsg: additional message
    :param bool exc_info: indicates whether the stacktrace has to appear in the log record
    :param dict local_context: Extra information propagated as extra to the formater.
                               It is meant to be used only by the LocalSubLogger
    :return: boolean representing the result of the log record creation
    """
    # lock to prevent a level change after that the log is sent
    self._lockLevel.acquire()
    try:
        # exc_info is only for exception to add the stack trace
        # extra is a way to add extra attributes to the log record:
        # - 'componentname': the system/component name
        # - 'varmessage': the variable message
        # - 'customname' : the name of the logger for the DIRAC usage: without 'root' and separated with '/'
        # as log records, extras attributes are not camel case
        extra = {
            "componentname": self._componentName,
            "varmessage": str(sVarMsg),
            "spacer": "" if not sVarMsg else " ",
            "customname": self._customName,
        }
        # display options (headers, threadIDs, ...) also depend on the logger
        extra.update(self._options)
        # this typically contains local custom names
        if local_context:
            extra.update(local_context)

        self._logger.log(level, "%s", sMsg, exc_info=exc_info, extra=extra)

        # report whether the message passes this Logging's own level
        return LogLevels.getLevelValue(self.getLevel()) <= level
    finally:
        self._lockLevel.release()
def __init__(self):
    """
    Initialization of the LoggingRoot object.
    LoggingRoot :
    - initialize the UTC time
    - set the correct level defines by the user, or the default
    - add the custom level to logging: verbose, notice, always
    - register a default backend (stdout): all messages will be displayed here
    - update the format according to the command line argument
    """
    super(LoggingRoot, self).__init__()

    # this line removes some useless information from log records and improves the performances
    logging._srcfile = None  # pylint: disable=protected-access

    # initialize the root logger, which turns out to be a child of root, and disable propagation
    # to avoid any conflicts with external libs that would use "logging" too
    self._logger = logging.getLogger("dirac")
    self._logger.propagate = False

    # here we redefine the custom name to the empty string to remove the "\" in the display
    self._customName = ""

    # initialization of levels: register the custom DIRAC level names with 'logging'
    levels = LogLevels.getLevels()
    for level in levels:
        logging.addLevelName(levels[level], level)

    # root Logger level is set to NOTICE by default
    self._logger.setLevel(LogLevels.NOTICE)

    # initialization of the UTC time
    # Actually, time.gmtime is equal to UTC time: it has its DST flag to 0 which means there is no clock advance
    logging.Formatter.converter = time.gmtime

    # initialization of the default backend
    # use the StdoutBackend directly to avoid dependancy loop with ObjectLoader
    # NOTE(review): the class itself is passed here (not an instance, unlike the other
    # LoggingRoot variant which does StdoutBackend()) — presumably _addBackend instantiates
    # it in this version; confirm against _addBackend's signature
    self._addBackend(StdoutBackend)

    # configuration of the level and update the format
    self.__configureLevel()
def getAllPossibleLevels():
    """
    Return every level name known to the logging system.

    :return: a list of all levels available
    """
    # delegate to LogLevels, the single source of truth for level names
    return LogLevels.getLevelNames()
return if notAt and self.switches.get('AllowReplication'): self._replicateSourceFiles(request, notAt) else: raise RuntimeError('Not all files are at the Source, exiting') def _replicateSourceFiles(self, request, lfns): """Create the replicateAndRegisterRequest. :param request: The request to add the operation to :param lfns: list of LFNs """ registerSource = Operation() registerSource.Type = 'ReplicateAndRegister' registerSource.TargetSE = self.sourceSEs[0] self.addLFNs(registerSource, lfns, addPFN=True) request.addOperation(registerSource) if __name__ == '__main__': try: CAR = CreateArchiveRequest() CAR.run() except Exception as e: if LogLevels.getLevelValue(LOG.getLevel()) <= LogLevels.VERBOSE: LOG.exception('Failed to create Archive Request') else: LOG.error('ERROR: Failed to create Archive Request:', str(e)) exit(1) exit(0)
def __init__(self, optionDict):
    """Construct the object, set the base LogLevel to DEBUG, and parse the options.

    :param optionDict: mapping of dot-separated module paths to level names
    """
    self._configDict = {'dirac': {LEVEL: LogLevels.DEBUG}}
    # 'Plugin' is a meta option, not a module entry: drop it if present
    optionDict.pop('Plugin', None)
    for module, level in optionDict.items():
        self.__fillConfig(self._configDict, module.split(DOT), LogLevels.getLevelValue(level))
return 0 for request in self.requests: putRequest = self.reqClient.putRequest(request) if not putRequest['OK']: sLog.error('unable to put request %r: %s' % (request.RequestName, putRequest['Message'])) continue requestIDs.append(str(putRequest['Value'])) sLog.always('Request %r has been put to ReqDB for execution.' % request.RequestName) if requestIDs: sLog.always('%d requests have been put to ReqDB for execution' % len(requestIDs)) sLog.always('RequestID(s): %s' % ' '.join(requestIDs)) sLog.always('You can monitor the request status using the command: dirac-rms-request <requestName/ID>') return 0 sLog.error('No requests created') return 1 if __name__ == '__main__': try: CMR = CreateMovingRequest() CMR.run() except Exception as e: if LogLevels.getLevelValue(sLog.getLevel()) <= LogLevels.VERBOSE: sLog.exception('Failed to create Moving Request') else: sLog.error('ERROR: Failed to create Moving Request:', str(e)) exit(1) exit(0)
def test_getLevelValue(logLevel, value):
    """
    Test getLevelValue.
    """
    # the integer value of the level name must match the expected one
    assert value == LogLevels.getLevelValue(logLevel)
""" _, _, _ = gLoggerReset() log = gLogger.getSubLogger("log") log.setLevel("notice") anotherLog = gLogger.getSubLogger("log") assert log.getLevel() == anotherLog.getLevel() assert log == anotherLog # Run the tests for all the log levels and exceptions # We may need to rerun the test if we are unlucky and the timestamps # don't match @flaky(max_runs=3) @pytest.mark.parametrize("logLevel", ["exception"] + [lvl.lower() for lvl in LogLevels.getLevelNames()]) def test_localSubLoggerObject(logLevel): """ Create a local subLogger and compare its output with the standard subLogger for all the log levels """ capturedBackend, log, _ = gLoggerReset() # Set the level to debug to always make sure that something is printed log.setLevel("debug") # Create a real subLogger and a localSubLogger # with the same "name" subLog = log.getSubLogger("child") localSubLog = log.getSubLogger("child") # Print and capture a message with the real sublogger
def getLevel(self):
    """
    Return the name of the effective level of the underlying logger.

    :return: the name of the level
    """
    effectiveLevel = self._logger.getEffectiveLevel()
    return LogLevels.getLevel(effectiveLevel)
def getLevel(self):
    """
    Return the name corresponding to the stored level value.

    :return: the name of the level
    """
    # translate the integer level kept on this object back to its name
    return LogLevels.getLevel(self._level)
_, _, _ = gLoggerReset() log = gLogger.getSubLogger("log") log.setLevel("notice") anotherLog = gLogger.getSubLogger("log") assert log.getLevel() == anotherLog.getLevel() assert log == anotherLog # Run the tests for all the log levels and exceptions # We may need to rerun the test if we are unlucky and the timestamps # don't match @flaky(max_runs=3) @pytest.mark.parametrize("logLevel", ["exception"] + [lvl.lower() for lvl in LogLevels.getLevelNames()]) def test_localSubLoggerObject(logLevel): """ Create a local subLogger and compare its output with the standard subLogger for all the log levels """ capturedBackend, log, _ = gLoggerReset() # Set the level to debug to always make sure that something is printed log.setLevel("debug") # Create a real subLogger and a localSubLogger # with the same "name" subLog = log.getSubLogger("child") localSubLog = log.getSubLogger("child") # Print and capture a message with the real sublogger