示例#1
0
def init_logger(log_name='corelibrary', log_path=None, log_level=10, stdout_level=30, file_level=10):
    """Create a logger with optional stdout and rotating-file handlers.

    :param log_name: name of the logger to configure
    :param log_path: log file path or directory; ``None`` means
        ``pace.log`` in the current working directory, a directory means
        ``pace.log`` inside that directory
    :param log_level: level set on the logger itself (10 == DEBUG)
    :param stdout_level: level for the console handler, or ``None`` to skip it
    :param file_level: level for the rotating file handler, or ``None`` to skip it
    :return: the configured ``logging.Logger``
    """

    _short_format = '%(asctime)s %(levelname)-5s %(message)s'
    _long_format = '%(asctime)s %(module)-14s %(levelname)-8s %(message)-120s'

    # os.path.join replaces the original hand-rolled separator concatenation.
    if log_path is None:
        log_path = os.path.join(os.getcwd(), 'pace.log')
    elif os.path.isdir(log_path):
        log_path = os.path.join(log_path, 'pace.log')

    core_logger = getLogger(log_name)

    if stdout_level is not None:
        stdout_handler = StreamHandler()
        stdout_handler.setFormatter(Formatter(_short_format, '%Y%m%d %H%M%S'))
        stdout_handler.setLevel(stdout_level)
        core_logger.addHandler(stdout_handler)

    if file_level is not None:
        file_handler = RotatingFileHandler(log_path, backupCount=10)
        # Start a fresh file for this run; older runs stay as numbered backups.
        file_handler.doRollover()
        file_handler.setFormatter(Formatter(_long_format, '%Y-%m-%d %H:%M:%S'))
        file_handler.setLevel(file_level)
        core_logger.addHandler(file_handler)

    core_logger.setLevel(log_level)
    return core_logger
示例#2
0
def _file_handler(loglevel, log_file, log_format, command):
    """ Add a rotating file handler for the current Faceswap session. 1 backup is always kept.

    Parameters
    ----------
    loglevel: str
        The requested log level that messages should be logged at.
    log_file: str
        The location of the log file to write Faceswap's log to
    log_format: :class:`FaceswapFormatter:
        The formatting to store log messages as
    command: str
        The Faceswap command that is being run. Used to dictate whether the log file should
        have "_gui" appended to the filename or not.

    Returns
    -------
    :class:`logging.RotatingFileHandler`
        The logging file handler
    """
    if log_file is not None:
        filename = log_file
    else:
        filename = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), "faceswap")
        # Windows has issues sharing the log file with sub-processes, so log GUI separately
        filename += "_gui.log" if command == "gui" else ".log"

    should_rotate = os.path.isfile(filename)
    log_file = RotatingFileHandler(filename, backupCount=1)
    if should_rotate:
        log_file.doRollover()
    log_file.setFormatter(log_format)
    log_file.setLevel(loglevel)
    return log_file
示例#3
0
def setup_logging_on_disk(log_dir, backup_count=5):
    """Attach a rotating on-disk DEBUG handler to the root and "webcontent" loggers.

    The previous log (if any) is rotated at startup; "webcontent" records
    are formatted with the record's ``url`` attribute in the prefix.

    :param log_dir: directory in which the file named "log" is created
    :param backup_count: number of rotated backups to keep
    """
    from logging.handlers import RotatingFileHandler

    root = logging.getLogger()
    webcontent = logging.getLogger("webcontent")

    class Formatter(logging.Formatter):
        # Choose the format per-record: webcontent records carry a "url" attribute.
        def formatMessage(self, record):
            fmt = ("%(levelname)s %(name)s: [%(url)s] %(message)s"
                   if record.name == "webcontent" else
                   "%(levelname)s: %(message)s")
            return fmt % record.__dict__

    # exist_ok=True removes the race between the original isdir() check
    # and makedirs(), which could raise if the directory appeared in between.
    os.makedirs(log_dir, exist_ok=True)

    handler = RotatingFileHandler(os.path.join(log_dir, "log"),
                                  backupCount=backup_count,
                                  delay=True)
    handler.setFormatter(Formatter())
    handler.doRollover()  # start each run with a fresh file
    handler.setLevel(logging.DEBUG)

    for logger in (root, webcontent):
        logger.addHandler(handler)
示例#4
0
def make_logger(name: str,
                stream_level=logging.WARN,
                file_level=logging.DEBUG):
    """Build a logger that writes to stdout and to a rotating file.

    The on-disk log ("<name>.log") rotates at 65536 bytes and is also rolled
    once per call, so one historic file from the previous run is always kept.
    The stream handler defaults to a higher level (WARN) than the file
    handler (DEBUG) to keep the console quiet while the file stays verbose.

    :param name: name of the logger (also used for the log filename)
    :param stream_level: logging level for the stdout handler
    :param file_level: logging level for the file handler
    :returns: logging.Logger
    """
    log = logging.getLogger(name)
    log.setLevel(logging.DEBUG)

    fmt = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")

    console = logging.StreamHandler(sys.stdout)
    console.setFormatter(fmt)
    console.setLevel(stream_level)
    log.addHandler(console)

    on_disk = RotatingFileHandler(f"{name}.log",
                                  maxBytes=65536,
                                  backupCount=1)
    on_disk.setFormatter(fmt)
    on_disk.setLevel(file_level)
    log.addHandler(on_disk)
    on_disk.doRollover()

    return log
示例#5
0
 def __init__(self, ind_logs, logpath, stdout, curCmd):
     """Set up sensor-node output logging in one of two modes.

     :param ind_logs: True for one logfile per sensor node (loggers are
         presumably created later and kept in ``self.loggerdict``); False
         for a single cumulative rotating logfile shared by all nodes.
     :param logpath: directory the logfile(s) are written to
     :param stdout: stdout flag, stored but not read here
     :param curCmd: current command, stored but not read here
     """
     # NOTE(review): sys.logger is not a standard attribute of the sys
     # module -- presumably it is attached elsewhere in this project; confirm.
     self.individual_logs = ind_logs
     self.path = logpath
     self.stdout = stdout
     self.cmd = curCmd
     if ind_logs:
         sys.logger.debug(
             "Starting SensorNode-Output Log in individual logfile mode. Writing logfiles to: %s "
             % self.path
         )  #@UndefinedVariable (this is to ignore the error in eclipse)
         self.loggerdict = dict()
     else:
         sys.logger.debug(
             "Starting SensorNode-Output Log in cumulative logfile mode. Writing logfile to: %s "
             % self.path
         )  #@UndefinedVariable (this is to ignore the error in eclipse)
         #initialize the sensor node logger
         logfile = logpath.rstrip('/') + '/parallel-wsn-sensornodes.log'
         self.snlogger = logging.getLogger('sensornode')
         self.snlogger.setLevel(logging.DEBUG)
         lfh = RotatingFileHandler(logfile,
                                   mode='a',
                                   maxBytes=1000000,
                                   backupCount=5)
         self.snlogger.addHandler(
             lfh
         )  #@UndefinedVariable (this is to ignore the error in eclipse)
         formatter = logging.Formatter('%(asctime)s %(message)s')
         lfh.setFormatter(formatter)
         #don't log to console
         self.snlogger.propagate = False
         #on init start a new file if existing one is not empty
         if os.path.getsize(logfile) > 0:
             lfh.doRollover()
示例#6
0
def get_batch_logger(logdir,
                     project_name,
                     loglevel,
                     backupcount,
                     name=__name__):
    """
    Return a rotating-file logger dedicated to mirroring one project.

    Any previous log is rotated immediately (maxBytes=0 disables
    size-based rotation during the run) and the logger is made
    non-propagating with exactly this one handler.

    :param logdir: log directory
    :param project_name: name of the project
    :param loglevel: logging level
    :param backupcount count of log files to keep around
    :param name name of the logger
    :return logger
    """
    batch_logger = logging.getLogger(name)

    log_path = os.path.join(logdir, "%s.log" % project_name)

    file_handler = RotatingFileHandler(log_path,
                                       mode='a',
                                       maxBytes=0,
                                       backupCount=backupcount)
    file_handler.setFormatter(logging.Formatter(
        "%(asctime)s - %(levelname)s: %(message)s",
        '%m/%d/%Y %I:%M:%S %p'))
    file_handler.doRollover()

    batch_logger.setLevel(loglevel)
    batch_logger.propagate = False
    batch_logger.handlers = []
    batch_logger.addHandler(file_handler)

    return batch_logger
示例#7
0
def main():
    """Start the Benten CWL language server over stdio or TCP.

    Parses --mode/--addr/--debug, configures a rotating log file in the
    configured log directory, then serves language-server requests until
    shutdown.  Relies on module-level names defined elsewhere in this file
    (``Configuration``, ``logger``, ``LangServer``, ``JSONRPC2Connection``,
    ``ReadWriter``, ``ForkingTCPServer``, ``LangserverTCPTransport`` and the
    version strings).
    """
    import argparse

    config = Configuration()

    log_fn = pathlib.Path(config.log_path, "benten-ls.log")
    # Rotate only when a previous server run left a log file behind.
    roll_over = log_fn.exists()

    handler = RotatingFileHandler(log_fn, backupCount=5)
    formatter = logging.Formatter(
        fmt='[%(levelname)-7s] %(asctime)s (%(name)s) %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    if roll_over:
        handler.doRollover()

    # logging.basicConfig(filename=log_fn, filemode="w", level=logging.INFO)
    # logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(description="")
    parser.add_argument("--mode",
                        default="stdio",
                        help="communication (stdio|tcp)")
    parser.add_argument("--addr",
                        default=4389,
                        help="server listen (tcp)",
                        type=int)
    parser.add_argument("--debug", action="store_true")

    args = parser.parse_args()

    # Root logger level depends on --debug; the file handler is attached to
    # the module-level logger, not the root.
    logging.basicConfig(
        level=(logging.DEBUG if args.debug else logging.WARNING))
    logger.addHandler(handler)

    logger.info(
        f"Benten {__version__}: CWL Language Server from Rabix (Seven Bridges)"
    )
    logger.info(f"ruamel.yaml: {__ruamel_version__}")
    logger.info(f"cwl-format: {__cwl_fmt_version__}")

    config.initialize()

    if args.mode == "stdio":
        logger.info("Reading on stdin, writing on stdout")
        s = LangServer(conn=JSONRPC2Connection(
            ReadWriter(sys.stdin.buffer, sys.stdout.buffer)),
                       config=config)
        s.run()
    elif args.mode == "tcp":
        host, addr = "0.0.0.0", args.addr
        logger.info("Accepting TCP connections on %s:%s", host, addr)
        ForkingTCPServer.allow_reuse_address = True
        ForkingTCPServer.daemon_threads = True
        LangserverTCPTransport.config = config
        s = ForkingTCPServer((host, addr), LangserverTCPTransport)
        try:
            s.serve_forever()
        finally:
            s.shutdown()
示例#8
0
File: log.py  Project: OpenGrok/OpenGrok
def get_batch_logger(logdir, project_name, loglevel, backupcount,
                     name=__name__):
    """
    Build a per-project rotating file logger for mirroring output.
    The existing log is rotated right away and the logger is reset so this
    handler is its only one; propagation to ancestors is disabled.
    :param logdir: log directory
    :param project_name: name of the project
    :param loglevel: logging level
    :param backupcount count of log files to keep around
    :param name name of the logger
    :return logger
    """
    target = os.path.join(logdir, project_name + ".log")

    rotating = RotatingFileHandler(target, maxBytes=0, mode='a',
                                   backupCount=backupcount)
    rotating.setFormatter(
        logging.Formatter("%(asctime)s - %(levelname)s: %(message)s",
                          '%m/%d/%Y %I:%M:%S %p'))
    rotating.doRollover()

    result = logging.getLogger(name)
    result.setLevel(loglevel)
    result.propagate = False
    result.handlers = []
    result.addHandler(rotating)

    return result
示例#9
0
def configure_logger():
    """Attach console and rotating-file handlers to the module-level logger.

    Logs go to ``logs/build.log`` next to this file; an existing log is
    rotated so each run starts fresh (5 backups kept).  The logger itself is
    set to DEBUG.
    """
    log_dir = Path(os.path.dirname(__file__)) / 'logs'
    log_file = log_dir / 'build.log'

    # parents+exist_ok replaces the racy exists()/mkdir() pair.
    log_dir.mkdir(parents=True, exist_ok=True)
    # Roll only when a previous run left a log behind (the if/else flag
    # assignment collapsed to a single boolean expression).
    do_rollover = log_file.exists()

    stream_handler = logging.StreamHandler()
    file_handler = RotatingFileHandler(str(log_file), backupCount=5)
    if do_rollover:
        file_handler.doRollover()
    formatter = logging.Formatter(fmt=('%(asctime)s.%(msecs)03d '
                                       '%(name)-18s '
                                       '%(levelname)-8s '
                                       '%(threadName)-10s '
                                       '%(message)s'),
                                  datefmt='%H:%M:%S')

    stream_handler.setFormatter(formatter)
    file_handler.setFormatter(formatter)

    logger.setLevel(logging.DEBUG)
    logger.addHandler(stream_handler)
    logger.addHandler(file_handler)
示例#10
0
def init_logger(log_level, log_location, app_name):
    """Configure the root logger to write to a rotating file and the console.

    :param log_level: requested level, translated via ``_define_log_level``
    :param log_location: directory for the log file (vars/user expanded);
        the file is named "<app_name>_log.txt"
    :param app_name: application name, embedded in the log format
    :raises BotsicoteException: when the expanded location is not a directory
    """
    level = _define_log_level(log_level)

    root = logging.getLogger()
    root.setLevel(level)

    fmt = logging.Formatter("%(asctime)s :: %(levelname)s :: " + app_name + " ::  %(message)s")
    location = expand_var_and_user(log_location)

    # Guard clause: bail out early when the directory is missing.
    if not os.path.isdir(location):
        raise BotsicoteException("Log directory: %s does not exist, create it before relaunching the program" %
                                 location)

    target = os.path.join(location, "%s_log.txt" % app_name)
    need_roll = os.path.isfile(target)

    # File handler: 10 backups, rotating at 2 MB; roll immediately when a
    # previous run's log exists.
    file_handler = RotatingFileHandler(target, backupCount=10, maxBytes=2*1024*1024)
    file_handler.setLevel(level)
    file_handler.setFormatter(fmt)
    if need_roll:
        file_handler.doRollover()
    root.addHandler(file_handler)

    # Console handler mirrors the same format and level.
    console = logging.StreamHandler()
    console.setLevel(level)
    console.setFormatter(fmt)
    root.addHandler(console)
示例#11
0
    def logsetup(self, logbasepath, level="INFO"):
        """Create the "sofa-display" rotating log under *logbasepath*.

        Rotates any pre-existing logfile, mirrors output to the console at
        INFO, stores the logger on ``self.log`` and symlinks the logfile
        into *logbasepath*.

        :param logbasepath: base log directory; a "sofa-display"
            subdirectory is created for the rotated files
        :param level: level name for the file handler (e.g. "INFO")
        """
        logname = "sofa-display"
        log_formatter = logging.Formatter('%(asctime)-6s.%(msecs).03d %(levelname).1s%(lineno)4d: %(message)s','%m/%d %H:%M:%S')
        logpath = os.path.join(logbasepath, logname)
        logfile = os.path.join(logpath, "%s.log" % logname)
        loglink = os.path.join(logbasepath, "%s.log" % logname)
        if not os.path.exists(logpath):
            os.makedirs(logpath)

        # check if a log file already exists and if so rotate it
        needRoll = os.path.isfile(logfile)
        log_handler = RotatingFileHandler(logfile, mode='a', maxBytes=1024*1024, backupCount=5)
        log_handler.setFormatter(log_formatter)
        log_handler.setLevel(getattr(logging, level))
        if needRoll:
            log_handler.doRollover()

        console = logging.StreamHandler()
        # BUG FIX: the console handler was given the *file handler* as its
        # formatter (setFormatter(log_handler)); it only worked by accident
        # through Handler.format delegation.  Use the Formatter instance.
        console.setFormatter(log_formatter)
        console.setLevel(logging.INFO)

        logging.getLogger(logname).addHandler(console)

        self.log = logging.getLogger(logname)
        self.log.setLevel(logging.INFO)
        self.log.addHandler(log_handler)
        if not os.path.exists(loglink):
            os.symlink(logfile, loglink)

        self.log.info('-- -----------------------------------------------')
示例#12
0
def set_logging(component, logging_level=logging.DEBUG):
    """ Configure logging for one component.

        Writes to /var/log/blivet-gui/<component>.log when LOG_DIR is a
        writable directory (rotating any existing file, 5 backups kept);
        otherwise installs a NullHandler and returns an empty file path.

        :param component: name of a component
        :type component: str
        :param logging_level: loglevel
        :type logging_level: int

    """
    log_dir_usable = os.path.isdir(LOG_DIR) and os.access(LOG_DIR, os.W_OK)

    if log_dir_usable:
        log_file = "/var/log/blivet-gui/%s.log" % component

        rotate = os.path.isfile(log_file)

        log_handler = RotatingFileHandler(log_file, backupCount=5)
        log_handler.setLevel(logging_level)
        log_handler.setFormatter(logging.Formatter('%(levelname)s:%(name)s: %(message)s'))

        if rotate:
            log_handler.doRollover()
    else:
        log_handler = logging.NullHandler()
        log_file = ""

    logger = logging.getLogger(component)
    logger.addHandler(log_handler)
    logger.setLevel(logging_level)

    return log_file, logger
示例#13
0
    def __init__(self, version):
        """Bootstrap the AnkiFlash add-on: reset logging, resolve add-on and
        media paths, configure a rotating logfile and open the generator
        dialog.

        :param version: add-on version string passed on to GeneratorDialog
        """

        # disable old log process
        logging.shutdown()

        # Directories
        # NOTE(review): "1129289384" is presumably the AnkiWeb add-on id --
        # confirm against the add-on listing.
        self.addonDir = join(mw.pm.addonFolder(), "1129289384")
        self.mediaDir = mw.col.media.dir()
        os.makedirs(self.mediaDir, exist_ok=True)

        # Paths
        self.iconPath = join(self.addonDir, r'resources/anki.png')
        self.ankiCsvPath = join(self.addonDir, Constant.ANKI_DECK)

        # Config Logging (Rotate Every 10MB)
        # NOTE(review): maxBytes below is 50 MB, not the 10 MB this comment
        # claims -- confirm which is intended.
        os.makedirs(join(self.addonDir, r'logs'), exist_ok=True)
        self.ankiFlashLog = join(self.addonDir, r'logs/ankiflash.log')

        rfh = RotatingFileHandler(
            filename=self.ankiFlashLog, maxBytes=50000000, backupCount=3, encoding='utf-8')
        # Rotate only when a previous run's log file exists on disk.
        should_roll_over = os.path.isfile(self.ankiFlashLog)
        if should_roll_over:
            rfh.doRollover()
        logging.basicConfig(level=logging.INFO,
                            format=u"%(asctime)s - %(threadName)s [%(thread)d] - %(message)s",
                            datefmt="%d-%b-%y %H:%M:%S",
                            handlers=[rfh])

        # Create Generator Dialog
        self.generator = GeneratorDialog(
            version, self.iconPath, self.addonDir, self.mediaDir)
        self.generator.show()
示例#14
0
    def __init__(self,
                 name="logger",
                 level=logging.DEBUG,
                 console_level=logging.INFO,
                 mode='a',
                 config=None):
        """Wrap a named logger with one rotating file handler (a file per
        day, named YYYYMMDD.log) and one console handler.

        :param name: logger name
        :param level: level for the logger and the file handler
        :param console_level: level for the console handler
        :param mode: unused here (the file handler is hard-coded to 'a')
        :param config: optional config object supplying logFile,
            logmaxBytes, logbackupCount and logrotatestarted; when None or
            config.noconf, built-in defaults under ./ipbehaviour are used
        """
        self.logger = logging.getLogger(name)
        # Guard against the same instance attaching handlers more than once
        if len(self.logger.handlers) > 0:
            return None
        # Set the logger's level
        #super().__init__(name)
        # Note: this sets the minimum level; later handler levels can only
        # be equal or higher
        self.logger.setLevel(level)
        # Build a date-stamped string for the log filename, giving one log
        # file per day
        date_time = time.strftime("%Y%m%d", time.localtime(time.time()))
        if config == None or config.noconf:
            log_path_str = os.path.join(
                os.path.abspath(os.path.join(os.getcwd(), "")), "ipbehaviour")
            logmaxBytes = 256 * 1024 * 1024
            logbackupCount = 4
            logrotatestarted = 1
        else:
            log_path_str = config.logFile
            logmaxBytes = config.logmaxBytes
            logbackupCount = config.logbackupCount
            logrotatestarted = config.logrotatestarted

        # Python raises FileNotFoundError when creating a file handler if
        # the directory does not exist, so create it here (the file itself
        # may or may not exist; Python creates it automatically)
        if not os.path.exists(log_path_str):
            os.makedirs(log_path_str)

        logFile = os.path.join(log_path_str, date_time + '.log')
        # Create a handler that logs to a file, then set its level and format.
        # The mode controls how the file is written: 'w' overwrites the
        # previous log on every run, while the default 'a' appends.

        print(logFile)
        formatter = logging.Formatter(
            '%(asctime)-.19s %(levelname)-.1s %(name)-5.5s %(funcName)-15.15s "%(message)s"'
        )
        fh = RotatingFileHandler(logFile,
                                 'a',
                                 maxBytes=int(logmaxBytes),
                                 backupCount=int(logbackupCount))
        #fh = logging.FileHandler(logFile, mode)

        # Optionally roll the file over at startup when configured.
        if logrotatestarted == 1:
            fh.doRollover()
        fh.setLevel(level)
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)
        self.logger.w_filehandler = fh

        # Console handler
        ch = logging.StreamHandler()
        ch.setFormatter(formatter)
        ch.setLevel(console_level)
        self.logger.addHandler(ch)
        self.logger.w_consolehandle = ch
示例#15
0
 def doRollover(self, callback=None):
     """Rotate the log, then report the rotated-out filename via a callback.

     :param callback: optional callable invoked with the ".1" backup name;
         when omitted, ``self.callback`` is used instead.
     """
     # NOTE(review): get_old_filename() is defined elsewhere -- presumably
     # it returns the path being rotated out, making newfile its ".1" backup.
     newfile = self.get_old_filename() + '.1'
     logger.debug('doing rollover')
     RotatingFileHandler.doRollover(self)
     # NOTE(review): newfile is a string concatenation and can never be
     # None, so this guard is always true -- confirm the intended condition.
     if newfile is not None:
         if callback is None:
             self.callback(newfile)
         else:
             callback(newfile)
示例#16
0
File: bouncer.py  Project: fstr1ng/pyfon
def rotateLogs(path, logs_count, owner):
    """Rotate the bounce log, then hand the fresh file to *owner* with 0600.

    :param path: path of the logfile to rotate
    :param logs_count: number of rotated backups to keep
    :param owner: user name (also used as group name) that gets the new file
    """
    rotator = RotatingFileHandler(path, maxBytes=0, backupCount=logs_count)
    rotator.doRollover()

    pw_entry = pwd.getpwnam(owner)
    gr_entry = grp.getgrnam(owner)
    os.chown(path, pw_entry.pw_uid, gr_entry.gr_gid)
    os.chmod(path, 0o600)
示例#17
0
 def get_logging_handler(self, log_level=None):
     """Return a rotating handler for hydropick.log in the application home.

     Any existing log is rotated immediately so each session starts fresh;
     the formatter and filter come from the instance's helper methods.
     """
     from logging.handlers import RotatingFileHandler
     target = os.path.join(self.application_home, 'hydropick.log')
     rotating = RotatingFileHandler(target, backupCount=5)
     rotating.doRollover()
     rotating.setFormatter(self.get_formatter(log_level))
     rotating.addFilter(self.get_filter(log_level))
     return rotating
示例#18
0
	def shouldRollover(self, record):
		"""Roll on size like RotatingFileHandler, plus once per day.

		Size-triggered rollovers are performed immediately here; in
		addition, return 1 (truthy) when the current date string is newer
		than ``self.current`` so the caller rotates for the new day.
		"""
		if RotatingFileHandler.shouldRollover(self, record):
			RotatingFileHandler.doRollover(self)

		t = time.strftime("%Y%m%d", time.localtime(time.time()))
		# cmp() was removed in Python 3; for these fixed-width date strings
		# a direct lexicographic comparison is equivalent to cmp(...) < 0.
		if self.current < t:
			return 1

		return 0
示例#19
0
    def log_setup(self, name=None, level="INFO", errorOnly=[], log_path=None):
        """Create a rotating-file + console logger and store it on self.log.

        :param name: logger name; defaults to this file's name sans extension
        :param level: level name (an attribute of ``logging``) for both handlers
        :param errorOnly: logger-name prefixes to clamp to ERROR and above
        :param log_path: log directory; falls back to ``self.log_directory``
        :returns: the configured logger
        """
        # NOTE(review): the mutable default errorOnly=[] is shared between
        # calls; it is only iterated here so it is safe, but fragile.
        if not log_path:
            if hasattr(self, 'log_directory'):
                log_path = self.log_directory
        # NOTE(review): if neither branch set log_path it stays None and
        # os.path.isdir(None) below raises TypeError -- confirm callers.

        if not name:
            name = __file__.rsplit('.', 1)[0]

        log_formatter = logging.Formatter(
            '%(asctime)-6s.%(msecs).03d %(filename).8s %(levelname).1s%(lineno)4d: %(message)s',
            '%m/%d %H:%M:%S')

        # Single directory check (the original checked and created twice).
        if not os.path.isdir(log_path):
            os.makedirs(log_path)

        log_file = os.path.join(log_path, "%s.log" % name)

        # Rotate only when a previous run's log already exists.
        roll = os.path.isfile(log_file)

        log_handler = RotatingFileHandler(log_file,
                                          mode='a',
                                          maxBytes=1024 * 1024,
                                          backupCount=5)
        log_handler.setFormatter(log_formatter)
        log_handler.setLevel(getattr(logging, level))
        if roll:
            log_handler.doRollover()

        console = logging.StreamHandler()
        # BUG FIX: the console handler was handed the *file handler* as its
        # formatter (setFormatter(log_handler)); use the Formatter instance.
        console.setFormatter(log_formatter)
        console.setLevel(getattr(logging, level))

        logging.getLogger(name).addHandler(console)

        log = logging.getLogger(name)
        log.setLevel(getattr(logging, level))
        log.addHandler(log_handler)

        log.info('-- %s -----------------------------------------------' %
                 name)

        # Clamp any configured noisy loggers to ERROR and above.
        for lg in logging.Logger.manager.loggerDict:
            #self.log.info('.. Active logger: %s' % lg)
            for item in errorOnly:
                if lg.startswith(item):
                    log.debug('.. Logger set to error and above: %s' % lg)
                    logging.getLogger(lg).setLevel(logging.ERROR)
        self.log = log

        return log
示例#20
0
    def doRollover(self):
        """
        Override base class method to make the new log file group writable.
        """
        import stat

        # Rotate the file first.
        RotatingFileHandler.doRollover(self)

        # Add group write to the current permissions.
        # BUG FIX: the original passed the literal 0777 -- a Python 2 octal
        # spelling that is a SyntaxError in Python 3 and granted far more
        # than group write, contradicting the docstring.  OR-ing S_IWGRP
        # into the existing mode matches the stated intent.
        currMode = os.stat(self.baseFilename).st_mode
        os.chmod(self.baseFilename, currMode | stat.S_IWGRP)
示例#21
0
def get_rot_file_logger(name, path):
    """Returns a logger with a rotating file handler"""
    rot_handler = RotatingFileHandler(path, backupCount=5, encoding="utf-8")
    rot_handler.doRollover()  # rollover existing log files
    rot_handler.terminator = ""  # click.echo already adds a newline

    file_logger = logging.getLogger(name)
    file_logger.addHandler(rot_handler)
    file_logger.setLevel(logging.DEBUG)
    return file_logger
示例#22
0
 def _get_handler(self, log_file, count=3):
     """Build a rotating, UTF-8, truncate-on-open handler for *log_file*.

     The file is opened lazily (delay=True); when a previous log already
     exists on disk it is rotated out, keeping up to *count* backups.
     """
     rotating = RotatingFileHandler(log_file,
                                    delay=True,
                                    mode="w",
                                    backupCount=count,
                                    encoding="utf-8")
     self._formatter(rotating)
     if os.path.isfile(log_file):
         rotating.doRollover()
     return rotating
示例#23
0
class LoggerFactory:
    """Singleton factory that equips loggers with one shared console handler
    and one shared rotating file handler.

    The first instantiation builds the handlers (rolling over an existing
    log file); later instantiations reuse them via the ``__new__``
    singleton.  ``LOG_FORMAT`` must be defined at module level.
    """

    # Flipped (on the singleton instance) once the handlers are built.
    initialized = False

    def __init__(self,
                 folder: str = '.',
                 filename: str = 'application.log',
                 console_level: str = "INFO",
                 file_level: str = "INFO"):
        if not self.initialized:
            self.initialized = True
            self._log_folder = folder
            # os.path.join replaces the manual folder + os.sep + filename.
            self._log_filename = os.path.join(folder, filename)
            # makedirs(exist_ok=True) removes the exists()/mkdir() race and
            # also creates missing parent directories (os.mkdir could not).
            os.makedirs(self._log_folder, exist_ok=True)
            # Roll over only when a previous run left a log file behind.
            should_roll_over = os.path.isfile(self._log_filename)
            self._formatter = logging.Formatter(LOG_FORMAT)
            self._console_handler = logging.StreamHandler()
            self._console_handler.setLevel(
                LoggerFactory._translate_level(console_level))
            self._console_handler.setFormatter(self._formatter)
            self._file_handler = RotatingFileHandler(self._log_filename,
                                                     mode='a',
                                                     backupCount=50)
            self._file_handler.setLevel(
                LoggerFactory._translate_level(file_level))
            self._file_handler.setFormatter(self._formatter)
            if should_roll_over:
                self._file_handler.doRollover()

    def __new__(cls, *args, **kwargs):
        # Classic singleton: one shared instance per process.
        if not hasattr(cls, '_inst'):
            cls._inst = super(LoggerFactory, cls).__new__(cls)
        return cls._inst

    @staticmethod
    def _translate_level(level):
        """Map a level name to its logging constant; unknown names -> INFO."""
        return {
            "DEBUG": logging.DEBUG,
            "WARNING": logging.WARNING,
            "ERROR": logging.ERROR,
            "CRITICAL": logging.CRITICAL,
        }.get(level, logging.INFO)

    def get_new_logger(self, name: str):
        """Return logger *name* (at DEBUG) wired to the shared handlers."""
        logger = logging.getLogger(name)
        logger.setLevel(logging.DEBUG)
        logger.addHandler(self._console_handler)
        logger.addHandler(self._file_handler)
        return logger
示例#24
0
 def _logging_handler_default(self):
     """Trait default: a rotating file handler for hydropick.log.

     Rotates any previous log so each session writes a fresh file, and
     formats records with a timestamp, logger name and level.
     """
     from logging.handlers import RotatingFileHandler
     logfile = os.path.join(self.application_home, 'hydropick.log')
     # Converted from the Python 2 print statement (a SyntaxError on
     # Python 3); kept for debugging parity with the original.
     print(logfile, self.application_home)
     handler = RotatingFileHandler(logfile, backupCount=5)
     handler.doRollover()
     # format output
     datefmt = '%Y%m%d:%H%M%S'
     line_fmt = '%(asctime)s :: %(name)s : %(levelname)s : %(message)s'
     formatter = logging.Formatter(line_fmt, datefmt=datefmt)
     handler.setFormatter(formatter)
     return handler
示例#25
0
def initLogger():
    """Configure root logging to app.log while preserving the previous run's log.

    The previous log (if any) is rotated into the single backup slot; the
    handler then also rotates at 100000 bytes during the run.
    """
    logname = getProjectRelativePath("app.log")
    # BUG FIX: check for an existing file BEFORE creating the handler.  With
    # mode="w" the constructor truncated the old log immediately, so the
    # later isfile() check rotated an already-empty file and the previous
    # run's log was lost.  delay=True keeps the old file intact until the
    # rollover (or the first emit) happens.
    need_roll = os.path.isfile(logname)
    handler = RotatingFileHandler(logname,
                                  mode="w",
                                  maxBytes=100000,
                                  backupCount=1,
                                  delay=True)
    # (The original also set handler.suffix, a TimedRotatingFileHandler
    # attribute with no effect on RotatingFileHandler; it was dropped.)
    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=logging.INFO,
        handlers=[handler])
    if need_roll:  # log already exists, roll over!
        handler.doRollover()
示例#26
0
File: utils.py  Project: kfatehi/miro
 def doRollover(self):
     """Rotate the log, tolerating Windows file-sharing errors.

     NOTE(review): ``WindowsError`` only exists on Windows (it is an alias
     of OSError in Python 3); on other platforms these except clauses would
     themselves raise NameError.
     """
     # If you shut down Miro then start it up again immediately
     # afterwards, then we get in this squirrely situation where
     # the log is opened by another process.  We ignore the
     # exception, but make sure we have an open file.  (bug #11228)
     try:
         RotatingFileHandler.doRollover(self)
     except WindowsError:
         # Reopen the base file so logging can continue even if rotation failed.
         if not self.stream or self.stream.closed:
             self.stream = open(self.baseFilename, "a")
         try:
             RotatingFileHandler.doRollover(self)
         except WindowsError:
             # Still locked by the other process -- skip rotating this time.
             pass
示例#27
0
def setup_custom_logger(logfile=None, quiet=False):
    """Configure the global logger from config.settings.

    Installs either a size-based or a time-based rotating file handler
    (depending on the logRotateAfterDays setting), applies the configured
    console/file levels, adds the sensitive-data filter, silences noisy
    third-party loggers, and optionally rolls the log at startup.

    :param logfile: explicit log file path; when None the configured
        filename is used
    :param quiet: when True, the console handler's level is left untouched
    :return: the configured global ``logger``

    NOTE(review): relies on module globals defined elsewhere in this file
    (``config``, ``logger``, ``console_logger``, ``LOGGER_DEFAULT_FORMAT``,
    ``SensitiveDataFilter``, ``TimedRotatingFileHandler``).
    """
    global logfilename
    maxBytes = config.settings.main.logging.logMaxSize * 1024
    rotateAfterDays = config.settings.main.logging.logRotateAfterDays
    backupCount = config.settings.main.logging.keepLogFiles
    logfileMessage = None
    if logfile is None:
        logfilename = config.settings.main.logging.logfilename
    else:
        logfilename = logfile
        logfileMessage = "Logging to file %s as defined in the command line" % logfilename
    console_log_level = config.settings.main.logging.consolelevel.upper()
    file_log_level = config.settings.main.logging.logfilelevel.upper()
    # set console log level from config file
    if not quiet:
        console_logger.setLevel(console_log_level)
    logger.setLevel(console_log_level)
    # add log file handler
    if rotateAfterDays is None:
        file_logger = RotatingFileHandler(filename=logfilename,
                                          maxBytes=maxBytes,
                                          backupCount=backupCount)
    else:
        file_logger = TimedRotatingFileHandler(filename=logfilename,
                                               when="d",
                                               interval=rotateAfterDays,
                                               backupCount=backupCount)
    file_logger.setFormatter(logging.Formatter(LOGGER_DEFAULT_FORMAT))
    file_logger.setLevel(file_log_level)
    logger.addHandler(file_logger)
    # we need to set the global log level to the lowest defined
    if getattr(logging, file_log_level) < getattr(logging, console_log_level):
        logger.setLevel(file_log_level)
    # add sensitive data filter
    logger.addFilter(SensitiveDataFilter())
    logging.getLogger("requests").setLevel(logging.CRITICAL)
    logging.getLogger("urllib3").setLevel(logging.CRITICAL)
    logging.getLogger('werkzeug').setLevel(logging.CRITICAL)
    # NOTE(review): logfileMessage is None unless a logfile was passed in,
    # so this can log the message "None" -- confirm that is intended.
    logger.info(logfileMessage)
    if not config.settings.main.isFirstStart and config.settings.main.logging.rolloverAtStart:
        logger.info("Starting new log file as configured")
        file_logger.doRollover()
    if config.settings.main.logging.logfileUmask and platform.system().lower(
    ) == "linux":
        umask = config.settings.main.logging.logfileUmask
        if not umask.startswith("0"):
            umask = "0" + umask
        logger.info("Setting umask of log file %s to %s", logfilename, umask)
        os.chmod(logfilename, int(umask, 8))
    return logger
示例#28
0
 def doRollover(self):
     """Rotate the log, tolerating Windows file-sharing errors.

     NOTE(review): ``WindowsError`` only exists on Windows (alias of
     OSError in Python 3); elsewhere these except clauses would raise
     NameError themselves.
     """
     # If you shut down Miro then start it up again immediately
     # afterwards, then we get in this squirrely situation where
     # the log is opened by another process.  We ignore the
     # exception, but make sure we have an open file.  (bug #11228)
     try:
         RotatingFileHandler.doRollover(self)
     except WindowsError:
         # Reopen the base file so logging keeps working even if rotation failed.
         if not self.stream or self.stream.closed:
             self.stream = open(self.baseFilename, "a")
         try:
             RotatingFileHandler.doRollover(self)
         except WindowsError:
             # Still locked by the other process -- give up on this rotation.
             pass
示例#29
0
    def setupLogging(self):
        """
        Set up logging to <configDir>/exe.log with rotation (10 backups of
        500 KB each), apply per-logger levels from the [logging] section of
        the config file, and dump environment details at INFO level.
        """
        try:
            hdlr = RotatingFileHandler(self.configDir / 'exe.log', 'a', 500000,
                                       10)
            # Start a fresh file each run; older runs remain as backups.
            hdlr.doRollover()
        except OSError:
            # ignore the error we get if the log file is locked (still in
            # use) and fall back to a plain, non-rotating file handler
            hdlr = logging.FileHandler(self.configDir / 'exe.log')

        format = "%(asctime)s %(name)s %(levelname)s %(message)s"
        log = logging.getLogger()
        hdlr.setFormatter(logging.Formatter(format))
        log.addHandler(hdlr)

        loggingLevels = {
            "DEBUG": logging.DEBUG,
            "INFO": logging.INFO,
            "WARNING": logging.WARNING,
            "ERROR": logging.ERROR,
            "CRITICAL": logging.CRITICAL
        }

        # Apply levels from the config's [logging] section: the special key
        # "root" targets the root logger, any other key names a logger.
        if self.configParser.has_section('logging'):
            for logger, level in self.configParser._sections["logging"].items(
            ):
                if logger == "root":
                    logging.getLogger().setLevel(loggingLevels[level])
                else:
                    logging.getLogger(logger).setLevel(loggingLevels[level])
        if not G.application.portable:
            log.info("************** eXe logging started **************")
            log.info("version     = %s" % version.version)
            log.info("configPath  = %s" % self.configPath)
            log.info("exePath     = %s" % self.exePath)
            log.info("libPath     = %s" %
                     Path(twisted.__path__[0]).splitpath()[0])
            log.info("browser     = %s" % self.browser)
            log.info("webDir      = %s" % self.webDir)
            log.info("jsDir       = %s" % self.jsDir)
            log.info("localeDir   = %s" % self.localeDir)
            log.info("port        = %d" % self.port)
            log.info("dataDir     = %s" % self.dataDir)
            log.info("configDir   = %s" % self.configDir)
            log.info("locale      = %s" % self.locale)
            log.info("internalAnchors = %s" % self.internalAnchors)
            log.info("License = %s" % self.defaultLicense)
示例#30
0
def init(base_path, file_name):
    """Initialise the module logger (``NAME``) with a rotating file handler.

    The handler is rolled over immediately so every run starts a fresh file,
    keeping up to 3 backups of previous runs.
    """
    log = logging.getLogger(NAME)
    log.setLevel(logging.DEBUG)

    handler = RotatingFileHandler(os.path.join(base_path, file_name),
                                  backupCount=3)
    handler.doRollover()
    handler.setFormatter(logging.Formatter(
        "%(asctime)s - %(levelname)s :: %(message)s"))
    log.addHandler(handler)
示例#31
0
    def doRollover(self):
        """Roll the log over, then compress the rotated backups and prune
        any backups beyond ``backupCount``.

        Errors are logged (not raised) so a failed backup never breaks logging.
        """
        RotatingFileHandler.doRollover(self)
        try:
            dirPath = os.path.dirname(self.baseFilename)
            baseName = os.path.basename(self.baseFilename)
            # BUG FIX: the original pattern '%s.\\\\w+$' demanded a literal
            # backslash before the suffix and so never matched rotated files
            # like "app.log.1".  Use a raw pattern and escape the base name
            # (it usually contains a '.').
            rolledPattern = re.compile(r'%s\.\w+$' % re.escape(baseName))
            for fileName in os.listdir(dirPath):
                if not rolledPattern.match(fileName):
                    continue
                rollingFilePath = os.path.join(dirPath, fileName)
                zipCompressFile(rollingFilePath, baseName)

            removeExceededBackupFiles(dirPath, baseName, self.backupCount)
        except Exception:
            getDefaultLogger().exception('backup trace catch an exception.')
示例#32
0
def setup_logger(name, log_file, level=logging.INFO):
    """
    To setup as many loggers as you want.

    Parameters: name -- logger name; log_file -- path of the log file;
    level -- threshold for the returned logger (default INFO).
    Returns the configured :class:`logging.Logger`.
    """
    # BUG FIX: the original first created a plain logging.FileHandler and then
    # immediately replaced the reference with a RotatingFileHandler, leaking an
    # open file handle for the lifetime of the process.
    # rotate into 10 files when file reaches 50MB
    handler = RotatingFileHandler(log_file, maxBytes=50*1024*1024, backupCount=10)
    handler.setFormatter(formatter)  # NOTE: module-level formatter, defined elsewhere
    handler.doRollover()

    logger = logging.getLogger(name)
    logger.setLevel(level)
    logger.addHandler(handler)

    return logger
示例#33
0
    def _init_logging(self):
        """Attach a per-run rotating file log for the 'fab' logger and record
        basic run/machine information."""
        run_log = RotatingFileHandler(self.project_workspace / 'log.txt',
                                      backupCount=5,
                                      delay=True)
        # start a fresh log.txt for this run, keeping the previous 5 runs
        run_log.doRollover()
        logging.getLogger('fab').addHandler(run_log)

        logger.info(f"{datetime.now()}")
        if self.multiprocessing:
            logger.info(f'machine cores: {cpu_count()}')
            logger.info(f'available cores: {len(os.sched_getaffinity(0))}')
            logger.info(f'using n_procs = {self.n_procs}')
        logger.info(f"workspace is {self.project_workspace}")
示例#34
0
def setup_logging(logdir=NW_DEFAULT_LOG_FILE_PATH, 
                  filelog=True, 
                  rotate=True,
                  maxBytes=NW_DEFAULT_ROTATING_FILES_MAX_BYTE,
                  backupCount=NW_DEFAULT_ROTATING_BACKUP_NUM,
                  loglevel=NW_DEFAULT_LOG_LEVEL,
                  console=False
                  ):
    """Configure the root logger (file and/or console), exactly once per process.

    Subsequent calls are no-ops (guarded by ``_nwLogger_Configured``).
    """
    # Make sure this function is only called once
    global _nwLogger_Configured
    if _nwLogger_Configured:
        return

    # Drop any previous logging setup.
    # BUG FIX: iterate over a *copy* of the handler list -- removing handlers
    # while iterating the live list skips every other handler.
    root = logging.getLogger()
    for handler in list(root.handlers):
        root.removeHandler(handler)

    # Setup the root logger
    log = logging.getLogger()
    log.setLevel(loglevel)

    # Setup the logger format
    log_formatter = logging.Formatter(NW_DEFAULT_LOG_MSG_FORMAT)

    if filelog:
        if not rotate:
            backupCount = 0
        logdir = os.path.abspath(logdir)
        if not os.path.exists(logdir):
            os.mkdir(logdir)
        logfile = _nwBuildFullPath(logdir)
        file_hdlr = RotatingFileHandler(filename=logfile, 
                                        maxBytes=maxBytes, 
                                        backupCount=backupCount,
                                        mode="a"
                                        )
        file_hdlr.doRollover()
        file_hdlr.setFormatter(log_formatter)
        log.addHandler(file_hdlr)
        log.info("Logger Initialized!")

    if console:
        console_hdlr = logging.StreamHandler()
        console_hdlr.setFormatter(log_formatter)
        log.addHandler(console_hdlr)

    _nwLogger_Configured = True
示例#35
0
def setup_local(name=__name__,
                logdir='log',
                console=True,
                text=True,
                loglevel=logging.DEBUG,
                logfile=None):
    """
        function:
                prepares a logger that can print to both the console and a logfile
        parameters:
                name - the logger's name (for the call logging.getLogger(name))
                logdir - location for the output logfile (default is current dir)
                console - print to console
                text - print to logfile
                loglevel - level for logger (logging.DEBUG)
                logfile - name of logfile (default=name + '.log')
        use:
                mylogger.setup(name, etc.)
                logger = mylogger.getLogger(name)
        """
    logdir = os.path.abspath(logdir)
    os.makedirs(logdir, exist_ok=True)
    logger = logging.getLogger(name)
    logger.setLevel(loglevel)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s')

    if not logfile:
        logfile = name + '.log'

    if text:
        log_path = os.path.join(logdir, logfile)
        # BUG FIX: only roll over when a previous, non-empty log exists.
        # The original rolled over unconditionally, which renamed the freshly
        # created empty file and littered empty ".1" backups on every call.
        needs_rollover = os.path.isfile(log_path) and os.path.getsize(log_path) > 0
        txt_handler = RotatingFileHandler(log_path, backupCount=5)
        if needs_rollover:
            txt_handler.doRollover()
        txt_handler.setFormatter(formatter)
        logger.addHandler(txt_handler)

    if console:
        scrn_handler = logging.StreamHandler()
        scrn_handler.setFormatter(formatter)
        logger.addHandler(scrn_handler)

    return logging.getLogger(name)
示例#36
0
def configure(stdout: bool = True,
              rotating=False,
              loglevel: str = 'INFO') -> None:
    '''configure logging: file output (plain or rotating), optional stdout
    handler, and the requested root log level.'''

    log_dir = 'log'
    log_name = 'client_gps.log'

    if not os.path.isdir(log_dir):
        try:
            os.makedirs(log_dir)
        except OSError:
            raise Exception(f'Creation of the log directory "{log_dir}" failed')

    if not log_name.endswith('.log'):
        log_name += '.log'

    log_path = os.path.join(log_dir, log_name)
    had_previous_log = os.path.isfile(log_path)
    fmt = '%(asctime)s: %(levelname)s [%(name)s] %(message)s'
    formatter = logging.Formatter(fmt)
    root = logging.getLogger()

    if rotating:
        file_handler = RotatingFileHandler(filename=log_path,
                                           mode='a',
                                           maxBytes=5 * 1024 * 1024,
                                           backupCount=2)
        file_handler.setFormatter(formatter)
        # only rotate when a previous log already existed
        if had_previous_log:
            file_handler.doRollover()
        root.addHandler(file_handler)
    else:
        logging.basicConfig(filename=log_path,
                            level=logging.INFO,
                            format=fmt)

    if stdout:
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(formatter)
        root.addHandler(stream_handler)

    # translate the level name; reject anything logging doesn't know
    numeric_level = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise Exception(loglevel, 'invalid log level')
    root.setLevel(numeric_level)

    # always log version and command line arguments
    root.info(f'arguments: {sys.argv[1:]}')
示例#37
0
文件: config.py 项目: RichDijk/eXe
    def setupLogging(self):
        """
        setup logging file
        """
        try:
            hdlr = RotatingFileHandler(self.configDir / 'exe.log', 'a', 500000,
                                       10)
            hdlr.doRollover()
        except OSError:
            # rollover can fail (e.g. when the log file is locked);
            # fall back to a plain, non-rotating file handler
            hdlr = logging.FileHandler(self.configDir / 'exe.log')

        hdlr.setFormatter(
            logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s"))
        log = logging.getLogger()
        log.addHandler(hdlr)

        level_by_name = {
            "DEBUG": logging.DEBUG,
            "INFO": logging.INFO,
            "WARNING": logging.WARNING,
            "ERROR": logging.ERROR,
            "CRITICAL": logging.CRITICAL
        }

        if self.configParser.has_section('logging'):
            # NOTE: relies on ConfigParser's private _sections mapping
            for logger_name, level in self.configParser._sections["logging"].items():
                target = logging.getLogger() if logger_name == "root" \
                    else logging.getLogger(logger_name)
                target.setLevel(level_by_name[level])

        if G.application.portable:
            return
        log.info("************** eXe logging started **************")
        log.info("version     = %s" % version.version)
        log.info("configPath  = %s" % self.configPath)
        log.info("exePath     = %s" % self.exePath)
        log.info("libPath     = %s" %
                 Path(twisted.__path__[0]).splitpath()[0])
        log.info("browser     = %s" % self.browser)
        log.info("webDir      = %s" % self.webDir)
        log.info("jsDir       = %s" % self.jsDir)
        log.info("localeDir   = %s" % self.localeDir)
        log.info("port        = %d" % self.port)
        log.info("dataDir     = %s" % self.dataDir)
        log.info("configDir   = %s" % self.configDir)
        log.info("locale      = %s" % self.locale)
        log.info("internalAnchors = %s" % self.internalAnchors)
        log.info("License = %s" % self.defaultLicense)
示例#38
0
文件: log.py 项目: fhirschmann/penchy
def configure_logging(level=logging.INFO, logfile=None):  # pragma: no cover
    """
    Configure the root logger for our purposes.
    """
    root = logging.root
    root.setLevel(level)
    fmt = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s')

    console = logging.StreamHandler()
    console.setFormatter(fmt)
    root.addHandler(console)

    if logfile:
        file_handler = RotatingFileHandler(logfile, backupCount=10)
        # start each session with a fresh file, keeping 10 backups
        file_handler.doRollover()
        file_handler.setFormatter(fmt)
        root.addHandler(file_handler)

    # paramiko's transport logger is very chatty below ERROR
    logging.getLogger('paramiko.transport').setLevel(logging.ERROR)
示例#39
0
 def create_ind_logfile(self, node_name):
     """Create and return a dedicated rotating-file logger for one sensor node.

     The log file lives under ``self.path`` and is rolled over on init when a
     previous, non-empty file exists.
     """
     logfile = self.path.rstrip('/')+'/parallel-wsn-'+node_name+'.log'
     snlogger = logging.getLogger(node_name)
     snlogger.setLevel(logging.DEBUG)
     lfh = RotatingFileHandler(logfile, mode='a', maxBytes=1000000, backupCount=5)
     snlogger.addHandler(lfh)
     lfh.setFormatter(logging.Formatter('%(asctime)s %(message)s'))

     # don't log to console.
     # BUG FIX: the original set sys.logger.propagate, which does not stop
     # *this* logger from propagating (and crashes when sys.logger is unset);
     # the sibling code correctly sets propagate on the node logger itself.
     snlogger.propagate = False

     # on init start a new file if existing one is not empty
     if os.path.getsize(logfile) > 0:
         lfh.doRollover()

     return snlogger
示例#40
0
文件: log.py 项目: gspu/nzbhydra
def setup_custom_logger(logfile=None, quiet=False):
    """Configure the application logger from the config module: console level,
    size- or time-based rotating file handler, and sensitive-data filtering.

    Parameters: logfile -- explicit log file path (overrides the configured
    one); quiet -- when True, leave the console handler's level untouched.
    Returns the configured module-level logger.
    """
    global logfilename
    maxBytes = config.settings.main.logging.logMaxSize * 1024
    rotateAfterDays = config.settings.main.logging.logRotateAfterDays
    backupCount = config.settings.main.logging.keepLogFiles
    logfileMessage = None
    if logfile is None:
        logfilename = config.settings.main.logging.logfilename
    else:
        logfilename = logfile
        logfileMessage = "Logging to file %s as defined in the command line" % logfilename
    console_log_level = config.settings.main.logging.consolelevel.upper()
    file_log_level = config.settings.main.logging.logfilelevel.upper()
    # set console log level from config file
    if not quiet:
        console_logger.setLevel(console_log_level)
    logger.setLevel(console_log_level)
    # add log file handler: size-based unless day-based rotation is configured
    if rotateAfterDays is None:
        file_logger = RotatingFileHandler(filename=logfilename, maxBytes=maxBytes, backupCount=backupCount)
    else:
        file_logger = TimedRotatingFileHandler(filename=logfilename, when="d", interval=rotateAfterDays, backupCount=backupCount)
    file_logger.setFormatter(logging.Formatter(LOGGER_DEFAULT_FORMAT))
    file_logger.setLevel(file_log_level)
    logger.addHandler(file_logger)
    # we need to set the global log level to the lowest defined
    if getattr(logging, file_log_level) < getattr(logging, console_log_level):
        logger.setLevel(file_log_level)
    # add sensitive data filter
    logger.addFilter(SensitiveDataFilter())
    logging.getLogger("requests").setLevel(logging.CRITICAL)
    logging.getLogger("urllib3").setLevel(logging.CRITICAL)
    logging.getLogger('werkzeug').setLevel(logging.CRITICAL)
    # BUG FIX: only log the message when there actually is one; the original
    # unconditionally logged "None" when no logfile argument was given
    if logfileMessage:
        logger.info(logfileMessage)
    if not config.settings.main.isFirstStart and config.settings.main.logging.rolloverAtStart:
        logger.info("Starting new log file as configured")
        file_logger.doRollover()
    if config.settings.main.logging.logfileUmask and platform.system().lower() == "linux":
        umask = config.settings.main.logging.logfileUmask
        if not umask.startswith("0"):
            umask = "0" + umask
        logger.info("Setting umask of log file %s to %s", logfilename, umask)
        os.chmod(logfilename, int(umask, 8))
    return logger
示例#41
0
def setupLogging(logdir=None, scrnlog=True, txtlog=True, loglevel=logging.DEBUG):
    """Configure the 'scrapper' logger with optional rotating-file and
    console handlers.

    Parameters: logdir -- directory for scrapper.log (defaults to the cwd);
    scrnlog -- add a console handler; txtlog -- add the file handler;
    loglevel -- threshold for the logger.
    """
    # BUG FIX: os.path.abspath(None) raises TypeError -- fall back to the cwd
    logdir = os.path.abspath(logdir) if logdir else os.getcwd()
    if not os.path.exists(logdir):
        os.mkdir(logdir)
    log = logging.getLogger('scrapper')
    log.setLevel(loglevel)
    log_formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s :: %(message)s")

    if txtlog:
        txt_handler = RotatingFileHandler(os.path.join(logdir, "scrapper.log"), backupCount=5)
        # start each session with a fresh file, keeping 5 backups
        txt_handler.doRollover()
        txt_handler.setFormatter(log_formatter)
        log.addHandler(txt_handler)
        log.info("Logger initialised.")

    if scrnlog:
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(log_formatter)
        log.addHandler(console_handler)
示例#42
0
def get_config_parser(filepath="/etc/cinder/cinder.conf", write_on_exit=False):
    """Yield a case-sensitive RawConfigParser loaded from *filepath*.

    Intended for use as a (contextmanager-wrapped) generator.  When
    ``write_on_exit`` is true, the previous file is rotated out of the way
    as a numbered backup and the parser's current state is written back.
    """
    # BUG FIX: 'ConfigParser' is the Python 2 module name; fall back to the
    # Python 3 'configparser' so this works on both.
    try:
        from ConfigParser import RawConfigParser
    except ImportError:
        from configparser import RawConfigParser
    from logging.handlers import RotatingFileHandler
    try:
        from collections import OrderedDict
    except ImportError:
        from .cinder.collections import OrderedDict

    parser = RawConfigParser(dict_type=OrderedDict)
    parser.optionxform = str    # make options case-sensitive
    parser.read(filepath)
    try:
        yield parser
    finally:
        if write_on_exit:
            # abuse RotatingFileHandler purely to rotate the old config aside
            handler = RotatingFileHandler(filepath, mode='a', maxBytes=0, backupCount=10)
            handler.doRollover()
            # BUG FIX: close the handler's (empty) stream before rewriting the
            # file -- the original leaked the handle and breaks on Windows
            handler.close()
            with open(filepath, 'w') as fd:
                parser.write(fd)
示例#43
0
def GetLogger(logfile=None, debug=False):
    """Initialise the root logger
        
        Option Parameters:
            logfile: the full path to the logfile to write, if not supplied logging will be to console only
            debug: enable the output of debug log messages
        
        Returns: The root logger class
    """
    import os

    # setup a root logger - this will capture output from all loggers
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)

    # create formatter
    formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(module)-16s %(funcName)-20s %(message)s')

    if logfile is not None:  # BUG FIX: idiomatic identity test (was "!= None")
        # BUG FIX: only roll an existing, non-empty log out of the way.
        # Rolling over a brand-new file just creates empty ".1" backups.
        rollover_needed = os.path.isfile(logfile) and os.path.getsize(logfile) > 0
        # create file handler which logs even debug messages
        fh = RotatingFileHandler(logfile, maxBytes=0, backupCount=14)
        if rollover_needed:
            fh.doRollover()
        fh.setFormatter(formatter)
        fh.setLevel(logging.DEBUG if debug else logging.INFO)
        logger.addHandler(fh)

    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setFormatter(formatter)
    ch.setLevel(logging.DEBUG if debug else logging.INFO)
    logger.addHandler(ch)

    return logger
示例#44
0
def initRotatingLogger(logName, fileName, logDir=None, toScreen=True, toText=True, logLevel=logging.WARNING):
    """Create a logger with optional rotating-file and console handlers.

    Parameters: logName -- logger name; fileName -- log file name;
    logDir -- directory for the file (defaults to the cwd); toScreen /
    toText -- enable the console / file handler; logLevel -- threshold.
    Returns the configured logger.
    """
    # BUG FIX: os.path.exists(None) raises TypeError when logDir is omitted;
    # default to the current working directory instead.
    if logDir is None:
        logDir = os.getcwd()
    if not os.path.exists(logDir):
        os.makedirs(os.path.abspath(logDir))

    logger = logging.getLogger(logName)
    logger.setLevel(logLevel)

    log_formatter = logging.Formatter("%(asctime)s - %(levelname)s :: %(message)s")

    if toText:
        txt_handler = RotatingFileHandler(os.path.join(logDir, fileName), backupCount=5)
        # start each session with a fresh file, keeping 5 backups
        txt_handler.doRollover()
        txt_handler.setFormatter(log_formatter)
        logger.addHandler(txt_handler)
        logger.info("Logger initialized.")

    if toScreen:
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(log_formatter)
        logger.addHandler(console_handler)

    return logger
示例#45
0
def _configure_logger(l, config=None, level=None):
    """Apply level, optional rotating file handler, and quiet mode to
    logger *l*, driven by the (optional) config object."""
    verbose = config.verbose if config else 0
    log_file = config.log_file if config else None
    quiet = config.quiet if config else False

    if level:
        log_level = level
    else:
        # map verbosity count to a level (verbose defaults to 0)
        if verbose > 2:
            log_level = logging.DEBUG
        elif verbose == 2:
            log_level = logging.INFO
        elif verbose == 1:
            log_level = logging.WARN
        elif verbose == 0:
            log_level = logging.ERROR

    if log_file:
        max_size = 1 * 1024 * 1024  # 1MB
        file_handler = RotatingFileHandler(log_file, maxBytes=max_size,
                                           backupCount=5)
        file_handler.addFilter(context_filter)
        l.addHandler(file_handler)
        # pre-emptively rotate once the file is already half full
        if os.path.getsize(log_file) > max_size / 2:
            file_handler.doRollover()

    if quiet:
        l.removeHandler(consolehandler)

    l.setLevel(log_level)

    logger.info('Finished setting up logger %s %s %s', l.name, log_level,
                log_file)
示例#46
0
 def __init__(self, ind_logs, logpath, stdout, curCmd):
     """Set up sensor-node output logging: per-node files (individual mode)
     or one shared rotating 'sensornode' logfile (cumulative mode)."""
     self.individual_logs = ind_logs
     self.path = logpath
     self.stdout = stdout
     self.cmd = curCmd
     if ind_logs:
         sys.logger.debug("Starting SensorNode-Output Log in individual logfile mode. Writing logfiles to: %s " %self.path)
         self.loggerdict = dict()
         return
     sys.logger.debug("Starting SensorNode-Output Log in cumulative logfile mode. Writing logfile to: %s " %self.path)
     # initialize the shared sensor node logger
     logfile = logpath.rstrip('/') + '/parallel-wsn-sensornodes.log'
     self.snlogger = logging.getLogger('sensornode')
     self.snlogger.setLevel(logging.DEBUG)
     handler = RotatingFileHandler(logfile, mode='a', maxBytes=1000000, backupCount=5)
     self.snlogger.addHandler(handler)
     handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
     # keep sensor-node output out of the console
     self.snlogger.propagate = False
     # on init start a new file if the existing one is not empty
     if os.path.getsize(logfile) > 0:
         handler.doRollover()
示例#47
0
 def doRollover(self):
   # Perform the normal rotation first, then compress the newly created
   # ".1" backup with self.compress_cls (gzip or zipfile).
   RotatingFileHandler.doRollover(self)
   
   # Compress the old log.
   # Shift existing compressed backups up by one (.N.z -> .N+1.z), oldest
   # first, so the ".1.z" slot is free for the new backup.
   for i in range(self.backupCount - 1, 0, -1):
     sfn = "%s.%d.z" % (self.baseFilename, i)
     dfn = "%s.%d.z" % (self.baseFilename, i + 1)
     if os.path.exists(sfn):
       if os.path.exists(dfn):
         os.remove(dfn)
       os.rename(sfn, dfn)
     
   old_log = self.baseFilename + ".1"
   if self.compress_cls == gzip:
     # NOTE(review): gzip.open(..., 'w') is a *binary* stream on Python 3
     # while old_log is read in text mode -- this branch looks Python-2
     # only; confirm before running under Python 3.
     output = self.compress_cls.open(old_log + '.z', 'w')
     with open(old_log) as log:
       output.writelines(log)
       output.close()
   if self.compress_cls == zipfile:
     output = self.compress_cls.ZipFile(old_log + '.z', 'w', zipfile.ZIP_DEFLATED)
     output.write(old_log, os.path.split(self.baseFilename)[1])
     output.close() 
   # NOTE(review): if compress_cls is neither gzip nor zipfile, the ".1"
   # backup is removed here without any compressed copy having been written.
   os.remove(old_log)
示例#48
0
 def setupLogging(self):
     """
     setup logging file
     """
     try:
         hdlr = RotatingFileHandler(self.configDir / "exe.log", "a", 500000, 10)
         hdlr.doRollover()
     except OSError:
         # rollover may fail (e.g. the log file is locked);
         # fall back to a plain, non-rotating file handler
         hdlr = logging.FileHandler(self.configDir / "exe.log")
     hdlr.setFormatter(
         logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s"))
     log = logging.getLogger()
     log.addHandler(hdlr)
     level_by_name = {
         "DEBUG": logging.DEBUG,
         "INFO": logging.INFO,
         "WARNING": logging.WARNING,
         "ERROR": logging.ERROR,
         "CRITICAL": logging.CRITICAL,
     }
     if self.configParser.has_section("logging"):
         # NOTE: relies on ConfigParser's private _sections mapping
         for logger_name, level in self.configParser._sections["logging"].items():
             target = logging.getLogger() if logger_name == "root" \
                 else logging.getLogger(logger_name)
             target.setLevel(level_by_name[level])
     log.info("************** eXe logging started **************")
     log.info("configPath  = %s" % self.configPath)
     log.info("exePath     = %s" % self.exePath)
     log.info("browserPath = %s" % self.browserPath)
     log.info("webDir      = %s" % self.webDir)
     log.info("xulDir      = %s" % self.xulDir)
     log.info("localeDir   = %s" % self.localeDir)
     log.info("port        = %d" % self.port)
     log.info("dataDir     = %s" % self.dataDir)
     log.info("configDir   = %s" % self.configDir)
     log.info("locale      = %s" % self.locale)
示例#49
0
 def _initLogging(self, verbose=False, logfile=None, logfilecount=0):
     log = logging.getLogger()
     if logfile == "console":
         h = logging.StreamHandler()
     elif logfile is not None:
         from logging.handlers import RotatingFileHandler
         doRotation = True if os.path.exists(logfile) else False
         h = RotatingFileHandler(logfile, backupCount=logfilecount)
         if doRotation:
             h.doRollover()
     else:
         return
         
     if verbose:
         log.setLevel(logging.DEBUG)
     else:
         log.setLevel(logging.INFO)
         
     formatter = logging.Formatter("%(asctime)s - %(name)s "
                                   "- %(levelname)s - %(message)s")
     h.setFormatter(formatter)
     log.addHandler(h)
         
         
示例#50
0
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

# set logger: root logger at DEBUG, writing to a rotating debug.log that is
# rolled over on every startup (30 backups kept)
import logging
from logging.handlers import RotatingFileHandler

logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

fh = RotatingFileHandler('debug.log', backupCount=30)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
fh.doRollover()
logger.addHandler(fh)


BOT_NAME = 'crawlers'

SPIDER_MODULES = ['crawlers.spiders']
NEWSPIDER_MODULE = 'crawlers.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36' 

# Obey robots.txt rules
ROBOTSTXT_OBEY = False
示例#51
0
        def doRollover(self):
            """
            Rename every backups so the current log can be promoted to backup number one.

            The new log file is empty. If this process fails due to any I/O error, rollover is
            deactivated for this handler and logs will be appended to the current log file indefinitely.
            """
            # NOTE(review): `log` here is presumably a module-level logger and
            # `self._handle_rename_failure` is defined elsewhere on this class
            # (outside this view) -- confirm both before modifying.

            temp_backup_name = "%s.%s" % (self.baseFilename, uuid.uuid4())

            # We need to close the file before renaming it (windows!)
            if self.stream:
                self.stream.close()
                self.stream = None

            # Before doing the rollover, check if the first file will fail at all.
            # If it does, then it is a good thing that we checked otherwise the last
            # backup would have been blown away before encountering the error.

            # Take the scenario where there's only one backup. This means that
            # doRollover would first delete the backup (.1) file so it can make
            # room for the main file to be renamed to .1. However, if the main file
            # can't be renamed, we've effectively lost 50% of the logs we had, which
            # is not cool. Since most of the time only the first file will be locked,
            # we will try to rename it first. If that fails right away as expected,
            # we don't try any rollover and append to the current log file.
            # and raise the _disable_rollover flag.

            try:
                os.rename(self.baseFilename, temp_backup_name)
            except:
                # It failed, so we'll simply append from now on.
                # (deliberate bare except: any failure mode must leave logging alive)
                log.debug(
                    "Cannot rotate log file '%s'. Logging will continue to this file, "
                    "exceeding the specified maximum size", self.baseFilename, exc_info=True
                )
                self._handle_rename_failure("a", disable_rollover=True)
                return

            # Everything went well, so now simply move the log file back into place
            # so doRollover can do its work.
            try:
                os.rename(temp_backup_name, self.baseFilename)
            except:
                # For some reason we couldn't move the backup in its place.
                log.debug(
                    "Unexpected issue while rotating log file '%s'. Logging will continue to this file, "
                    "exceeding the specified maximum size", self.baseFilename, exc_info=True
                )
                # The main log file doesn't exist anymore, so create a new file.
                # Don't disable the rollover, this has nothing to do with rollover
                # failing.
                self._handle_rename_failure("w")
                return

            # Python 2.6 expects the file to be opened during rollover.
            if not self.stream and sys.version_info[:2] < (2, 7):
                self.mode = "a"
                self.stream = self._open()

            # Now, that we are back in the original state we were in,
            # were pretty confident that the rollover will work. However, due to
            # any number of reasons it could still fail. If it does, simply
            # disable rollover and append to the current log.
            try:
                RotatingFileHandler.doRollover(self)
            except:
                # Something probably failed trying to rollover the backups,
                # since the code above proved that in theory the main log file
                # should be renamable. In any case, we didn't succeed in renaming,
                # so disable rollover and reopen the main log file in append mode.
                log.debug(
                    "Cannot rotate log file '%s'. Logging will continue to this file, "
                    "exceeding the specified maximum size", self.baseFilename, exc_info=True
                )
                self._handle_rename_failure("a", disable_rollover=True)
示例#52
0
文件: smolt.py 项目: gdenning/mythtv
            '.json':serialize(send_host_obj, human=True),
            '-distro.html':self.get_distro_specific_html(),
            '.rst':'\n'.join(map(to_ascii, self.getProfile())),
        }
        logdir = os.path.expanduser('~/.smolt/')
        try:
            if not os.path.exists(logdir):
                os.mkdir(logdir, 1700)

            for k, v in log_matrix.items():
                filename = os.path.expanduser(os.path.join(
                        logdir, 'submission%s' % k))
                r = RotatingFileHandler(filename, \
                        maxBytes=1000000, backupCount=9)
                r.stream.write(v)
                r.doRollover()
                r.close()
                os.remove(filename)
        except:
            pass
        del logdir
        del log_matrix


        debug('sendHostStr: %s' % serialized_host_obj_machine)
        debug('Sending Host')

        if batch:
            entry_point = "/client/batch_add_json"
            logging.debug('Submitting in asynchronous mode')
        else:
示例#53
0
 if using_engage:
   root_logger.setLevel(min(root_logger.level, logging.DEBUG))
   if running_from_command_line:
     # we only create the master.log logfile if we are running directly in the command
     # line. If we are called as a library, we let the caller setup the logfile
     log_dir = engage_file_locator.get_log_directory()
     if not os.path.exists(log_dir):
       os.makedirs(log_dir)
     log_file = os.path.join(log_dir, "master.log")
     do_log_rollover = os.path.exists(log_file)
     handler = RotatingFileHandler(log_file, backupCount=5)
     formatter = logging.Formatter(defs.DATABLOX_LOG_FORMAT,
                                   defs.DATABLOX_LOG_DATEFMT)
     handler.setFormatter(formatter)
     if do_log_rollover: # we do a rollover each time the master is run
       handler.doRollover()
     handler.setLevel(logging.DEBUG)
     root_logger.addHandler(handler)
 else:
   root_logger.setLevel(min(root_logger.level, log_levels[options.log_level]))
   
 Master(bloxpath, args[0], args[1:], using_engage,
        _log_level=log_levels[options.log_level],
        _debug_block_list=debug_block_list,
        reuse_existing_installs=reuse_existing_installs,
        poll_interval=options.poll_interval,
        stats_multiple=options.stats_multiple,
        block_args=block_args,
        loads_file=options.loads_file,
        log_stats_hist=options.log_stats_hist,
        time_limit=options.time_limit,
示例#54
0
 def run(self):     
     
     optparser = optparse.OptionParser(usage="usage: %prog [-chHl] start|stop|restart|status", conflict_handler='resolve')
     optparser.add_option('-c', '--configfile', dest='cf', type='string', action='store', metavar='config-file', help='specify the path to the config file (default is ./parallel-wsn.config)', default=(str(os.getcwd())+'/parallel-wsn.config'))
     optparser.add_option('-l', '--logpath', dest='logpath', type='string', action='store', help='define the path for the logfiles (default is ./ (CWD))') 
     optparser.add_option('-h', '--hosts', dest='host_files', action='append', metavar='HOST_FILE', help='hosts file (each line "[user@]host[:port]")')
     optparser.add_option('-H', '--host', dest='host_strings', action='append',metavar='HOST_STRING', help='additional host entries ("[user@]host[:port]")')
     optparser.add_option('-u', '--user', dest='user', help='username (OPTIONAL)')
     optparser.add_option('-t', '--waiting-time', dest='waiting_time', help='define the time in seconds for parallel-wsn to wait for answers from the sensor nodes (OPTIONAL)')
     optparser.add_option('-v', '--verbose', dest='verbose', action='store_true', help='turn on warning and diagnostic messages (OPTIONAL)')        
     
     (options, args) = optparser.parse_args(sys.argv)
     print options
     print args
     
     #check if verbose option was set
     if options.verbose:
         sys.log_lvl = logging.DEBUG
         
     #init parser to parse the config-file  
     cp = ConfigParser.RawConfigParser()
     print ("reading configuration file from %s" %options.cf) #@UndefinedVariable (this is to ignore the error in eclipse) #TODO: if verbose...
     res = cp.read(options.cf)    
     if len(res) == 0:
         print "Could not read config-file at %s!\n" % options.cf        
         sys.exit(-1)
         
     #initialize the logger
     if options.logpath:
         path = options.logpath.rstrip('/')
         ensure_dir(path)
     else:
         path = cp.get("main", "logpath")
         if path[0] == "$":
             envvar = os.getenv(path.split()[0][1:]) 
             if len(path.split()) > 1:
                 path = envvar + path.split()[1]
             else:
                 path = envvar
     print path
     ensure_dir(path)
     logfile = path.rstrip('/') + '/parallel-wsn-'+socket.gethostname()+'.log'
     if options.verbose:
         print "logging to: %s" %logfile
         
     sys.logger = logging.getLogger('parallel-wsn')
     sys.logger.setLevel(sys.log_lvl) #@UndefinedVariable (this is to ignore the error in eclipse)
     lfh = RotatingFileHandler(logfile, mode='a', maxBytes=1000000, backupCount=5)        
     sys.logger.addHandler(lfh) #@UndefinedVariable (this is to ignore the error in eclipse)
     formatter = logging.Formatter('%(asctime)s %(message)s')
     lfh.setFormatter(formatter)
     #don't log to console
     sys.logger.propagate = False
     #on init start a new file if existing one is not empty
     if os.path.getsize(logfile) > 0:
         lfh.doRollover()
                    
     global comm_server
     comm_server = communications.ClientCommHandler
     
     global comm_client
     comm_client = communications.CommClient()
     
     if not args[1]:
         print "No command specified!"
         sys.logger.error("No command specified") #@UndefinedVariable (this is to ignore the error in eclipse)
         sys.exit(-1)
     
     if cp.get("main", "individual_logfiles") == "yes":
         handler = tcp_handler(True, path.rstrip('/'), cp.get("main", "stdout"), args[1])
     else:
         handler = tcp_handler(False, path.rstrip('/'), cp.get("main", "stdout"), args[1])
     
     if os.uname()[1] == "uhu":
         #bind server only to the VLAN ip-address so that nobody outside can access the server
         ip_addr = communications.ip_address().get_ip_address("br-meshnet")
     else:
         #not running on huhu - bind server to all valid ip-addresses of this machine
         ip_addr = "0"
     
     #server in own thread
     sys.logger.debug("starting TCP-Server on ip %s port %s and putting it in own thread" %(ip_addr, int(cp.get("main", "client_port")))) #@UndefinedVariable (this is to ignore the error in eclipse)    
     ch = communications.myTCPServer((ip_addr, int(cp.get("main", "client_port"))), comm_server, handler)
     ch.allow_reuse_address = True
     ch_thread = threading.Thread(target=ch.serve_forever)
     ch_thread.setDaemon(1)
     ch_thread.start()
     
     #parse the passed hosts
     sys.logger.debug("parsing the hosts_file at %s" %options.host_files) #@UndefinedVariable (this is to ignore the error in eclipse)
     hosts = hostsParser.read_host_files(options.host_files)
      
     #if additional host_strings were defined add them too
     if options.host_strings:
         sys.logger.debug("parsing the additional hosts string %s" %options.host_strings) #@UndefinedVariable (this is to ignore the error in eclipse)
         for host_string in options.host_strings:
             res = hostsParser.parse_host_string(host_string)
             if res:
                 hosts.extend(res)
     
     if not hosts:
         print "No Hosts specified!"
         sys.logger.error("No hosts were specified in either a file or the string (-h or -H option)") #@UndefinedVariable (this is to ignore the error in eclipse)
         sys.exit(-1)
     
     if options.verbose:            
         print hosts
     
     daemon_port = cp.get("main", "daemon_port")
     sys.logger.debug("sending command to the host(s) now") #@UndefinedVariable (this is to ignore the error in eclipse)
     for host in hosts:
         if options.verbose:
             print "sending %s, %s, %s" %(args[1], str(host[0]), (host[1] if host[1] else daemon_port))
         sys.logger.debug("sending %s, %s, %s" %(args[1], str(host[0]), (host[1] if host[1] else daemon_port))) #@UndefinedVariable (this is to ignore the error in eclipse)
         try:
             comm_client.send(args[1], str(host[0]), (host[1] if host[1] else daemon_port))
         except:
             sys.logger.error("Couldn't send %s to %s" %(args[1], str(host[0]))) #@UndefinedVariable (this is to ignore the error in eclipse)
             print ("Couldn't send %s to %s" %(args[1], str(host[0])))
     
     if options.verbose:
         print "waiting for answers"
     if options.waiting_time:
         sys.logger.debug("waiting %s seconds for answers" %options.waiting_time) #@UndefinedVariable (this is to ignore the error in eclipse)
         time.sleep(float(options.waiting_time))
     else:
         sys.logger.debug("waiting %s seconds for answers" %cp.get("main", "waiting_time")) #@UndefinedVariable (this is to ignore the error in eclipse)
         time.sleep(float(cp.get("main", "waiting_time")))
     
     sys.logger.debug("shutting down TCP-Server and exiting system now") #@UndefinedVariable (this is to ignore the error in eclipse)
     ch.server_close()
     ch.shutdown()
示例#55
0
     ensure_dir(path)
     logfile = path.rstrip('/') + '/parallel-wsn-daemon-'+socket.gethostname()+'.log'
     if options.verbose:
         print "logging to: %s" %logfile
 lfh = RotatingFileHandler(logfile, mode='a', maxBytes=1000000, backupCount=5)        
 sys.logger.addHandler(lfh) #@UndefinedVariable (this is to ignore the error in eclipse)
 formatter = logging.Formatter('%(asctime)s %(message)s')
 lfh.setFormatter(formatter)
 #don't log to console
 sys.logger.propagate = False 
 
 #if starting or restarting - (re-)read the configuration and write new logfile
 if (args[1] == 'start' or args[1] == 'restart'):
     #on init start a new file if existing one is not empty
     if os.path.getsize(logfile) > 0:
         lfh.doRollover()
 
 #check the daemon commands
 if options.verbose:
     daemon = parallel_wsn_daemon(pidfile, sys.stdin, sys.stdout, sys.stderr)
 else:
     daemon = parallel_wsn_daemon(pidfile)
 if len(args) == 2:
         if 'start' == args[1]:
             sys.logger.debug("Received Signal to start the daemon") #@UndefinedVariable (this is to ignore the error in eclipse)
             daemon.start()                
         elif 'stop' == args[1]:
             sys.logger.debug("Received Signal to stop the daemon") #@UndefinedVariable (this is to ignore the error in eclipse)
             daemon.stop()
         elif 'restart' == args[1]:
             sys.logger.debug("Received Signal to restart the daemon") #@UndefinedVariable (this is to ignore the error in eclipse)
示例#56
0
    def doRollover(self):
        """Perform the standard rotating-file rollover, then repair ownership.

        ``RotatingFileHandler.doRollover`` reopens ``baseFilename`` as a new
        file; ``set_log_owner`` (defined elsewhere in this file) is then
        applied to it — presumably to restore the expected owner/permissions
        on the freshly created log file (see the inline comment below).
        """
        RotatingFileHandler.doRollover(self)

        # Try to fix up permissions on new file
        set_log_owner(self.baseFilename)
示例#57
0
class Job(object):
    """Holds the state of a job in memory.

    Wraps the job's parsed configuration, its scheduling/run state and its
    two loggers:

    * ``job_logger`` — persistent per-job log (``job.log``), never rotated.
    * ``job_run_logger`` — log of the most recent run (``job.last_run.log``),
      rolled over at the start of every run; 3 backups are kept.

    ``state`` keys: ``id``, ``job_directory``, ``json_config``, ``next_run``,
    ``last_run``, ``last_status``, ``name``, ``run_now``, ``is_running``.
    """

    def __init__(self, job_id, job_dir, parsed_json, previous_stats=None):
        """Build the in-memory job state and attach its loggers.

        Parameters
        ----------
        job_id : str
            Unique identifier of the job; also names its loggers.
        job_dir : str
            Directory holding the job's files (logs, stats, workspace).
        parsed_json : dict
            Parsed job configuration; must contain at least ``name``,
            ``triggers`` and ``actions``.
        previous_stats : dict, optional
            Stats from a previous run (``last_run``, ``last_run_status``)
            used to seed the state.
        """
        self.state = {}
        self.state["id"] = job_id
        self.state["job_directory"] = job_dir
        self.state["json_config"] = parsed_json
        self.state["next_run"] = const.DATETIME_NEVER
        self.state["last_run"] = const.DATETIME_NEVER
        self.state["last_status"] = ""
        self.state["name"] = parsed_json["name"]

        # Carry over stats from a previous run, when available.
        if previous_stats:
            if 'last_run' in previous_stats:
                self.state["last_run"] = previous_stats['last_run']
            if 'last_run_status' in previous_stats:
                self.state["last_status"] = previous_stats['last_run_status']

        self.state["run_now"] = False
        self.state["is_running"] = False

        # Persistent per-job log: everything at DEBUG and above.
        self.job_logger = logging.getLogger('jobs.' + job_id)
        log_file_handler = logging.FileHandler(
            os.path.join(self.state["job_directory"], 'job.log'))
        log_file_handler.setLevel(logging.DEBUG)
        self.job_logger.addHandler(log_file_handler)

        # Last-run log: rotated in run() so it only ever holds one run.
        self.job_run_logger = logging.getLogger(
            'jobs.' + self.state["id"] + '.last_run')
        self.job_run_file_handler = RotatingFileHandler(
            os.path.join(self.state["job_directory"], 'job.last_run.log'),
            backupCount=3)
        self.job_run_file_handler.setLevel(logging.DEBUG)
        self.job_run_logger.addHandler(self.job_run_file_handler)

        self.update_schedule()

    def serialize(self):
        """Create a serializable dict from the job's fields.

        ``next_run``/``last_run`` are replaced by the string ``'NEVER'`` when
        they still hold the ``const.DATETIME_NEVER`` sentinel.
        """
        next_run_str = self.state['next_run']
        if self.state['next_run'] is const.DATETIME_NEVER:
            next_run_str = 'NEVER'

        last_run_str = self.state['last_run']
        if self.state['last_run'] is const.DATETIME_NEVER:
            last_run_str = 'NEVER'

        return {
            'next_run': next_run_str,
            'last_run': last_run_str,
            'last_run_status': self.state['last_status'],
            'name': self.state['name'],
            'id': self.state['id']
        }

    def __repr__(self):
        return self.state['name']

    @property
    def job_id(self):
        """accessor for human readable job id"""
        return self.state['id']

    @property
    def name(self):
        """accessor for human readable job name"""
        return self.state['name']

    @property
    def next_scheduled_run(self):
        """accessor for next scheduled run as a datetime"""
        return self.state['next_run']

    @property
    def is_running(self):
        """accessor for is_running boolean"""
        return self.state['is_running']

    def run_now(self):
        """schedules the job to run immediately"""
        self.state['next_run'] = datetime.datetime.now()
        self.state['run_now'] = True

    def update_schedule(self):
        """Poll all of the job's triggers and recompute ``next_run``.

        If ``run_now`` was requested, the next run is "now" and triggers are
        not consulted. Otherwise the earliest time proposed by any trigger
        wins (``const.DATETIME_NEVER`` when there are no triggers).
        """
        if self.state['run_now']:
            self.state['next_run'] = datetime.datetime.now()
            return

        next_run = const.DATETIME_NEVER

        self.job_logger.debug('evaluating triggers...')

        for trigger_data in self.state['json_config']['triggers']:
            trigger_class = dynamic_import_trigger(trigger_data['className'])
            trigger = trigger_class(self.state['id'], trigger_data, self.job_logger)
            temp_next_run = trigger.next_run()
            next_run = min(next_run, temp_next_run)

        self.state['next_run'] = next_run

    def _resolve_workspace_dir(self):
        """Return the job's working directory.

        Defaults to ``<job_directory>/workspace``; a configured relative
        workspace path is resolved against the job directory.
        """
        if 'workspace' not in self.state['json_config']:
            working_dir = os.path.join(self.state['job_directory'], 'workspace')
        else:
            working_dir = self.state['json_config']['workspace']['workspace_path']
            if not os.path.isabs(working_dir):
                working_dir = os.path.join(self.state['job_directory'], working_dir)
        return working_dir

    def run(self):
        """Run all of the job's actions in sequence.

        Rolls the last-run log over, executes each configured action and
        records the final status both in memory and in the job's
        ``job.stats`` file. The status of the LAST action wins.
        """
        self.state['run_now'] = False
        self.state['is_running'] = True
        self.update_schedule()

        working_dir = self._resolve_workspace_dir()

        if not os.path.isdir(working_dir):
            os.mkdir(working_dir)

        logging.debug(self.state['json_config']['actions'])
        self.state['last_run'] = datetime.datetime.now()

        job_status = const.SUCCESS
        self.job_logger.info('executing job...')
        # BUGFIX: previously this handler was rolled over TWICE (once via
        # self.job_run_file_handler and once via job_run_logger.handlers[0],
        # which is the same object), burning a backup slot on an empty file.
        self.job_run_file_handler.doRollover()

        for action_data in self.state['json_config']['actions']:
            action_class = dynamic_import_action(action_data['className'])
            action = action_class(action_data, working_dir, self.job_run_logger)

            result = action.run()

            if ((result is not const.SUCCESS) and
                    (result is not const.UNSTABLE) and
                    (result is not const.FAILURE)):
                # str() so a non-string result cannot raise TypeError here.
                logging.error('job action returned an invalid result: '
                              + str(result))
                # TODO: throw/error job here

            job_status = result

        self.job_run_logger.info(job_status)

        self.state['last_status'] = job_status

        with open(os.path.join(self.state['job_directory'], 'job.stats'), 'w') as stats_file:
            json.dump({
                       "last_run": self.state['last_run'].isoformat(),
                       "last_run_status": self.state['last_status']},
                      stats_file)

        self.state['is_running'] = False
示例#58
0
# -*- coding: utf-8 -*-
#
# Author: Ingelrest François ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA

import consts, logging

from logging.handlers import RotatingFileHandler

# Module-wide logger for the application, writing to consts.fileLog.
# maxBytes=0 disables size-based rotation; instead a rollover is forced once
# at import time so every program start gets a fresh log file, with the two
# previous runs kept around as backups.
__logHandler = RotatingFileHandler(consts.fileLog, maxBytes=0, backupCount=2)
__logHandler.setFormatter(
    logging.Formatter('[%(asctime)s] %(levelname)-7s %(message)s',
                      datefmt='%y%m%d%H%M%S'))
__logHandler.doRollover()

logger = logging.getLogger(consts.appNameShort)
logger.addHandler(__logHandler)
logger.setLevel(logging.INFO)