Example #1
def getLogger(log_path, level=logging.INFO):
    logger = logging.getLogger("Rotating Logger")
    logger.setLevel(level)
    fhandler = RotatingFileHandler(log_path, maxBytes=1024*1024*10, backupCount=5)
    fhandler.setFormatter(logging.Formatter("[%(asctime)s] %(message)s"))
    logger.addHandler(fhandler)
    return logger
Example #2
def setup_logging(verbose=True, level="INFO", directory=None, filename=None,
                  rotation_size="10MB"):
    """
    Setup logging.
    """

    root_logger = logging.getLogger("")

    for handler in root_logger.handlers:
        root_logger.removeHandler(handler)

    if verbose:
        handler = logging.StreamHandler(sys.stdout)
        handler.setLevel(parse_loglevel(level))
        root_logger.addHandler(handler)

    if directory is not None and filename is not None:
        max_log_size = parse_size(rotation_size)

        filepath = os.path.join(directory, filename)

        handler = RotatingFileHandler(filepath, maxBytes=max_log_size,
                                      backupCount=5)

        handler.setFormatter(
            logging.Formatter("%(asctime)s %(levelname)s %(message)s"))

        root_logger.setLevel(parse_loglevel(level))
        root_logger.addHandler(handler)

    return logging
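
The setup_logging() snippet above depends on parse_loglevel() and parse_size() helpers that are not included. A minimal sketch of what such helpers might look like (an assumption for illustration, not the original project's code):

import logging
import re

def parse_loglevel(level):
    # Map a level name such as "INFO" to the numeric logging constant (assumed behavior).
    return getattr(logging, str(level).upper(), logging.INFO)

def parse_size(size):
    # Convert a size string such as "10MB" into a byte count (assumed behavior).
    match = re.match(r"^\s*(\d+)\s*([KMG]?B?)\s*$", str(size), re.IGNORECASE)
    if not match:
        raise ValueError("unrecognized size: %r" % (size,))
    number, unit = int(match.group(1)), match.group(2).upper()
    factor = {"": 1, "B": 1, "K": 1024, "KB": 1024,
              "M": 1024 ** 2, "MB": 1024 ** 2,
              "G": 1024 ** 3, "GB": 1024 ** 3}[unit]
    return number * factor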
Example #3
File: ftpd.py  Project: a6708051/oss-ftp
 def __set_logger(self, log_level):
     work_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
     log_dir = work_dir + '/data/ossftp/'
     try:
         os.makedirs(log_dir)
     except OSError as exc: 
         if exc.errno == errno.EEXIST and os.path.isdir(log_dir):
             pass
         else:
             raise
     LOGFILE = os.path.join(log_dir, "ossftp.log")
     MAXLOGSIZE = 10*1024*1024 #Bytes
     BACKUPCOUNT = 30
     FORMAT = "%(asctime)s %(levelname)-8s[%(filename)s:%(lineno)d(%(funcName)s)] %(message)s"
     handler = RotatingFileHandler(LOGFILE,
             mode='w',
             maxBytes=MAXLOGSIZE,
             backupCount=BACKUPCOUNT)
     formatter = logging.Formatter(FORMAT)
     handler.setFormatter(formatter)
     logger = logging.getLogger()
     if log_level == "DEBUG":
         logger.setLevel(logging.DEBUG)
     elif log_level == "INFO":
         logger.setLevel(logging.INFO)
     elif log_level == "WARNING":
         logger.setLevel(logging.WARNING)
     elif log_level == "ERROR":
         logger.setLevel(logging.ERROR)
     elif log_level == "CRITICAL":
         logger.setLevel(logging.CRITICAL)
     else:
         print "wrong loglevel parameter: %s" % log_level
         exit(1)
     logger.addHandler(handler)
Example #4
File: app.py  Project: nict-isp/scn-admin
def configure_logging(app):
    """
    Configure logger.

    The logger object is set on the utils.logging class.

    :param app: Flask object
    """
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s '
        '[in %(pathname)s:%(lineno)d]'
    )

    root_path = os.path.dirname(app.root_path)
    debug_log = os.path.join(root_path, app.config['DEBUG_LOG'])

    debug_file_handler = RotatingFileHandler(
        debug_log, maxBytes=100000, backupCount=100000
    )

    debug_file_handler.setLevel(app.config['LOG_LEVEL'])
    debug_file_handler.setFormatter(formatter)
    app.logger.addHandler(debug_file_handler)

    error_log = os.path.join(root_path, app.config['ERROR_LOG'])
    error_file_handler = RotatingFileHandler(
        error_log, maxBytes=100000, backupCount=10
    )

    error_file_handler.setLevel(logging.ERROR)
    error_file_handler.setFormatter(formatter)
    app.logger.addHandler(error_file_handler)
Example #5
def init(verbose=0, quiet=False, filename='activity.log'):
    """
    Initialize the logger
    * verbose (int) specifies the verbosity level of the standard output:
      0 (default) ~ ERROR, 1 ~ WARN & WARNING, 2 ~ INFO, 3 ~ DEBUG
    * quiet (boolean) suppresses all console messages regardless of the verbosity level
    """
    if not os.path.exists('log'):
        os.mkdir('log')

    with open("log/" + filename, 'w'):
        pass

    logger = logging.getLogger()
    logger.propagate = False
    logger.setLevel(min([
        conf['logging']['log_file_level'], 
        conf['logging']['log_console_level'], 
        verbose]))

    formatter = logging.Formatter(
        '%(asctime)s :: %(levelname)s :: ' +
        '%(filename)s:%(funcName)s[%(lineno)d] :: %(message)s')
    file_handler = RotatingFileHandler("log/" + filename, 'w', 10000000, 10)
    file_handler.setLevel(conf['logging']['log_file_level'])
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    formatter = logging.Formatter(
        '%(asctime)s :: %(levelname)s :: ' +
        '%(filename)s:%(funcName)s[%(lineno)d] :: %(message)s')
    file_handler = RotatingFileHandler("log/errors.log", 'w', 10000000, 10)
    file_handler.setLevel(logging.ERROR)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    formatter = logging.Formatter(
        '%(levelname)s :: %(filename)s :: %(message)s')
    stream_handler = logging.StreamHandler()
    if verbose == -1:
        stream_handler.setLevel(conf['logging']['log_file_level'])
    elif verbose == 0:
        stream_handler.setLevel(logging.ERROR)
    elif verbose == 1:
        stream_handler.setLevel(logging.WARNING)
    elif verbose == 2:
        stream_handler.setLevel(logging.INFO)
    elif verbose == 3:
        stream_handler.setLevel(logging.DEBUG)
    elif verbose == 4:
        stream_handler.setLevel(0)
    else:
        stream_handler.setLevel(conf['logging']['log_console_level'])
    stream_handler.setFormatter(formatter)
    if not quiet:
        logger.addHandler(stream_handler)

    logging.info("=" * 80)
    logging.info('Logging system started: verbose=%d, quiet=%s' %
                 (verbose, str(quiet)))
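
A short usage sketch for init() above, assuming the module-level conf dict carries the logging section the function reads (the values here are illustrative):

conf = {'logging': {'log_file_level': logging.DEBUG,
                    'log_console_level': logging.ERROR}}

init(verbose=2)   # console handler at INFO, file handlers at their configured levels
logging.getLogger(__name__).info("visible on stdout and in log/activity.log")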
Example #6
def getLogger(config, section, keyword="Test", tid=None):
    """
    @param config : ConfigParser object
    @param section : Section in config
    @param keyword : additional keyword
    """

    level = config.get("global","log_level")

    log_dir = config.get("global", "log_dir")
    log_path = config.get(section, "logfile")
    fname = join(log_dir, log_path)

    # Thread id (multiprocess id)
    if tid is not None:
        fname = "%s.%s" % (fname,tid)

    logger = logging.getLogger(str(keyword))              
    logger.setLevel( LOG_LEVELS[level] )

    if fname:
        log_handler = RotatingFileHandler(fname, maxBytes=100000000, backupCount=5)
    else:
        log_handler = StreamHandler(sys.stdout)

    log_formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    log_handler.setFormatter(log_formatter)
    logger.addHandler(log_handler)

    return logger
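
A sketch of how the config-driven getLogger() above might be exercised. The section and option values, and the LOG_LEVELS mapping (referenced but not shown in the snippet), are assumptions for illustration:

import logging
from os.path import join          # the snippet above assumes this import
from configparser import ConfigParser

LOG_LEVELS = {'debug': logging.DEBUG, 'info': logging.INFO,
              'warning': logging.WARNING, 'error': logging.ERROR}

config = ConfigParser()
config.read_string("""
[global]
log_level = info
log_dir = /tmp
[worker]
logfile = worker.log
""")

logger = getLogger(config, "worker", keyword="Worker", tid=1)
logger.info("worker started")     # written to /tmp/worker.log.1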
Example #7
File: logger.py  Project: LeGaulois/soc
class log(object):
    def __init__(self,fichier,instance):
        self.logger = logging.getLogger(instance)
        
        self.logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
        self.file_handler = RotatingFileHandler(fichier, 'a', 5000000, 1)
        self.file_handler.setLevel(logging.DEBUG)   
        self.file_handler.setFormatter(formatter)
        self.file_handler.createLock()
        self.logger.addHandler(self.file_handler)
        
    def ecrire(self,message,niveau):
        if niveau=='critical':
            self.logger.critical(message)

        elif niveau=='error':
            self.logger.error(message)

        elif niveau=='warning':
            self.logger.warning(message)

        elif niveau=='info':
            self.logger.info(message)
        else:        
            self.logger.debug(message)

    def fermer(self):
        self.file_handler.close()
Example #8
File: app.py  Project: coco413/walle-web
def register_logging(app):
    # TODO https://blog.csdn.net/zwxiaoliu/article/details/80890136
    # email errors to the administrators
    import logging
    from logging.handlers import RotatingFileHandler
    # Formatter
    formatter = logging.Formatter(
            '%(asctime)s %(levelname)s %(pathname)s %(lineno)s %(module)s.%(funcName)s %(message)s')

    # log dir
    if not os.path.exists(app.config['LOG_PATH']):
        os.makedirs(app.config['LOG_PATH'])

    # FileHandler Info
    file_handler_info = RotatingFileHandler(filename=app.config['LOG_PATH_INFO'])
    file_handler_info.setFormatter(formatter)
    file_handler_info.setLevel(logging.INFO)
    info_filter = InfoFilter()
    file_handler_info.addFilter(info_filter)
    app.logger.addHandler(file_handler_info)

    # FileHandler Error
    file_handler_error = RotatingFileHandler(filename=app.config['LOG_PATH_ERROR'])
    file_handler_error.setFormatter(formatter)
    file_handler_error.setLevel(logging.ERROR)
    app.logger.addHandler(file_handler_error)
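
register_logging() above attaches an InfoFilter that is not defined in the snippet. A minimal sketch under the assumption that its job is to let only INFO-level records into the info log (the real walle-web implementation may differ):

import logging

class InfoFilter(logging.Filter):
    """Assumed behavior: pass only INFO-level records to the info file handler."""
    def filter(self, record):
        return record.levelno == logging.INFO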
Example #9
def setup_logging(name,logdir=None, scrnlog=True, txtlog=True, loglevel=logging.DEBUG):
    logdir = os.path.abspath(logdir)

    if not os.path.exists(logdir):
        os.mkdir(logdir)

    log = logging.getLogger(name)
    log.setLevel(loglevel)
    log.propagate = False
    
    log_formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")

    if txtlog:
        txt_handler = RotatingFileHandler(os.path.join(logdir, "blogstrap-py.log"), maxBytes=(1024*1024*20), backupCount=5)
        #txt_handler.doRollover()
        txt_handler.setFormatter(log_formatter)
        log.addHandler(txt_handler)
        log.info("Logger initialised.")

    if scrnlog:
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(log_formatter)
        log.addHandler(console_handler)

    return log
Example #10
    def __init__(self, pid, config, debug):
        AbstractDaemon.__init__(self, pid, "acolyte", debug=debug)
        self.processes = []
        self.collectors = []
        self.config = config
        self.shutdown_event = multiprocessing.Event()

        log_fmt_default = "[%(asctime)s] %(levelname)s: %(name)s: %(message)s"
        log_format = config.get("master").get("log_format", log_fmt_default)
        self.log_format = log_format

        fmt = logging.Formatter(log_format)

        logfile = config.get("master").get("logfile")

        max_log_size = int(config.get("master").get("max_log_size", 1))
        handler = RotatingFileHandler(logfile, backupCount=5, maxBytes=max_log_size * 1000000)

        handler.setFormatter(fmt)
        handler_fd = handler.stream.fileno()
        self.files_preserve = [handler_fd]

        log = logging.getLogger(self.name)

        if debug == "on":
            log.setLevel(logging.DEBUG)
            self.debug = True
        else:
            log.setLevel(logging.INFO)

        log.addHandler(handler)
        self.log = log
Example #11
File: cli.py  Project: levi-rs/chirp
def configure_logger():
    """
    Creates a rotating log

    :param dir_path: String, path to current directory
    """
    # Formatting
    formatter = logging.Formatter('[%(levelname)s %(asctime)s] %(message)s')

    # Set up STDOUT handler
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.DEBUG)
    stdout_handler.setFormatter(formatter)

    # Set up file logging with rotating file handler
    rotate_fh = RotatingFileHandler(LOG_FILE, backupCount=5, maxBytes=1000000)
    rotate_fh.setLevel(logging.DEBUG)
    rotate_fh.setFormatter(formatter)

    # Create Logger object
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(stdout_handler)
    logger.addHandler(rotate_fh)

    return logger
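
configure_logger() above reads a module-level LOG_FILE constant that is not shown in the snippet. A hedged usage sketch (the path is illustrative):

LOG_FILE = 'chirp.log'   # illustrative; the real constant is defined elsewhere in cli.py

logger = configure_logger()
logger.debug("rotating file handler and stdout handler are both attached")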
Example #12
def configure_app(app):
    """Main config function:
    Works out what environment to configure for based on
    Environment Variable (Dev is assumed if none found),
    then uses that to select the config class, and sets
    logging options.
    :param app: Flask app object
    """

    config = {"Dev": "restaurants.config.DevConfig",
              "Test": "restaurants.config.TestConfig",
              "Prod": "restaurants.config.ProdConfig"
              }

    # Get Environment Variable
    env = os.getenv('RESTAURANT_APP_ENV', 'Dev')

    # Config based on options in this file
    app.config.from_object(config[env])

    # Config based on options in "APPLICATION_SETTINGS" file if it exists (used for anything sensitive)
    try:
        app.config.from_pyfile(app.config.get('APPLICATION_SETTINGS'))
    except IOError:
        print('could not find ' + app.config.get('APPLICATION_SETTINGS') + ', continuing without it')

    # Logging Config
    from logging.handlers import RotatingFileHandler
    file_handler = RotatingFileHandler(app.config['LOGGING_LOCATION'], maxBytes=1024 * 1024 * 100, backupCount=20)
    formatter = logging.Formatter(app.config['LOGGING_FORMAT'])
    file_handler.setFormatter(formatter)
    app.logger.addHandler(file_handler)
Example #13
 def get_logger(self):
     log = logging.getLogger('djutils.queue.logger')
     log.setLevel(logging.DEBUG)
     handler = RotatingFileHandler(self.logfile, maxBytes=1024*1024, backupCount=3)
     handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(message)s"))
     log.addHandler(handler)
     return log
Example #14
def enable_cli_log(debug=0):
    """
    Use this helper to add a rotating file handler to the 'ochopod' logger. This file will be
    located in /var/log so that the CLI can go get it. This is typically used when your pod is simply running
    another python script (e.g. you can log from that script and see it in the CLI).

    :type debug: boolean
    :param debug: true to switch debug logging on
    """

    #
    # - add a small capacity rotating log
    # - this will be persisted in the container's filesystem and retrieved via /log requests
    # - an IOError here would mean we don't have the permission to write to /var/log for some reason (just skip)
    #
    logger = logging.getLogger('ochopod')
    try:
        handler = RotatingFileHandler(LOG, maxBytes=32764, backupCount=3)
        handler.setLevel(INFO)
        handler.setFormatter(Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        logger.addHandler(handler)

    except IOError:
        pass

    #
    # - switch all handlers to DEBUG if requested
    #
    if debug:
        for handler in logger.handlers:
            handler.setLevel(DEBUG)
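
enable_cli_log() assumes that LOG, INFO, DEBUG and Formatter were imported at module level; they are not shown in the snippet. A hedged usage sketch with an illustrative log path:

import logging
from logging import Formatter, INFO, DEBUG

LOG = '/var/log/ochopod.log'   # illustrative value for the module-level constant

enable_cli_log(debug=1)        # every handler on the 'ochopod' logger is bumped to DEBUG
logging.getLogger('ochopod').info('pod booted')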
Example #15
    def __init__(self):
        super(Actuasim, self).__init__()
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
        file_handler = RotatingFileHandler('actuasim.log', 'a', 10000000, 1)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        self.logger.addHandler(file_handler)
        self.logger.info('=======================================')
        self.logger.info('           ACTUASIM START')
        self.logger.info('=======================================')
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.resize(1700, 900)
        self.classrooms = []
        self.tabs = QTabWidget()
        self.setCentralWidget(self.tabs)
        self.file_menu = self.ui.menubar.addMenu("&File")
        self.save_action = QAction("&Save", self, triggered=self.save)
        self.file_menu.addAction(self.save_action)
        self.load_action = QAction("&Load", self, triggered=self.load)
        self.file_menu.addAction(self.load_action)
        self.command_handler = CommandHandler(self)

        # endpoints, status, id
        self.control_endpoint = ('0.0.0.0', 0)
        self.data_endpoint = ('0.0.0.0', 0)
        self.status = 0
        self.channel_id = random.randint(0, 255)  # TODO: handle multiple channel

        # server
        self.knxserver = Knxserver()
        self.knxserver.trigger.connect(self.frame_received)
        self.knxserver.start()
Example #16
    def __setLogging__(self):
        """
        Initializes logging. Use by the constructor.
        """
        compSect = self.myconfig.RemoteMsg

        # Logging
        if not hasattr(compSect, "logFile"):
            compSect.logFile = os.path.join(compSect.RemoteMsgDir, "remoteMsg.log")
        print("Log file is: " + compSect.logFile)

        if not hasattr(compSect, "listenerLogFile"):
            compSect.listenerLogFile = os.path.join(compSect.RemoteMsgDir, "listener.log")
        print("Listener log file is: " + compSect.listenerLogFile)

        logHandler = RotatingFileHandler(compSect.logFile, "a", 1000000, 3)
        logFormatter = logging.Formatter("%(asctime)s:%(levelname)s:%(filename)s:%(message)s")
        logHandler.setFormatter(logFormatter)
        self.mylogger = logging.getLogger("RemoteMsg")
        self.mylogger.addHandler(logHandler)
        self.mylogger.setLevel(logging.INFO)
        # map log strings to integer levels:
        self.logMsg = {
            "DEBUG": logging.DEBUG,
            "ERROR": logging.ERROR,
            "NOTSET": logging.NOTSET,
            "CRITICAL": logging.CRITICAL,
            "WARNING": logging.WARNING,
            "INFO": logging.INFO,
        }
        ##                    'SQLDEBUG' : logging.SQLDEBUG  }
        if hasattr(compSect, "logLevel") and compSect.logLevel in self.logMsg.keys():
            self.mylogger.setLevel(self.logMsg[compSect.logLevel])
Example #17
File: PoleLog.py  Project: itjp/pole
def setup_logging(app):
    log = logging.getLogger(app)
    log.setLevel(logging.INFO)

    formatter = logging.Formatter("%(levelname)s %(asctime)s %(name)s "
                                  "%(filename)s(%(lineno)d): %(message)s")

    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    log.addHandler(sh)

    log_dir = os.path.expanduser('~')
    log_file = os.path.join(log_dir, '.erp', app + '.log')
    try:
        fh = RotatingFileHandler(log_file, maxBytes=_MAX_LOG_SIZE,
                                 backupCount=_MAX_LOG_BACKUP)
    except IOError:
        logging.exception('Could not set up file logging.')
        fh = None

    if fh:
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        log.addHandler(fh)

    if os.getenv('DEBUG_SESSION_POLE', 0) == '1':
        log.setLevel(logging.DEBUG)
        log.debug('Debug enabled.')

        try:
            fh.setLevel(logging.DEBUG)
        except Exception as e:
            log.error(e)
Example #18
def get_logger(name, logfile=ZTASKD_LOG_PATH, loglevel=ZTASKD_LOG_LEVEL):
    LEVELS = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'critical': logging.CRITICAL
    }

    logger_ = logging.getLogger("ztaskq.%s" % name)
    logger_.propagate = False
    logger_.setLevel(LEVELS[loglevel.lower()])
    if logfile:
        if '%(name)s' in logfile:
            filename = logfile % { 'name': name }
        else:
            filename = logfile
        handler = RotatingFileHandler(filename=filename,
                                      maxBytes=ZTASKD_LOG_MAXBYTES,
                                      backupCount=ZTASKD_LOG_BACKUP)
    else:
        handler = logging.StreamHandler()

    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
    handler.setFormatter(formatter)
    logger_.addHandler(handler)

    return logger_
Example #19
def configure_logging(app):
    ''' Configure logging.
    :param app: The Flask application object.
    '''

    # Get the path of the log from the config
    log_path = app.config['LOG_PATH']

    # Get the level of logging from the config
    log_level = app.config['LOG_LEVEL']

    # If path directory doesn't exist, create it.
    log_dir = os.path.dirname(log_path)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # Create formatter
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # Create Log_Handler
    log_handler = RotatingFileHandler(log_path, maxBytes=250000, backupCount=5)

    # add formatter to log handler
    log_handler.setFormatter(formatter)

    # Get the level of the Debug and set it to the logger
    app.logger.setLevel(log_level)

    # Add the handlers to the logger
    app.logger.addHandler(log_handler)

    # Test that logging works by writing this line to the log file.
    app.logger.info('Logging to: %s', log_path)
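
A sketch of wiring configure_logging() above to a bare Flask app; the config values are illustrative assumptions:

import logging
from flask import Flask

app = Flask(__name__)
app.config['LOG_PATH'] = '/tmp/myapp/app.log'   # illustrative
app.config['LOG_LEVEL'] = logging.INFO

configure_logging(app)
app.logger.info('handler rotates at 250,000 bytes with 5 backups')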
Example #20
 def __init__(self, debug, log_name, log_level, logger):
     self.logger = logging.getLogger(logger)
     if debug:
         logfile = os.path.join(os.getcwd(), log_name)
         max_log_size = 100*1024*1024 #Bytes
         backup_count = 5
         format = \
         "%(asctime)s %(levelname)-8s[%(filename)s:%(lineno)d(%(funcName)s)] %(message)s"
         hdlr = RotatingFileHandler(logfile,
                                       mode='a',
                                       maxBytes=max_log_size,
                                       backupCount=backup_count)
         formatter = logging.Formatter(format)
         hdlr.setFormatter(formatter)
         self.logger.addHandler(hdlr)
         if "DEBUG" == log_level.upper():
             self.logger.setLevel(logging.DEBUG)
         elif "INFO" == log_level.upper():
             self.logger.setLevel(logging.INFO)
         elif "WARNING" == log_level.upper():
             self.logger.setLevel(logging.WARNING)
         elif "ERROR" == log_level.upper():
             self.logger.setLevel(logging.ERROR)
         elif "CRITICAL" == log_level.upper():
             self.logger.setLevel(logging.CRITICAL)
         else:
             self.logger.setLevel(logging.ERROR)
     else:
         self.logger.addHandler(EmptyHandler())
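
When debug is off, the constructor above attaches an EmptyHandler that is not shown; presumably it behaves like logging.NullHandler and silently drops records. A minimal sketch under that assumption:

import logging

class EmptyHandler(logging.Handler):
    """Assumed no-op handler: swallow records so 'no handlers could be found' warnings are avoided."""
    def emit(self, record):
        pass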
Example #21
def initialize_logger(redfish_logfile):
    """Return api version.

    :param redfish_logfile: redfish log
    :type str
    :returns:  True

    """
    global logger
    logger = logging.getLogger()
    
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s :: %(levelname)s :: %(message)s'
        )
    file_handler = RotatingFileHandler(redfish_logfile, 'a', 1000000, 1)

    # First logger to file
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    # Second logger to console
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    logger.addHandler(stream_handler)
    return True
Example #22
def setup_logger(cfg):
    if 'LOGFILE' in cfg:
        file_handler = RotatingFileHandler(cfg['LOGFILE'], 'a', 1 * 1024 * 1024, 10)
        file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
        file_handler.setLevel(getattr(logging, cfg['DEBUG']))
        logger.addHandler(file_handler)
    logger.setLevel(getattr(logging, cfg['DEBUG']))
Example #23
    def start(self):
        # remove all handlers
        self.handlers = []

        # console log handler
        if self.consoleLogging:
            console = logging.StreamHandler()
            console.setFormatter(
                logging.Formatter('%(asctime)s %(levelname)s::%(threadName)s::%(message)s', '%H:%M:%S'))
            console.setLevel(self.logLevels['INFO'] if not self.debugLogging else self.logLevels['DEBUG'])
            self.addHandler(console)

        # rotating log file handlers
        if self.logFile and makeDir(os.path.dirname(self.logFile)):
            rfh = RotatingFileHandler(
                filename=self.logFile,
                maxBytes=self.logSize,
                backupCount=self.logNr
            )

            rfh_errors = RotatingFileHandler(
                filename=self.logFile.replace('.log', '.error.log'),
                maxBytes=self.logSize,
                backupCount=self.logNr
            )

            rfh.setFormatter(
                logging.Formatter('%(asctime)s %(levelname)s::%(threadName)s::%(message)s', '%Y-%m-%d %H:%M:%S'))
            rfh.setLevel(self.logLevels['INFO'] if not self.debugLogging else self.logLevels['DEBUG'])
            self.addHandler(rfh)

            rfh_errors.setFormatter(
                logging.Formatter('%(asctime)s %(levelname)s::%(threadName)s::%(message)s', '%Y-%m-%d %H:%M:%S'))
            rfh_errors.setLevel(self.logLevels['ERROR'])
            self.addHandler(rfh_errors)
Example #24
def main():
    ini=Recall()
    #
    #setup logging
    #
    logger=logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter=logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
    file_handler = RotatingFileHandler('ridirect.log', 'a', 1000000, 1)

    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.DEBUG)
    formatter=logging.Formatter('%(asctime)s \t %(filename)s \t %(levelname)s \t %(message)s', "%H:%M:%S")
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)

    r=Measurement(None,ini)
    print "Time out: ",r.time_out
    logging.info("Start"+r.version)
    r.collect_new()
    r.dispatch_new()
    logging.info("Stop")
    return True
Example #25
File: sncli.py  Project: Qu4tro/sncli
class sncli:

    def __init__(self, do_server_sync, verbose=False):
        self.config         = Config()
        self.do_server_sync = do_server_sync
        self.verbose        = verbose
        self.do_gui         = False

        if not os.path.exists(self.config.get_config('db_path')):
            os.mkdir(self.config.get_config('db_path'))

        # configure the logging module
        self.logfile = os.path.join(self.config.get_config('db_path'), 'sncli.log')
        self.loghandler = RotatingFileHandler(self.logfile, maxBytes=100000, backupCount=1)
        self.loghandler.setLevel(logging.DEBUG)
        self.loghandler.setFormatter(logging.Formatter(fmt='%(asctime)s [%(levelname)s] %(message)s'))
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)
        self.logger.addHandler(self.loghandler)
        self.config.logfile = self.logfile

        logging.debug('sncli logging initialized')

        self.logs = []

        try:
            self.ndb = NotesDB(self.config, self.log, self.gui_update_view)
        except Exception as e:
            self.log(str(e))
            sys.exit(1)
Example #26
def init_log(filter=None):
    log = logging.getLogger()
    log.setLevel(logging.DEBUG)

    formatter = logging.Formatter(FORMAT)

    # add file rotation handler
    file_handler = RotatingFileHandler(
        filename="/var/log/mediaplat/mediaplat.log", maxBytes=1024 * 1024, backupCount=5, mode="a+"
    )

    stream_handler = logging.StreamHandler()

    if filter:
        file_handler.addFilter(filter)
        stream_handler.addFilter(filter)

    # log to file
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(formatter)
    log.addHandler(file_handler)

    if DEBUG:
        # duplicate log to stdout with color
        stream_handler.setLevel(logging.DEBUG)
        stream_handler.setFormatter(ColoredFormatter())
        log.addHandler(stream_handler)
Example #27
 def _setup_file_log(self):
     """Add a file log handler."""
     
     file = os.path.abspath(os.path.expanduser(self._meta.file))
     log_dir = os.path.dirname(file)
     if not os.path.exists(log_dir):
         os.makedirs(log_dir)
         
     if self._meta.rotate:
         from logging.handlers import RotatingFileHandler
         file_handler = RotatingFileHandler(
             file, 
             maxBytes=int(self._meta.max_bytes), 
             backupCount=int(self._meta.max_files),
             )
     else:
         from logging import FileHandler
         file_handler = FileHandler(file)
     
     if self.get_level() == logging.getLevelName(logging.DEBUG):
         format = logging.Formatter(self._meta.debug_format)
     else:
         format = logging.Formatter(self._meta.file_format)
     file_handler.setFormatter(format)   
     file_handler.setLevel(getattr(logging, self.get_level())) 
     self.backend.addHandler(file_handler)
Example #28
    def __init__(self, filename, level="debug", logid="qiueer", mbs=20, count=10, is_console=True):
        '''
        mbs: maximum size of the log file, in MB
        count: number of rotated backup files to keep
        '''
        try:
            self._level = level
            #print "init,level:",level,"\t","get_map_level:",self._level
            self._filename = filename
            self._logid = logid

            self._logger = logging.getLogger(self._logid)
            
            
            if not len(self._logger.handlers):
                self._logger.setLevel(self.get_map_level(self._level))  
                
                fmt = '[%(asctime)s] %(levelname)s\n%(message)s'
                datefmt = '%Y-%m-%d %H:%M:%S'
                formatter = logging.Formatter(fmt, datefmt)
                
                maxBytes = int(mbs) * 1024 * 1024
                file_handler = RotatingFileHandler(self._filename, mode='a',maxBytes=maxBytes,backupCount=count)
                self._logger.setLevel(self.get_map_level(self._level))  
                file_handler.setFormatter(formatter)  
                self._logger.addHandler(file_handler)
    
                if is_console == True:
                    stream_handler = logging.StreamHandler(sys.stderr)
                    console_formatter = ColoredFormatter(fmt, datefmt)
                    stream_handler.setFormatter(console_formatter)
                    self._logger.addHandler(stream_handler)

        except Exception as expt:
            print(expt)
Example #29
def _get_handler():
    # we only need one global handler
    global handler
    if handler is not None:
        return handler

    path = '/var/log/rhsm/rhsm.log'
    try:
        if not os.path.isdir("/var/log/rhsm"):
            os.mkdir("/var/log/rhsm")
    except Exception:
        pass

    # Try to write to /var/log, fallback on console logging:
    try:
        handler = RotatingFileHandler(path, maxBytes=0x100000, backupCount=5, encoding='utf-8')
    except IOError:
        handler = logging.StreamHandler()
    except Exception:
        handler = logging.StreamHandler()

    handler.setFormatter(logging.Formatter(LOG_FORMAT))
    handler.setLevel(LOG_LEVEL)

    return handler
Example #30
def _init_logger(logger, phase):
    log_handler = RotatingFileHandler(LOGFILE, maxBytes=1048576, backupCount=5)
    fmt = '%(asctime)s %(levelname)s {0} [-] %(message)s'.format(phase)
    formatter = logging.Formatter(fmt)
    log_handler.setFormatter(formatter)
    logger.addHandler(log_handler)
    logger.setLevel(logging.DEBUG)
Example #31
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'  # this is the view that logs the user in
oid = OpenID(app, os.path.join(basedir, "tmp"))

# enabling email to be sent when there is an error
if not app.debug:
    import logging
    from logging.handlers import SMTPHandler
    credentials = None
    if MAIL_USERNAME or MAIL_PASSWORD:
        credentials = (MAIL_USERNAME, MAIL_PASSWORD)
    mail_handler = SMTPHandler((MAIL_SERVER, MAIL_PORT), 'no-reply@' + MAIL_SERVER, ADMINS, "app failure", credentials)
    mail_handler.setLevel(logging.ERROR)
    app.logger.addHandler(mail_handler)


# enable logging to a file
if not app.debug:
    import logging
    from logging.handlers import RotatingFileHandler
    file_handler = RotatingFileHandler('tmp/microblog.log', 'a', 1*1024*1024, 10)
    file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
    app.logger.setLevel(logging.INFO)
    file_handler.setLevel(logging.INFO)
    app.logger.addHandler(file_handler)
    app.logger.info('microblog startup')


# this 'app' is different from the Flask app above: it is the app package from which we import views and models
from app import views, models
Example #32
CHECK_FOR_DUPLICATE_INSTANCES = True

# work around a problem on non-posix-compliant platforms by not using any
# RotatingFileHandler there
if os.name == "posix":
    LOG_HANDLER = RotatingFileHandler(filename=LOG_FILENAME,
                                      mode="a",
                                      maxBytes=1024 * 1024,
                                      backupCount=5,
                                      encoding="utf-8")
else:
    LOG_HANDLER = logging.FileHandler(filename=LOG_FILENAME,
                                      mode="a",
                                      encoding="utf-8")
LOG_HANDLER.setLevel(level=LOG_LEVEL)
LOG_HANDLER.setFormatter(LOG_FORMATTER)

# init root logger
logging.basicConfig(format=LOG_FORMAT, datefmt=LOG_DATE, level=LOG_LEVEL)

# Maximum size of files uploaded as resource data.
# The default is a cautious value in order to protect the server
# against resource starvation; if you think your server can handle
# bigger files, feel free to try and increase this value.
MAXIMUM_UPLOAD_SIZE = 10 * 1024 * 1024

# Synchronization info:
SYNC_NEEDS_AUTHENTICATION = True

# URL for the Metashare Knowledge Base
KNOWLEDGE_BASE_URL = 'http://www.meta-share.org/portal/knowledgebase/'
Example #33
        logger.error(str(e))

    finally:
        if cursor:
            cursor.close()
        if cnx:
            cnx.close()


if __name__ == "__main__":
    """main"""
    # create logger
    logger = logging.getLogger('mysql-connector-demo')
    # specifies the lowest-severity log message a logger will handle,
    # where debug is the lowest built-in severity level and critical is the highest built-in severity.
    # For example, if the severity level is INFO, the logger will handle only INFO, WARNING, ERROR, and CRITICAL
    # messages and will ignore DEBUG messages.
    logger.setLevel(logging.ERROR)
    # create file handler which logs messages
    fh = RotatingFileHandler('mysql-connector-demo.log',
                             maxBytes=1024 * 1024,
                             backupCount=1)
    # create formatter and add it to the handlers
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    # add the handlers to logger
    logger.addHandler(fh)

    main(logger)
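
As the comment above explains, the logger level is a threshold: records below it are ignored. A self-contained sketch of that behaviour:

import logging

demo = logging.getLogger('severity-demo')
demo.setLevel(logging.ERROR)
demo.addHandler(logging.StreamHandler())

demo.debug("dropped: below the ERROR threshold")
demo.info("dropped as well")
demo.error("the only record that is emitted")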
Example #34
logger.info("Start print log")
logger.debug("Do something")
logger.warning("Something maybe fail.")
logger.info("Finish")

handler = logging.FileHandler("log.txt")
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)

console = logging.StreamHandler()
console.setLevel(logging.INFO)
logger.addHandler(console)


rHandler = RotatingFileHandler("log.txt", maxBytes=1 * 1024, backupCount=3)
rHandler.setLevel(logging.INFO)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
rHandler.setFormatter(formatter)

console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(formatter)
logger.addHandler(rHandler)
logger.addHandler(console)

Example #35
CONSOLE_LOG_FORMAT = FILE_LOG_FORMAT
LOG_FILENAME = __file__ + '.log'

LOG_LEVEL = os.getenv('LOG_LEVEL', logging.INFO)
if LOG_LEVEL == 'debug':
    LOG_LEVEL = logging.DEBUG

# setup for console
logging.basicConfig(level=LOG_LEVEL, format=CONSOLE_LOG_FORMAT)
logger = logging.getLogger('')

# setup for file
log_file_handler = RotatingFileHandler(LOG_FILENAME,
                                       maxBytes=20971520,
                                       backupCount=5)
log_file_handler.setFormatter(logging.Formatter(FILE_LOG_FORMAT))
logger.addHandler(log_file_handler)
##
##
##

input_file = 'csv_example_messy_input.csv'

results = []

while True:
    try:
        con = psycopg2.connect(
            "dbname='test' user='******' host='pg_test' password='******'")
        logger.info('connected to database')
        cur = con.cursor()
Example #36
File: app.py  Project: rlcjj/fitly
# The Dash instance
app = create_dash(server)

# New DB startup tasks
db_startup(app)

# Logging
import logging
from logging.handlers import RotatingFileHandler
from .utils import config
from .api.sqlalchemy_declarative import dbRefreshStatus

# Can also use %(pathname)s for the full file path instead of %(module)s
handler = RotatingFileHandler('./config/log.log', maxBytes=10000000, backupCount=5)
formatter = logging.Formatter("[%(asctime)s] %(levelname)s from %(module)s line %(lineno)d - %(message)s")
handler.setFormatter(formatter)
app.server.logger.setLevel(config.get('logger', 'level'))
app.server.logger.addHandler(handler)
# Suppress WSGI info logs
logging.getLogger('werkzeug').setLevel(logging.ERROR)

# Push an application context so we can use Flask's 'current_app'
with server.app_context():
    # load the rest of our Dash app
    from . import index

    # Enable refresh cron
    if config.get('cron', 'hourly_pull').lower() == 'true':
        try:
            from .api.datapull import refresh_database
Example #37
File: dxmarket.py  Project: HYBG/DX
if not g_home:
    raise Exception('IKNOW_HOME not found!')

sys.path.append(os.path.join(g_home,'lib'))
from dxdb import dxdblib

logd = os.path.join(g_home, 'log')
if not os.path.isdir(logd):
    os.makedirs(logd, 0o777)
g_logger = logging.getLogger('market')
formatstr = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logfile = os.path.join(logd,'market.log')
rh = RotatingFileHandler(logfile, maxBytes=100*1024*1024,backupCount=50)
rh.setLevel(logging.INFO)
fmter = logging.Formatter(formatstr)
rh.setFormatter(fmter)
g_logger.addHandler(rh)
g_logger.setLevel(logging.INFO)

class dxmarket:
    def __init__(self):
        self._db = dxdblib('localhost','root','123456','dx','utf8')

    def __del__(self):
        pass

    def GET(self):
        try:
            fromip = self._db.exesqlone('select count(*) from dx_global where name=%s and value=%s',(web.ctx.ip,'1'))
            if fromip[0]==0:
                return json.dumps({'retcode':'10003','retmessage':'ip reject'})
Example #38
from logging.handlers import RotatingFileHandler
from ..order.order import Order

# loading configuration file
config = configparser.ConfigParser()
config.read('config.ini')

# logger settings
logger = logging.getLogger('my_logger')
logger.setLevel(logging.DEBUG)
handler = RotatingFileHandler("python_client.log",
                              maxBytes=5 * 1024 * 1024,
                              backupCount=3)
FORMAT = "%(asctime)-15s %(message)s"
fmt = logging.Formatter(FORMAT, datefmt='%m/%d/%Y %I:%M:%S %p')
handler.setFormatter(fmt)
logger.addHandler(handler)


class Accounts:
    def __init__(self, session, base_url):
        """
        Initialize Accounts object with session and account information

        :param session: authenticated session
        """
        self.session = session
        self.account = {}
        self.base_url = base_url

    def account_list(self):
Example #39
import logging
from logging.handlers import RotatingFileHandler

logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
file_handler = RotatingFileHandler('c_code.log', 'a', 1000000, 1)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
logger.addHandler(stream_handler)


class Logger(object):
    def __init__(self, label):
        super(Logger, self).__init__()
        self.label = label
        self.logger = logger

    def debug(self, string):
        self.logger.debug("%s::%s" % (self.label, string))

    def info(self, string):
        self.logger.info("%s::%s" % (self.label, string))

    def warning(self, string):
        self.logger.warning("%s::%s" % (self.label, string))

    def error(self, string):
Example #40
def setup_logger(name=None, logfile=None, level=logging.DEBUG, formatter=None, maxBytes=0, backupCount=0, fileLoglevel=None, disableStderrLogger=False):
    """
    Configures and returns a fully configured logger instance, no hassles.
    If a logger with the specified name already exists, it returns the existing instance,
    else creates a new one.

    If you set the ``logfile`` parameter with a filename, the logger will save the messages to the logfile,
    but does not rotate by default. If you want to enable log rotation, set both ``maxBytes`` and ``backupCount``.

    Usage:

    .. code-block:: python

        from logzero import setup_logger
        logger = setup_logger()
        logger.info("hello")

    :arg string name: Name of the `Logger object <https://docs.python.org/2/library/logging.html#logger-objects>`_. Multiple calls to ``setup_logger()`` with the same name will always return a reference to the same Logger object. (default: ``__name__``)
    :arg string logfile: If set, also write logs to the specified filename.
    :arg int level: Minimum `logging-level <https://docs.python.org/2/library/logging.html#logging-levels>`_ to display (default: ``logging.DEBUG``).
    :arg Formatter formatter: `Python logging Formatter object <https://docs.python.org/2/library/logging.html#formatter-objects>`_ (by default uses the internal LogFormatter).
    :arg int maxBytes: Size of the logfile when rollover should occur. Defaults to 0, rollover never occurs.
    :arg int backupCount: Number of backups to keep. Defaults to 0, rollover never occurs.
    :arg int fileLoglevel: Minimum `logging-level <https://docs.python.org/2/library/logging.html#logging-levels>`_ for the file logger (if not set, the loglevel from the ``level`` argument is used)
    :arg bool disableStderrLogger: Should the default stderr logger be disabled. Defaults to False.
    :return: A fully configured Python logging `Logger object <https://docs.python.org/2/library/logging.html#logger-objects>`_ you can use with ``.debug("msg")``, etc.
    """
    _logger = logging.getLogger(name or __name__)
    _logger.propagate = False
    _logger.setLevel(level)

    # Reconfigure existing handlers
    stderr_stream_handler = None
    for handler in list(_logger.handlers):
        if hasattr(handler, LOGZERO_INTERNAL_LOGGER_ATTR):
            if isinstance(handler, logging.FileHandler):
                # Internal FileHandler needs to be removed and re-setup to be able
                # to set a new logfile.
                _logger.removeHandler(handler)
                continue
            elif isinstance(handler, logging.StreamHandler):
                stderr_stream_handler = handler

        # reconfigure handler
        handler.setLevel(level)
        handler.setFormatter(formatter or LogFormatter())

    # remove the stderr handler (stream_handler) if disabled
    if disableStderrLogger:
        if stderr_stream_handler is not None:
            _logger.removeHandler(stderr_stream_handler)
    elif stderr_stream_handler is None:
        stderr_stream_handler = logging.StreamHandler()
        setattr(stderr_stream_handler, LOGZERO_INTERNAL_LOGGER_ATTR, True)
        stderr_stream_handler.setLevel(level)
        stderr_stream_handler.setFormatter(formatter or LogFormatter())
        _logger.addHandler(stderr_stream_handler)

    if logfile:
        rotating_filehandler = RotatingFileHandler(filename=logfile, maxBytes=maxBytes, backupCount=backupCount)
        setattr(rotating_filehandler, LOGZERO_INTERNAL_LOGGER_ATTR, True)
        rotating_filehandler.setLevel(fileLoglevel or level)
        rotating_filehandler.setFormatter(formatter or LogFormatter(color=False))
        _logger.addHandler(rotating_filehandler)

    return _logger
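
As the docstring notes, rotation only kicks in when both maxBytes and backupCount are set. A short usage sketch (file name illustrative):

logger = setup_logger(name="rotating-demo",
                      logfile="rotating-demo.log",   # illustrative path
                      maxBytes=1000000,              # roll over at ~1 MB
                      backupCount=3)                 # keep three rotated files
logger.info("written to stderr and to rotating-demo.log")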
Example #41
formatter = logging.Formatter(
    "%(asctime)s — %(name)s — %(levelname)s — %(funcName)s:%(lineno)d — %(message)s"
)
debug = RotatingFileHandler(f"file/logs/DebugKommuneLog.log",
                            maxBytes=10 * 1024 * 1024,
                            backupCount=2)
inf = RotatingFileHandler(f"file/logs/InfoKommuneLog.log",
                          maxBytes=10 * 1024 * 1024,
                          backupCount=2)
err = RotatingFileHandler(f"file/logs/ErrorKommuneLog.log",
                          maxBytes=10 * 1024 * 1024,
                          backupCount=2)
inf.setLevel(logging.INFO)
err.setLevel(logging.ERROR)
debug.setLevel(logging.DEBUG)
inf.setFormatter(formatter)
err.setFormatter(formatter)
debug.setFormatter(formatter)
logger.addHandler(err)
logger.addHandler(inf)
logger.addHandler(debug)
disable_warnings()

try:
    logger.info("Loading Proxie settings", exc_info=True)
    proxies = json.load(open("file/config_.json", "r"))["proxies"]
except FileNotFoundError:
    logger.exception(
        "config_.json needs to be present in 'working directory/file'",
        exc_info=True)
Example #42
logger = logging.getLogger()
today = time.strftime("%d%m%Y", time.localtime())
# set the logger level to INFO so it handles everything from INFO up
logger.setLevel(logging.INFO)
# create a formatter that will add the time and level
# of each message written to the log
formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
# create a handler that redirects log writes to
# a file in 'append' mode, with 1 backup and a max size of 1 MB

file_name1 = catalogue_log + "\\" + today + ".log"
file_handler1 = RotatingFileHandler(file_name1, 'a', 1000000, 1)
# set its level to INFO, tell it to use the formatter
# created above, and add this handler to the logger
file_handler1.setLevel(logging.INFO)
file_handler1.setFormatter(formatter)
logger.addHandler(file_handler1)

file_name2 = catalogue_log_debug + "\\" + today + ".log"
file_handler2 = RotatingFileHandler(file_name2, 'a', 1000000, 1)
# set its level to DEBUG, tell it to use the formatter
# created above, and add this handler to the logger
file_handler2.setLevel(logging.DEBUG)
file_handler2.setFormatter(formatter)
logger.addHandler(file_handler2)

# create a second handler that redirects every log write
# to the console
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.WARNING)
logger.addHandler(stream_handler)
Example #43
    def __init__(self,
                 organization,
                 clientId,
                 username,
                 password,
                 logHandlers=None):
        self.organization = organization
        self.username = username
        self.password = password
        self.address = organization + ".messaging.internetofthings.ibmcloud.com"
        self.port = 1883
        self.keepAlive = 60

        self.connectEvent = threading.Event()

        self._recvLock = threading.Lock()
        self._messagesLock = threading.Lock()

        self.messages = 0
        self.recv = 0

        self.clientId = clientId

        # Configure logging
        self.logger = logging.getLogger(self.__module__ + "." +
                                        self.__class__.__name__)
        self.logger.setLevel(logging.INFO)

        if logHandlers:
            if isinstance(logHandlers, list):
                # Add all supplied log handlers
                for handler in logHandlers:
                    self.logger.addHandler(handler)
            else:
                # Add the supplied log handler
                self.logger.addHandler(logHandlers)
        else:
            # Generate a default rotating file log handler and stream handler
            logFileName = '%s.log' % (clientId.replace(":", "_"))
            fhFormatter = logging.Formatter('%(asctime)-25s %(name)-25s ' +
                                            ' %(levelname)-7s %(message)s')
            rfh = RotatingFileHandler(logFileName,
                                      mode='a',
                                      maxBytes=1024000,
                                      backupCount=0,
                                      encoding=None,
                                      delay=True)
            rfh.setFormatter(fhFormatter)

            ch = logging.StreamHandler()
            ch.setFormatter(fhFormatter)
            ch.setLevel(logging.DEBUG)

            self.logger.addHandler(rfh)
            self.logger.addHandler(ch)

        self.client = paho.Client(self.clientId, clean_session=True)

        try:
            self.tlsVersion = ssl.PROTOCOL_TLSv1_2
        except:
            self.tlsVersion = None

        # Configure authentication
        if self.username is not None:
            # In environments where either ssl is not available, or TLSv1.2 is not available we will fallback to MQTT over TCP
            if self.tlsVersion is not None:
                self.port = 8883
                # Path to certificate
                caFile = os.path.dirname(
                    os.path.abspath(__file__)) + "/messaging.pem"
                self.client.tls_set(ca_certs=caFile,
                                    certfile=None,
                                    keyfile=None,
                                    cert_reqs=ssl.CERT_REQUIRED,
                                    tls_version=ssl.PROTOCOL_TLSv1_2)
            else:
                self.logger.warning(
                    "Unable to encrypt messages because TLSv1.2 is unavailable (MQTT over SSL requires at least Python v2.7.9 or 3.4 and openssl v1.0.1)"
                )
            self.client.username_pw_set(self.username, self.password)

        # Attach MQTT callbacks
        self.client.on_log = self.on_log
        self.client.on_connect = self.on_connect
        self.client.on_disconnect = self.on_disconnect
        self.client.on_publish = self.on_publish

        # Initialize default message encoders and decoders.
        self._messageEncoderModules = {}

        self.start = time.time()

        # initialize callbacks
        self._onPublishCallbacks = {}
Example #44
from utils.load.config import setting

logging_level = logging.INFO
if setting.logging_debug_level:
    logging_level = logging.DEBUG

logFormatter = logging.Formatter(fmt=setting.logging_format,
                                 datefmt=setting.logging_datefmt)

fileHandler = RotatingFileHandler(filename=setting.logging_filename,
                                  mode='a',
                                  backupCount=2,
                                  maxBytes=setting.logging_file_maxBytes,
                                  encoding=None,
                                  delay=0)
fileHandler.setFormatter(logFormatter)
fileHandler.setLevel(logging_level)

consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)

rootLogger = logging.getLogger('')  # Logging
rootLogger.setLevel(logging.NOTSET)
while rootLogger.handlers:  # Remove any pre-existing stream handlers, otherwise messages would appear more than once.
    rootLogger.handlers.pop()
rootLogger.addHandler(fileHandler)
rootLogger.addHandler(consoleHandler)

# Disable log messages from the Requests library
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
Example #45
model_logger = logging.getLogger('model')

formatter = logging.Formatter(
    '[%(asctime)s][pid:%(process)s-tid:%(thread)s] %(module)s.%(funcName)s: %(levelname)s: %(message)s'
)

# StreamHandler for print log to console
hdr = logging.StreamHandler()
hdr.setFormatter(formatter)
hdr.setLevel(logging.DEBUG)

# RotatingFileHandler
fhr_ana = RotatingFileHandler('%s/analysis.log' % (log_dir_path),
                              maxBytes=10 * 1024 * 1024,
                              backupCount=3)
fhr_ana.setFormatter(formatter)
fhr_ana.setLevel(logging.DEBUG)

# RotatingFileHandler
fhr_pro = RotatingFileHandler('%s/process.log' % (log_dir_path),
                              maxBytes=10 * 1024 * 1024,
                              backupCount=3)
fhr_pro.setFormatter(formatter)
fhr_pro.setLevel(logging.DEBUG)

# RotatingFileHandler
fhr_model = RotatingFileHandler('%s/model.log' % (log_dir_path),
                                maxBytes=10 * 1024 * 1024,
                                backupCount=3)
fhr_model.setFormatter(formatter)
fhr_model.setLevel(logging.DEBUG)
Example #46
File: __init__.py  Project: Jetzzzzz/luda
from ludaweb.controllers import *

# import model
from ludaweb.models.models import db

#### initial logger ####
import logging
from logging.handlers import RotatingFileHandler
import os

LOG_PATH = '/'.join((os.path.dirname(os.path.realpath(__file__)), 'log'))
if not os.path.exists(LOG_PATH):
    os.makedirs(LOG_PATH)

handler = RotatingFileHandler(LOG_PATH + '/debug.log', maxBytes=10000, backupCount=1)
handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
#### initial logger ####


#### initial session ####
from datetime import timedelta

app.config['SECRET_KEY'] = 'random'
app.permanent_session_lifetime = timedelta(seconds=60 * 60 * 10)  # session expire time
#### initial session ####


#### initial database ####
APP_ROOT = os.path.dirname(os.path.realpath(__file__))
Example #47
from logging.handlers import RotatingFileHandler
from logging import StreamHandler
from pathlib import Path
import logging
import os

log_folder_path = str(Path("logs").absolute())

if not os.path.exists(log_folder_path):
    os.makedirs(log_folder_path)

log_file_path = os.path.join(log_folder_path, "log.out")

log_fmt = "%(threadName)s - %(asctime)s - %(name)s - " "%(levelname)s - %(message)s"

logger_formatter = logging.Formatter(log_fmt)

file_logger = RotatingFileHandler(log_file_path,
                                  maxBytes=1024 * 1024 * 10,
                                  backupCount=5)
file_logger.setLevel(logging.DEBUG)
file_logger.setFormatter(logger_formatter)

client_logger = StreamHandler()
client_logger.setLevel(logging.INFO)
client_logger.setFormatter(logger_formatter)
Example #48
class SiisLog(object):
    """
    Siis logger initialized based on python logger.
    """
    def __init__(self, options, style=''):
        # if init before terminal
        colorama.init()

        # stderr to terminal in info level
        self.console = TerminalHandler()  #  logging.StreamHandler()
        self.console.setLevel(logging.INFO)

        # self.term_formatter = logging.Formatter('- %(name)-12s: %(levelname)-8s %(message)s')
        self.term_formatter = ColoredFormatter('%(name)-s%(message)s', style)
        self.console.setFormatter(self.term_formatter)

        # add the handler to the root logger
        logging.getLogger('').addHandler(self.console)

        # default log file formatter
        self.file_formatter = logging.Formatter(
            '%(asctime)s %(levelname)s %(message)s')

        # a siis logger with siis.log
        self.file_logger = RotatingFileHandler(options['log-path'] + '/' +
                                               options['log-name'],
                                               maxBytes=1024 * 1024,
                                               backupCount=5)
        # self.file_logger = logging.FileHandler(options['log-path'] + '/' + options['log-name'])
        self.file_logger.setFormatter(self.file_formatter)
        self.file_logger.setLevel(logging.DEBUG)

        self.add_file_logger('siis', self.file_logger)

        # a siis logger with exec.siis.log
        # self.exec_file_logger = logging.FileHandler(options['log-path'] + '/' + "exec." + options['log-name'])
        self.exec_file_logger = RotatingFileHandler(
            options['log-path'] + '/' + "exec." + options['log-name'],
            maxBytes=1024 * 1024,
            backupCount=5)
        self.exec_file_logger.setFormatter(self.file_formatter)
        self.exec_file_logger.setLevel(logging.INFO)

        # don't propagate execution to siis logger
        self.add_file_logger('siis.exec', self.exec_file_logger, False)

        # a siis logger with error.siis.log
        # self.error_file_logger = logging.FileHandler(options['log-path'] + '/' + "error." + options['log-name'])
        self.error_file_logger = RotatingFileHandler(
            options['log-path'] + '/' + "error." + options['log-name'],
            maxBytes=1024 * 1024,
            backupCount=5)
        self.error_file_logger.setFormatter(self.file_formatter)
        self.error_file_logger.setLevel(logging.INFO)

        # don't propagate error trade to siis logger
        self.add_file_logger('siis.error', self.error_file_logger, False)

        # a siis logger with signal.siis.log
        # self.signal_file_logger = logging.FileHandler(options['log-path'] + '/' + "signal." + options['log-name'])
        self.signal_file_logger = RotatingFileHandler(
            options['log-path'] + '/' + "signal." + options['log-name'],
            maxBytes=1024 * 1024,
            backupCount=5)
        self.signal_file_logger.setFormatter(self.file_formatter)
        self.signal_file_logger.setLevel(logging.INFO)

        # don't propagate signal trade to siis logger
        self.add_file_logger('siis.signal', self.signal_file_logger, propagate=False)

    def add_file_logger(self,
                        name,
                        handler,
                        level=logging.DEBUG,
                        propagate=True):
        my_logger = logging.getLogger(name)

        my_logger.addHandler(handler)
        my_logger.setLevel(level)
        my_logger.propagate = propagate

        return my_logger
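
A brief usage sketch for the class above (the options values and the log calls are assumptions, and the module's colorama/TerminalHandler/ColoredFormatter imports are taken as given):

options = {'log-path': '/tmp/siis-logs', 'log-name': 'siis.log'}  # assumed; directory must already exist
siis_log = SiisLog(options)
logging.getLogger('siis').info("written to siis.log and echoed to the terminal")
logging.getLogger('siis.exec').info("written to exec.siis.log only (propagate=False)")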
示例#49
0
def create_app(config_class=Config):

    app = Flask(__name__)

    app.config.from_object(config_class)
    app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    mail.init_app(app)
    bootstrap.init_app(app)
    momentflask.init_app(app)
    babel.init_app(app)
    UPLOAD_FOLDER_app = (app)
    app.jinja_env.add_extension('jinja2.ext.loopcontrols')

    app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']]) \
        if app.config['ELASTICSEARCH_URL'] else None

    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)

    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix='/auth')

    from app.main import bp as main_bp
    app.register_blueprint(main_bp)

    from app.user import bp as user_bp
    app.register_blueprint(user_bp)

    from app.edit import bp as edit_bp
    app.register_blueprint(edit_bp)

    from app.file import bp as file_bp
    app.register_blueprint(file_bp)

    @app.context_processor
    def inject_conf_var():
        return dict(AVAILABLE_LANGUAGES=current_app.config['LANGUAGES'],
                    CURRENT_LANGUAGE=session.get(
                        'language',
                        request.accept_languages.best_match(
                            current_app.config['LANGUAGES'].keys())))

    if not app.debug and not app.testing:
        if app.config['MAIL_SERVER']:
            auth = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                auth = (app.config['MAIL_USERNAME'],
                        app.config['MAIL_PASSWORD'])
            secure = None
            if app.config['MAIL_USE_TLS']:
                secure = ()
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr='no-reply@' + app.config['MAIL_SERVER'],
                toaddrs=app.config['ADMINS'],
                subject='myFoxit Failure',
                credentials=auth,
                secure=secure)
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        if app.config['LOG_TO_STDOUT']:
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(logging.INFO)
            app.logger.addHandler(stream_handler)
        else:
            if not os.path.exists('logs'):
                os.mkdir('logs')
            file_handler = RotatingFileHandler('logs/myFoxit.log',
                                               maxBytes=10240,
                                               backupCount=10)
            file_handler.setFormatter(
                logging.Formatter('%(asctime)s %(levelname)s: %(message)s '
                                  '[in %(pathname)s:%(lineno)d]'))
            file_handler.setLevel(logging.INFO)
            app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info('myFoxit startup')

    return app
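
A minimal sketch of using the factory above (hypothetical; assumes the default Config and that debug/testing are off):

app = create_app()
# With LOG_TO_STDOUT falsy, records at INFO and above land in logs/myFoxit.log;
# with it truthy they go to the console instead.
app.logger.info('hello from myFoxit')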
示例#50
0
log.setLevel(logging.INFO)
LOG_PATH = os.path.abspath("../../logging") \
           if not hasattr(config, 'LOG_PATH') \
           else config.LOG_PATH
os.makedirs(LOG_PATH, exist_ok=True)
error_handler = RotatingFileHandler(os.path.join(LOG_PATH, "errors.log"),
                                    maxBytes=10 * 1024 * 1024,
                                    backupCount=5)
info_handler = RotatingFileHandler(os.path.join(LOG_PATH, "general.log"),
                                   maxBytes=10 * 1024 * 1024,
                                   backupCount=5)
# console_handler = logging.StreamHandler()
# console_handler.setFormatter(formatter)
# console_handler.setLevel(logging.DEBUG)
error_handler.setLevel(logging.ERROR)
error_handler.setFormatter(formatter)
info_handler.setLevel(logging.INFO)
info_handler.setFormatter(formatter)
log.addHandler(error_handler)
# log.addHandler(console_handler)
log.addHandler(info_handler)
log.setLevel(logging.INFO)

HIDE = ['SECRET_KEY']
log.info("##### CONFIGURATION VALUES ###################\n%s" % \
         "\n".join(["\t\t\t%s: %s" % (key, value)
                    for key, value in app.config.items()
                    if key not in HIDE]))

CURRENT_DIR = os.path.abspath(os.curdir)
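
To illustrate how records route through the two handlers configured above (hypothetical messages):

log.info("recorded in general.log only")
log.warning("recorded in general.log only (below ERROR)")
log.error("recorded in both general.log and errors.log")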
示例#51
0
class MultiProcessingLog(logging.Handler):
    def __init__(self,
                 filename,
                 mode='a',
                 maxBytes=0,
                 backupCount=0,
                 encoding=None,
                 delay=0):
        logging.Handler.__init__(self)

        self._handler = RotatingFileHandler(filename,
                                            mode=mode,
                                            maxBytes=maxBytes,
                                            backupCount=backupCount,
                                            encoding=encoding,
                                            delay=delay)
        self.queue = multiprocessing.Queue(-1)

        t = threading.Thread(target=self.receive)
        t.daemon = True
        t.start()

    def setFormatter(self, fmt):
        logging.Handler.setFormatter(self, fmt)
        self._handler.setFormatter(fmt)

    def receive(self):
        while True:
            try:
                record = self.queue.get()
                self._handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                break
            except:
                traceback.print_exc(file=sys.stderr)

    def send(self, s):
        self.queue.put_nowait(s)

    def _format_record(self, record):
        # ensure that exc_info and args
        # have been stringified.  Removes any chance of
        # unpickleable things inside and possibly reduces
        # message size sent over the pipe
        if record.args:
            record.msg = record.msg % record.args
            record.args = None
        if record.exc_info:
            dummy = self.format(record)
            record.exc_info = None

        return record

    def emit(self, record):
        try:
            s = self._format_record(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        self._handler.close()
        logging.Handler.close(self)
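
A hedged usage sketch for the handler above: install it on the root logger before starting workers so every child's records funnel through the one queue (the file name, format, and worker function are assumptions; this relies on a fork start method so children inherit the handler and its queue):

def _worker(n):
    # propagates to the root logger, whose MultiProcessingLog pushes the record onto the queue
    logging.getLogger(__name__).info("hello from worker %d", n)

if __name__ == "__main__":
    mp_handler = MultiProcessingLog("multiproc.log", maxBytes=1024 * 1024, backupCount=3)
    mp_handler.setFormatter(logging.Formatter("%(asctime)s %(processName)s %(message)s"))
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(mp_handler)

    workers = [multiprocessing.Process(target=_worker, args=(i,)) for i in range(4)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()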
示例#52
0
def create_app(config_class=Config):
    app = Flask(__name__)
    app.config.from_object(config_class)

    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    mail.init_app(app)
    bootstrap.init_app(app)
    moment.init_app(app)
    babel.init_app(app)
    app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']]) \
        if app.config['ELASTICSEARCH_URL'] else None

    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)

    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix='/auth')

    from app.main import bp as main_bp
    app.register_blueprint(main_bp)

    if not app.debug and not app.testing:
        if app.config['MAIL_SERVER']:
            auth = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                auth = (app.config['MAIL_USERNAME'],
                        app.config['MAIL_PASSWORD'])
            secure = None
            if app.config['MAIL_USE_TLS']:
                secure = ()
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr='no-reply@' + app.config['MAIL_SERVER'],
                toaddrs=app.config['ADMINS'],
                subject='MoovieTribe Failure',
                credentials=auth,
                secure=secure)
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)

        if app.config['LOG_TO_STDOUT']:
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(logging.INFO)
            app.logger.addHandler(stream_handler)
        else:
            if not os.path.exists('logs'):
                os.mkdir('logs')
            file_handler = RotatingFileHandler('logs/moovietribe.log',
                                               maxBytes=10240,
                                               backupCount=10)
            file_handler.setFormatter(
                logging.Formatter('%(asctime)s %(levelname)s: %(message)s '
                                  '[in %(pathname)s:%(lineno)d]'))
            file_handler.setLevel(logging.INFO)
            app.logger.addHandler(file_handler)

        app.logger.setLevel(logging.INFO)
        app.logger.info('MoovieTribe startup')

    return app
示例#53
0
SECRET_KEY = '\xa8H\xe4;R@pi:Mo\x92\xe4M\xa7*E\x80\n\x8d\xfav3\xd8'

TIMEOUT = 3600

# Config Logging ...
from app import app
app.debug = True
import logging
from logging.handlers import RotatingFileHandler
file_handler = RotatingFileHandler('/tmp/flask.log')
file_handler.setLevel(logging.DEBUG)
logging_format = logging.Formatter(
    '%(asctime)s %(levelname)s: %(message)s '
)
file_handler.setFormatter(logging_format)
app.logger.addHandler(file_handler)

class UserOrPassIsNone(HTTPException):
    code = 402
    description = 'User or Password is None'

class ToJson(fields.Raw):
    def format(self, value):
        return json.dumps(value)

my_response_fields = {
    'code'      : fields.Integer(default = 200),
    'result'    : fields.Boolean,
    'message'   : fields.String(default= 'Success'),
    'data'      : ToJson(default= 'Null'),
示例#54
0
***********************************************************
"""
print copyr

# logging module configuration
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    )
#################################################################################################
# Define a RotatingFileHandler: keep at most 5 backup log files, each up to 1 MB
Rthandler = RotatingFileHandler('./log/deploy.log', maxBytes=1 * 1024 * 1024, backupCount=5)
Rthandler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(filename)s  %(levelname)s   %(message)s')
Rthandler.setFormatter(formatter)
logging.getLogger('').addHandler(Rthandler)
################################################################################################

# logging.debug('This is debug message')
# logging.info('This is info message')
# logging.warning('This is warning message')

# user list
testUser = {'user': '******', 'password': '******'}  # test user
rootUser1 = {'user': '******', 'password': '******'}  # root user 1 (www)
rootUser3 = {'user': '******', 'password': '******'}  # root user 2 (www)
rootUser2 = {'user': '******', 'password': '******'}  # root user 2 (uat)

# production host list
wwwHosts = [
示例#55
0
import logging
import time
import zipfile
import os
import re
from logging.handlers import RotatingFileHandler

# logging settings
today_time = time.strftime('%Y-%m-%d', time.localtime())

logger = logging.getLogger('PCMode')
handler = RotatingFileHandler('logs/Design.log',
                              maxBytes=1024 * 1000 * 10,
                              backupCount=2,
                              encoding='utf-8')
handler.setFormatter(logging.Formatter('%(levelname)s - %(message)s'))
logger.addHandler(handler)
logger.setLevel(logging.INFO)


def deal_files(zip_file_path):
    name = os.path.splitext(os.path.basename(zip_file_path))[0]
    dirname = os.path.dirname(zip_file_path)
    dst_dir_name = os.path.join(dirname, name)
    if not os.path.exists(dst_dir_name):
        f = zipfile.ZipFile(zip_file_path, 'r')
        for filename in f.namelist():
            f.extract(filename, dirname)


class DesignRead():
示例#56
0
import os
import commands
from datetime import timedelta
import logging
from logging.handlers import RotatingFileHandler

logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

LOG_PATH = '/home/tom/Log/SystemMonitor/logger.txt'
fh = RotatingFileHandler(LOG_PATH, maxBytes=10 * 1024 * 1024, backupCount=5)
fh.setLevel(logging.DEBUG)
fh_formatter = logging.Formatter(
    '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
fh.setFormatter(fh_formatter)

ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch_formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
ch.setFormatter(ch_formatter)

logger.addHandler(fh)
logger.addHandler(ch)


def get_cpu_count():
    cmd = 'cat /proc/cpuinfo | grep -w processor | wc -l'
    status, output = commands.getstatusoutput(cmd)
    if status != 0:
        logging.error('get cpu count failed!')
示例#57
0
    def run(self):

        optparser = optparse.OptionParser(
            usage="usage: %prog [-chHl] start|stop|restart|status",
            conflict_handler='resolve')
        optparser.add_option(
            '-c',
            '--configfile',
            dest='cf',
            type='string',
            action='store',
            metavar='config-file',
            help=
            'specify the path to the config file (default is ./parallel-wsn.config)',
            default=(str(os.getcwd()) + '/parallel-wsn.config'))
        optparser.add_option(
            '-l',
            '--logpath',
            dest='logpath',
            type='string',
            action='store',
            help='define the path for the logfiles (default is ./ (CWD))')
        optparser.add_option(
            '-h',
            '--hosts',
            dest='host_files',
            action='append',
            metavar='HOST_FILE',
            help='hosts file (each line "[user@]host[:port]")')
        optparser.add_option(
            '-H',
            '--host',
            dest='host_strings',
            action='append',
            metavar='HOST_STRING',
            help='additional host entries ("[user@]host[:port]")')
        optparser.add_option('-u',
                             '--user',
                             dest='user',
                             help='username (OPTIONAL)')
        optparser.add_option(
            '-t',
            '--waiting-time',
            dest='waiting_time',
            help=
            'define the time in seconds for parallel-wsn to wait for answers from the sensor nodes (OPTIONAL)'
        )
        optparser.add_option(
            '-v',
            '--verbose',
            dest='verbose',
            action='store_true',
            help='turn on warning and diagnostic messages (OPTIONAL)')

        (options, args) = optparser.parse_args(sys.argv)
        print options
        print args

        #check if verbose option was set
        if options.verbose:
            sys.log_lvl = logging.DEBUG

        #init parser to parse the config-file
        cp = ConfigParser.RawConfigParser()
        print(
            "reading configuration file from %s" % options.cf
        )  #@UndefinedVariable (this is to ignore the error in eclipse) #TODO: if verbose...
        res = cp.read(options.cf)
        if len(res) == 0:
            print "Could not read config-file at %s!\n" % options.cf
            sys.exit(-1)

        #initialize the logger
        if options.logpath:
            path = options.logpath.rstrip('/')
            ensure_dir(path)
        else:
            path = cp.get("main", "logpath")
            if path[0] == "$":
                envvar = os.getenv(path.split()[0][1:])
                if len(path.split()) > 1:
                    path = envvar + path.split()[1]
                else:
                    path = envvar
        print path
        ensure_dir(path)
        logfile = path.rstrip(
            '/') + '/parallel-wsn-' + socket.gethostname() + '.log'
        if options.verbose:
            print "logging to: %s" % logfile

        sys.logger = logging.getLogger('parallel-wsn')
        sys.logger.setLevel(
            sys.log_lvl
        )  #@UndefinedVariable (this is to ignore the error in eclipse)
        lfh = RotatingFileHandler(logfile,
                                  mode='a',
                                  maxBytes=1000000,
                                  backupCount=5)
        sys.logger.addHandler(
            lfh)  #@UndefinedVariable (this is to ignore the error in eclipse)
        formatter = logging.Formatter('%(asctime)s %(message)s')
        lfh.setFormatter(formatter)
        #don't log to console
        sys.logger.propagate = False
        #on init start a new file if existing one is not empty
        if os.path.getsize(logfile) > 0:
            lfh.doRollover()

        global comm_server
        comm_server = communications.ClientCommHandler

        global comm_client
        comm_client = communications.CommClient()

        if not args[1]:
            print "No command specified!"
            sys.logger.error(
                "No command specified"
            )  #@UndefinedVariable (this is to ignore the error in eclipse)
            sys.exit(-1)

        if cp.get("main", "individual_logfiles") == "yes":
            handler = tcp_handler(True, path.rstrip('/'),
                                  cp.get("main", "stdout"), args[1])
        else:
            handler = tcp_handler(False, path.rstrip('/'),
                                  cp.get("main", "stdout"), args[1])

        if os.uname()[1] == "uhu":
            #bind server only to the VLAN ip-address so that nobody outside can access the server
            ip_addr = communications.ip_address().get_ip_address("br-meshnet")
        else:
            #not running on huhu - bind server to all valid ip-addresses of this machine
            ip_addr = "0"

        #server in own thread
        sys.logger.debug(
            "starting TCP-Server on ip %s port %s and putting it in own thread"
            % (ip_addr, int(cp.get("main", "client_port")))
        )  #@UndefinedVariable (this is to ignore the error in eclipse)
        ch = communications.myTCPServer(
            (ip_addr, int(cp.get("main", "client_port"))), comm_server,
            handler)
        ch.allow_reuse_address = True
        ch_thread = threading.Thread(target=ch.serve_forever)
        ch_thread.setDaemon(1)
        ch_thread.start()

        #parse the passed hosts
        sys.logger.debug(
            "parsing the hosts_file at %s" % options.host_files
        )  #@UndefinedVariable (this is to ignore the error in eclipse)
        hosts = hostsParser.read_host_files(options.host_files)

        #if additional host_strings were defined add them too
        if options.host_strings:
            sys.logger.debug(
                "parsing the additional hosts string %s" % options.host_strings
            )  #@UndefinedVariable (this is to ignore the error in eclipse)
            for host_string in options.host_strings:
                res = hostsParser.parse_host_string(host_string)
                if res:
                    hosts.extend(res)

        if not hosts:
            print "No Hosts specified!"
            sys.logger.error(
                "No hosts were specified in either a file or the string (-h or -H option)"
            )  #@UndefinedVariable (this is to ignore the error in eclipse)
            sys.exit(-1)

        if options.verbose:
            print hosts

        daemon_port = cp.get("main", "daemon_port")
        sys.logger.debug(
            "sending command to the host(s) now"
        )  #@UndefinedVariable (this is to ignore the error in eclipse)
        for host in hosts:
            if options.verbose:
                print "sending %s, %s, %s" % (args[1], str(
                    host[0]), (host[1] if host[1] else daemon_port))
            sys.logger.debug(
                "sending %s, %s, %s" % (args[1], str(host[0]),
                                        (host[1] if host[1] else daemon_port))
            )  #@UndefinedVariable (this is to ignore the error in eclipse)
            try:
                comm_client.send(args[1], str(host[0]),
                                 (host[1] if host[1] else daemon_port))
            except:
                sys.logger.error(
                    "Couldn't send %s to %s" % (args[1], str(host[0]))
                )  #@UndefinedVariable (this is to ignore the error in eclipse)
                print("Couldn't send %s to %s" % (args[1], str(host[0])))

        if options.verbose:
            print "waiting for answers"
        if options.waiting_time:
            sys.logger.debug(
                "waiting %s seconds for answers" % options.waiting_time
            )  #@UndefinedVariable (this is to ignore the error in eclipse)
            time.sleep(float(options.waiting_time))
        else:
            sys.logger.debug(
                "waiting %s seconds for answers" %
                cp.get("main", "waiting_time")
            )  #@UndefinedVariable (this is to ignore the error in eclipse)
            time.sleep(float(cp.get("main", "waiting_time")))

        sys.logger.debug(
            "shutting down TCP-Server and exiting system now"
        )  #@UndefinedVariable (this is to ignore the error in eclipse)
        ch.server_close()
        ch.shutdown()
示例#58
0
def configure_logging(identifier, logfile):
    # enable cross-platform colored output
    colorama.init()

    # get the root logger and make it verbose
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)

    # this allows us to set an upper threshold for the log levels since the
    # setLevel method only sets a lower one
    class UpperThresholdFilter(logging.Filter):
        def __init__(self, threshold, *args, **kwargs):
            self._threshold = threshold
            super(UpperThresholdFilter, self).__init__(*args, **kwargs)

        def filter(self, rec):
            return rec.levelno <= self._threshold

    # use colored output and use different colors for different levels
    class ColorFormatter(logging.Formatter):
        def __init__(self, colorfmt, *args, **kwargs):
            self._colorfmt = colorfmt
            super(ColorFormatter, self).__init__(*args, **kwargs)

        def format(self, record):
            if record.levelno == logging.INFO:
                color = colorama.Fore.GREEN
            elif record.levelno == logging.WARNING:
                color = colorama.Fore.YELLOW
            elif record.levelno == logging.ERROR:
                color = colorama.Fore.RED
            elif record.levelno == logging.DEBUG:
                color = colorama.Fore.CYAN
            else:
                color = ""
            self._fmt = self._colorfmt.format(color, colorama.Style.RESET_ALL)
            return logging.Formatter.format(self, record)

    # configure formatter
    logfmt = "{{}}[%(asctime)s|{}|%(levelname).3s]{{}} %(message)s".format(
        identifier)
    formatter = ColorFormatter(logfmt)

    # configure stdout handler
    stdouthandler = logging.StreamHandler(sys.stdout)
    stdouthandler.setLevel(logging.DEBUG)
    stdouthandler.addFilter(UpperThresholdFilter(logging.INFO))
    stdouthandler.setFormatter(formatter)
    logger.addHandler(stdouthandler)

    # configure stderr handler
    stderrhandler = logging.StreamHandler(sys.stderr)
    stderrhandler.setLevel(logging.WARNING)
    stderrhandler.setFormatter(formatter)
    logger.addHandler(stderrhandler)

    # configure file handler (no colored messages here)
    filehandler = RotatingFileHandler(logfile,
                                      maxBytes=1024 * 1024 * 100,
                                      backupCount=5)
    filehandler.setLevel(logging.DEBUG)
    filehandler.setFormatter(logging.Formatter(logfmt.format("", "")))
    logger.addHandler(filehandler)
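
A minimal usage sketch (the identifier and log path are assumptions):

configure_logging("worker-1", "worker-1.log")
logging.info("green on stdout, and written to worker-1.log")
logging.warning("yellow on stderr, and written to worker-1.log")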
示例#59
0
if __name__ == "__main__":

    my_handler = RotatingFileHandler(
        filename='./cisco_data.log',
        mode='a',
        maxBytes=1 * 1024 * 1024,  # 1 MB
        backupCount=10,
        encoding=None,
        delay=0)

    log_level = logging.DEBUG
    app_log = logging.getLogger()
    my_handler.setLevel(log_level)
    formatter = logging.Formatter('%(asctime)s %(levelname)s,%(message)s',
                                  datefmt='%b %d,%Y %H:%M:%S')
    my_handler.setFormatter(formatter)
    app_log.setLevel(log_level)
    app_log.addHandler(my_handler)
    #
    std_out = logging.StreamHandler(sys.stdout)
    std_out.setLevel(log_level)
    std_out.setFormatter(formatter)
    app_log.addHandler(std_out)
    #
    logging.info("Starting cisco_switch_data")
    logging.info("log level: {}".format(logging.getLevelName(log_level)))
    #

    parser = argparse.ArgumentParser()
    parser.add_argument('--init',
                        type=bool,
示例#60
0
def initLogger(
        fileName: str = None,
        appName: str = DEFAULT_APP_NAME,
        logLevel: int = LogLevels.INFO,  # default logging level
        logFormat: str = LOG_FORMAT_2,
        maxFileSize: int = 1 << 24,
        backupCount=BACKUP_COUNT) -> bool:
    """Initializes the logging system.

  Args:
    fileName:
    appName: one word app name (without space/ special chars)
    logLevel:
    logFormat:
    maxFileSize: in bytes
    backupCount:
    
  Returns:
    bool: True if logging setup correctly.
  """
    global _initialized, _rootLogger, _log, ABS_LOG_FILE_NAME
    if _initialized: return True

    # create log file dir
    if fileName:
        dirPath = osp.dirname(fileName)
        absPath = createDir(dirPath)
    else:
        dirPath = LOGS_DIR.format(APP_NAME=appName)
        absPath = createDir(dirPath)
        fileName = LOG_FILE_NAME.format(APP_NAME=appName)

    if not absPath:
        logging.error("%s: Cannot create logging dir: %s", appName, dirPath)
        return False

    absFileName = osp.join(absPath, fileName)
    ABS_LOG_FILE_NAME = absFileName

    logging.info("{APP_NAME}: logs enabled: setting up logging system.".format(
        APP_NAME=appName))

    # set up root logger
    _rootLogger = logging.getLogger()
    _rootLogger.setLevel(logLevel)

    handler = RotatingFileHandler(absFileName,
                                  maxBytes=maxFileSize,
                                  backupCount=backupCount)
    handler.setFormatter(logging.Formatter(logFormat))

    _rootLogger.handlers = []  # remove all previous handlers
    _rootLogger.addHandler(handler)

    _rootLogger.info("%s: Initialized with format : %s", appName,
                     repr(logFormat))

    _log = logging.getLogger(__name__)

    _initialized = True
    return True
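
A hedged usage sketch; the module-level constants (DEFAULT_APP_NAME, LOGS_DIR, LOG_FILE_NAME, LOG_FORMAT_2, BACKUP_COUNT, LogLevels) and the createDir helper are defined elsewhere in the original module, so only the public entry point is exercised here:

if initLogger(appName="demoapp", logLevel=logging.INFO):
    # ABS_LOG_FILE_NAME is set by initLogger on success
    logging.getLogger(__name__).info("log file: %s", ABS_LOG_FILE_NAME)
else:
    print("failed to initialize logging")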