Example #1
def setup_logging(lf_enabled=True, lc_enabled=True):

    logger = logging.getLogger('sonarr_youtubedl')
    logger.setLevel(logging.INFO)
    log_format = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    if lf_enabled:
        # setup logfile
        log_file = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', 'logs'))
        log_file = os.path.abspath(log_file + '/sonarr_youtubedl.log')
        loggerfile = RotatingFileHandler(log_file,
                                         maxBytes=5000000,
                                         backupCount=5)
        loggerfile.setLevel(logging.INFO)
        loggerfile.set_name('FileHandler')
        loggerfile.setFormatter(log_format)
        logger.addHandler(loggerfile)

    if lc_enabled:
        # setup console log
        loggerconsole = logging.StreamHandler()
        loggerconsole.setLevel(logging.INFO)
        loggerconsole.set_name('StreamHandler')
        loggerconsole.setFormatter(log_format)
        logger.addHandler(loggerconsole)

    return logger
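
A minimal call sketch for the example above, assuming the module-level imports the function relies on (they are not shown in the snippet) and that the ../logs directory already exists:

import logging
import os
from logging.handlers import RotatingFileHandler

# Create the 'sonarr_youtubedl' logger with a rotating log file and console output
logger = setup_logging(lf_enabled=True, lc_enabled=True)
logger.info('sonarr_youtubedl started')
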
Example #2
def initLogFile(formatDict):
    noHandler = True  # Assume no matching handler is attached yet
    for handler in log.handlers:  # Iterate over all handlers on the logger
        if handler.get_name() == formatDict['name']:  # If the handler name matches
            noHandler = False  # A handler with this name already exists
            break  # Stop searching

    if noHandler:
        logDir = os.path.dirname(formatDict['file'])
        if not os.path.isdir(logDir):
            os.makedirs(logDir)
        rfh = RotatingFileHandler(formatDict['file'], **ROTATING_FORMAT)
        rfh.setFormatter(formatDict['formatter'])
        rfh.setLevel(formatDict['level'])  # Set the logging level
        rfh.set_name(formatDict['name'])  # Set the handler name
        log.addHandler(rfh)  # Add the handler to the module logger

        info = os.stat(formatDict['file'])  # Get file stats for the log file
        if (info.st_mode & formatDict['permissions']
            ) != formatDict['permissions']:  # If the requested permission bits are not set
            try:
                os.chmod(formatDict['file'], formatDict['permissions'])
            except OSError:
                log.info(
                    'Failed to change log permissions; this may cause issues')
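
initLogFile depends on a module-level log, a ROTATING_FORMAT dict and a formatDict whose keys are implied by the lookups above; a sketch of plausible values follows (every value here is an illustrative assumption, not taken from the original project):

import logging
import os
import stat
from logging.handlers import RotatingFileHandler

log = logging.getLogger(__name__)  # module-level logger that initLogFile extends

# Rotation settings unpacked into RotatingFileHandler; sizes are assumptions
ROTATING_FORMAT = {'maxBytes': 5 * 1024 * 1024, 'backupCount': 5}

formatDict = {
    'name': 'app_file',                    # handler name checked for duplicates
    'file': '/var/log/myapp/myapp.log',    # log path; parent dir is created if missing
    'formatter': logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'),
    'level': logging.INFO,
    'permissions': stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH,  # 0o644
}

initLogFile(formatDict)
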
Example #3
def configureLogger(loggerfilepath):
    """
    Configuracion general del logger
    """

    ch = logging.StreamHandler(sys.stdout)
    ch.set_name("logmapper")
    ch.setFormatter(
        logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))

    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    for handler in root.handlers:
        root.removeHandler(handler)
    root.addHandler(ch)

    fileHandler = logging.FileHandler(loggerfilepath)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fileHandler.setFormatter(formatter)
    #    root.addHandler(fileHandler)

    rotateHandler = RotatingFileHandler(loggerfilepath,
                                        maxBytes=10000000,
                                        backupCount=2)
    rotateHandler.set_name('rotateHandler')
    rotateHandler.setFormatter(
        logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    rotateHandler.setLevel(logging.INFO)
    root.addHandler(rotateHandler)
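
A short call sketch, assuming the module-level imports (logging, sys, RotatingFileHandler) and an illustrative log path and logger name:

configureLogger('/var/log/logmapper/logmapper.log')  # path chosen for illustration

log = logging.getLogger('mapper')
log.info('goes to stdout and, being INFO or above, also to the rotating file')
log.debug('goes to stdout only; the rotating handler is set to INFO')

Note that the plain FileHandler in the example is created and formatted but never attached (its addHandler call is commented out), so only the stream handler and the rotating handler actually emit records.
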
Example #4
def _setup_file_logging(logger, log_path):
    # Create paths and files as needed.
    log_path = Path(log_path)
    os.makedirs(str(log_path.parent), mode=0o755, exist_ok=True)
    log_path.touch(mode=0o0664, exist_ok=True)

    handler = RotatingFileHandler(str(log_path), maxBytes=10_000_000)
    handler.rotator = _gzip_rotator
    handler.namer = _gzip_namer
    handler.setLevel(LOG_LEVEL_FILE)
    formatter = CustomLoggingFormatter(LOG_FMT, DATE_FMT)
    handler.setFormatter(formatter)
    name = "File Handler"
    handler.set_name(name)
    if name not in [h.name for h in logger.handlers]:
        logger.addHandler(handler)
        logger.info("File logging initialized")
Example #5
File: logger.py  Project: phi-grib/flame
def get_logger(name) -> logging.Logger:
    """ inits a logger and adds the handlers.

    If the logger is already created doesn't adds new handlers
    since those are set at interpreter level and already exists.
    """
    # create logger
    logger = logging.getLogger(name)

    # set base logger level to DEBUG but fine-tune the handlers
    # for custom levels
    logger.setLevel(logging.DEBUG)

    # create formatter for the file handler (more explicit)
    file_formatter = logging.Formatter(
        '%(levelname)-8s [%(asctime)s] %(thread)d - %(name)s - %(message)s')

    # formatter for the stream handler (less info)
    stdout_formatter = logging.Formatter('%(levelname)s - %(message)s')

    log_file = get_log_file()  # Create the log file
    # create console and file handler
    # if not already created
    if not logger.handlers:

        # Send DEBUG to a rotating log file
        # Limit the size to 1,000,000 bytes (~1 MB)
        fh = RotatingFileHandler(log_file, maxBytes=1000000, backupCount=3)
        fh.set_name('filehandler')
        fh.setLevel('DEBUG')
        fh.setFormatter(file_formatter)
        logger.addHandler(fh)

        # send INFO to the console (stdout)
        ch = logging.StreamHandler(sys.stdout)
        ch.set_name('streamhandler')
        ch.setLevel('INFO')
        ch.setFormatter(stdout_formatter)
        logger.addHandler(ch)

        return logger

    # if there are already handlers, just return the logger
    # since it's already configured
    else:
        return logger
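
Because handlers are only attached when logger.handlers is empty, repeated calls are safe; a usage sketch with an illustrative logger name:

log = get_logger('flame.manage')        # first call attaches the file and stream handlers
log.debug('written to the rotating file only')
log.info('written to both the file and stdout')

same_log = get_logger('flame.manage')   # second call: handlers already exist, none are added
assert same_log is log
assert len(same_log.handlers) == 2
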
Example #6
File: core.py  Project: jkchen2/JshBot
def start(start_file=None):
    if start_file:
        path = os.path.split(os.path.realpath(start_file))[0]
        logging.debug("Setting directory to " + path)
        docker_mode = False
    else:  # Use Docker setup
        path = '/external'
        logging.info("Bot running in Docker mode.")
        logging.debug("Using Docker setup path, " + path)
        docker_mode = True

    try:
        config_file_location = path + '/config/core-config.yaml'
        with open(config_file_location, 'rb') as config_file:
            config = yaml.safe_load(config_file)
            selfbot_mode, token, debug = config['selfbot_mode'], config['token'], config['debug']
    except Exception as e:
        logging.error("Could not determine token /or selfbot mode.")
        raise e

    if selfbot_mode is True:  # Explicit, for YAML 1.2 vs 1.1
        client_type = discord.Client
        logging.debug("Using standard client (selfbot enabled).")
    else:
        client_type = discord.AutoShardedClient
        logging.debug("Using autosharded client (selfbot disabled).")

    # Set debug logs
    if debug is True:
        log_file = '{}/temp/debug_logs.txt'.format(path)
        if os.path.isfile(log_file):
            shutil.copy2(log_file, '{}/temp/last_debug_logs.txt'.format(path))
        file_handler = RotatingFileHandler(log_file, maxBytes=5000000, backupCount=5)
        file_handler.set_name('jb_debug_file')
        stream_handler = logging.StreamHandler()
        stream_handler.set_name('jb_debug_stream')
        logging.basicConfig(level=logging.DEBUG, handlers=[file_handler, stream_handler])

    # Set regular logs
    else:
        log_file = '{}/temp/logs.txt'.format(path)
        file_handler = RotatingFileHandler(log_file, maxBytes=5000000, backupCount=5)
        file_handler.setFormatter(logging.Formatter(
            '[%(filename)s] %(asctime)s %(levelname)s: %(message)s'))
        file_handler.setLevel(logging.DEBUG)
        file_handler.set_name('jb_log_file')
        logger.addHandler(file_handler)
        logger.setLevel(logging.DEBUG)

    def safe_exit():
        loop = asyncio.get_event_loop()
        try:  # From discord.py client.run
            loop.run_until_complete(bot.logout())
            pending = asyncio.Task.all_tasks()
            gathered = asyncio.gather(*pending)
        except Exception as e:
            logger.error("Failed to log out. %s", e)
        try:
            gathered.cancel()
            loop.run_until_complete(gathered)
            gathered.exception()
        except:
            pass
        logger.warn("Bot disconnected. Shutting down...")
        bot.shutdown()  # Calls sys.exit

    def exception_handler(loop, context):
        e = context.get('exception')
        if e and e.__traceback__:
            traceback_text = ''.join(traceback.format_tb(e.__traceback__))
        else:
            traceback_text = traceback.format_exc()
        if not traceback_text:
            traceback_text = '(No traceback available)'
        error_message = '{}\n{}'.format(e, traceback_text)
        logger.error("An uncaught exception occurred.\n%s", error_message)
        with open(path + '/temp/error.txt', 'w') as error_file:
            error_file.write(error_message)
        logger.error("Error file written.")
        if bot.is_closed():
            safe_exit()

    loop = asyncio.get_event_loop()
    bot = get_new_bot(client_type, path, debug, docker_mode)
    start_task = bot.start(token, bot=not selfbot_mode)
    loop.set_exception_handler(exception_handler)
    try:
        loop.run_until_complete(start_task)
    except KeyboardInterrupt:
        logger.warn("Interrupted!")
        safe_exit()
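
The startup above expects a core-config.yaml containing at least the three keys it reads; a minimal sketch that writes such a file for the Docker layout used above (the values are placeholders, not working credentials):

import yaml

core_config = {
    'selfbot_mode': False,   # False -> AutoShardedClient, True -> plain Client
    'token': 'YOUR_BOT_TOKEN_HERE',
    'debug': False,          # True enables the DEBUG file and stream handlers
}

with open('/external/config/core-config.yaml', 'w') as config_file:
    yaml.safe_dump(core_config, config_file, default_flow_style=False)
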
Example #7
File: logger.py  Project: casucode/tools
def log_gen(level='DEBUG',
            logfile=None,
            threads=False,
            log_only=False,
            email_level=None,
            email_data={},
            format=None):
    """
 *+
 *  Name:
 *      log_gen
 *
 *  Purpose:
 *      Generate a python logging instance
 *
 *  Description:
 *      This method provides the mechanism to generate a python logger for
 *      recording structured information with levels from DEBUG to CRITICAL.
 *      Optional email escalation for submissions above a particular level
 *      is available.
 *      
 *      File output is via a rotating file handler that will archive the log 
 *      when it reaches 5MB in size. The output format of the log entry is:
 *      
 *      Day nn Month hr:mm:ss yyyy @module.<function>() [level] [thread_id] : message
 *
 *  Arguments:
 *      level ['DEBUG']
 *          The minimum reporting level to output
 *      logfile [None]
 *          String to output logfile if required
 *      threads [False]
 *          Will multiple threads be generating log entries?
 *      log_only [False]
 *          Only output to the logfile (do not output to stdout)
 *      email_level [None]
 *          Define minimum loglevel for email escalation
 *      email_data [{}]
 *          Python dictionary containing information needed for email messaging. Keys:
 *                 'FROM'    : The email address of the author
 *                 'TO'      : The target email address
 *                 'SUBJECT' : Subject line for the email
 *                 'BUFFER'  : Number of lines before the buffer is flushed (ie, send email)
 *      
 *  Returned values:
 *      Instance of <logging.RootLogger>
 *
 *  Notes:
 *      None
 *
 *  Dependencies:
 *      Python core 
 *
 *  Authors:
 *      David Murphy (CASU, IoA)
 *
 *  Copyright:
 *      Copyright (C) 2017-2018 Cambridge Astronomy Survey Unit.
 *      All Rights Reserved.
 *
+*  
    """
    import sys
    import logging
    from logger import ColourFormatter

    if level not in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']:
        #print "defined level '%s' not recognised. Setting to DEBUG" %(level)
        level = 'DEBUG'

    log = logging.getLogger()
    custom_format = 'auto'

    if not len(log.handlers):
        log.setLevel(level)
        if threads:
            formatter = logging.Formatter(
                '%(asctime)s @%(module)s.%(funcName)s() [%(levelname)s] [%(threadName)s] : %(message)s',
                datefmt="%a %d %b %H:%M:%S %Y")
        else:
            formatter = logging.Formatter(
                '%(asctime)s @%(module)s.%(funcName)s() [%(levelname)s] : %(message)s',
                datefmt="%a %d %b %H:%M:%S %Y")

        if format:
            formatter = logging.Formatter(format)
            custom_format = format

        if email_level is not None:
            from logger import SMTPBufferHandler
            MAILHOST = 'smtp.ast.cam.ac.uk'
            try:
                FROM = email_data['FROM']
            except KeyError:
                FROM = '*****@*****.**'
            try:
                TO = email_data['TO']
            except KeyError:
                TO = '*****@*****.**'
            try:
                SUBJECT = email_data['SUBJECT']
            except KeyError:
                SUBJECT = 'A critical message from your server'
            try:
                buffer_size = email_data['BUFFER']
            except KeyError:
                buffer_size = 1
            eh = SMTPBufferHandler(MAILHOST, FROM, TO, SUBJECT, buffer_size)
            eh.setLevel('CRITICAL')
            eh.setFormatter(formatter)
            eh.set_name('email_stream')
            log.addHandler(eh)

        if (not log_only) or (not logfile):
            ch = logging.StreamHandler(sys.stdout)
            ch.setLevel(level)
            ch.setFormatter(formatter)

            ch.setFormatter(
                ColourFormatter(threads=threads, format=custom_format))
            ch.set_name('stdout')
            log.addHandler(ch)

        if logfile:
            from logging.handlers import RotatingFileHandler
            fh = RotatingFileHandler(logfile, maxBytes=5242880, backupCount=20)
            #                    fh = logging.FileHandler(logfile)
            fh.setLevel(level)
            fh.setFormatter(formatter)
            fh.set_name('log_file')
            log.addHandler(fh)
    return log
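
A call sketch using the email_data keys documented in the docstring; the addresses, log path and subject are illustrative placeholders:

log = log_gen(
    level='INFO',
    logfile='/data/logs/pipeline.log',
    threads=True,
    email_level='CRITICAL',
    email_data={
        'FROM': 'pipeline@example.org',
        'TO': 'oncall@example.org',
        'SUBJECT': 'Pipeline critical failure',
        'BUFFER': 1,
    },
)
log.info('pipeline started')
log.critical('this entry is also buffered for email escalation')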