示例#1
0
文件: utils.py 项目: xbee/zenodo
def get_file_logger(logfile, audit_type, audit_id):
    """Build the audit logger, buffered through a MemoryHandler.

    :param logfile: path of the log file; falsy to skip file output.
    :param audit_type: audit category, embedded in the logger name.
    :param audit_id: audit identifier, embedded in the logger name.
    :return: a ``logging.Logger`` instance.
    """
    logger_name = 'zenodo.auditor.{type}.{id}'.format(
        type=audit_type, id=audit_id)
    audit_logger = logging.getLogger(logger_name)
    if logfile:
        # Buffer up to 100 records before flushing them to the file.
        target = logging.FileHandler(logfile, mode='w')
        audit_logger.addHandler(MemoryHandler(100, target=target))
    return audit_logger
    def setup_memory_logging_handler(cls) -> None:
        """Install a MemoryHandler on the root logger that buffers
        everything and (practically) never auto-flushes.

        NOTE(review): ``maxsize`` is not defined in this snippet --
        presumably ``sys.maxsize``, which makes both the capacity and
        the flush level effectively unreachable; confirm at the import
        site.
        """
        # Under pytest, keep the handler installed by an earlier call
        # so repeated setup does not stack handlers.
        if cls._pytest_active() and cls.memory_handler:
            return

        cls.memory_handler = MemoryHandler(capacity=maxsize,
                                           flushLevel=maxsize)

        # NOTSET on the root logger lets every record reach the handler.
        root = getLogger()
        root.setLevel(NOTSET)
        root.addHandler(cls.memory_handler)
示例#3
0
文件: Logs.py 项目: Mokl/SP4L
def make_mem_logger(name, to_log, size=8192):
    """Create a DEBUG-level logger whose records are buffered in memory.

    Records accumulate in a MemoryHandler of capacity *size* and are
    forwarded to the *to_log* target handler on flush.  The buffering
    handler is also exposed as ``logger.memhandler``.
    """
    from logging.handlers import MemoryHandler
    mem_logger = logging.getLogger(name)
    buffered = MemoryHandler(size, target=to_log)
    buffered.setFormatter(logging.Formatter('%(message)s'))
    mem_logger.addHandler(buffered)
    mem_logger.memhandler = buffered
    mem_logger.setLevel(logging.DEBUG)
    return mem_logger
示例#4
0
    def make_memory(capacity, flushlevel, target):
        """
        Same as make(), but wraps the target in a memory buffer that is
        flushed when full or when a record at or above *flushlevel*
        arrives.

        :param capacity: number of records the buffer can hold.
        :param flushlevel: log level that forces an immediate flush.
        :param target: handler that receives the flushed records.
        :return: the buffering handler.
        :rtype: MemoryHandler
        """
        buffered = MemoryHandler(capacity,
                                 flushLevel=flushlevel,
                                 target=target)
        return buffered
示例#5
0
 def __update_logs_configuration(self):
     """Rewrite logs.conf from the new configuration and re-wire the
     gateway's remote-logging handler chain.

     Any failure is logged and swallowed so that a bad configuration
     cannot take the gateway down.
     """
     try:
         logs_conf_file_path = self.__gateway._config_dir + 'logs.conf'
         with open(logs_conf_file_path, 'w') as logs:
             logs.write(self.__new_logs_configuration + "\r\n")
         # Re-read the freshly written file so logging picks it up.
         fileConfig(logs_conf_file_path)
         # capacity=-1 makes the size check (len(buffer) >= -1) always
         # true, so records pass straight through to the target.
         self.__gateway.main_handler = MemoryHandler(-1)
         self.__gateway.remote_handler = TBLoggerHandler(self.__gateway)
         self.__gateway.main_handler.setTarget(
             self.__gateway.remote_handler)
         log.debug("Logs configuration has been updated.")
     except Exception as e:
         log.exception(e)
示例#6
0
文件: Logs.py 项目: solarblue/waf
def make_mem_logger(name, to_log, size=8192):
    """
    Create a memory-buffered logger so messages are not written
    concurrently to the main logger.
    """
    from logging.handlers import MemoryHandler
    mem_logger = logging.getLogger(name)
    buffering_handler = MemoryHandler(size, target=to_log)
    buffering_handler.setFormatter(logging.Formatter('%(message)s'))
    mem_logger.addHandler(buffering_handler)
    mem_logger.memhandler = buffering_handler
    mem_logger.setLevel(logging.DEBUG)
    return mem_logger
示例#7
0
def add_handler(handler):
    """
    Register *handler* on the base logger, wrapped in a MemoryHandler.

    Buffering through a MemoryHandler avoids flushing on every single
    message, which would present a severe scalability issue.
    """
    buffered = MemoryHandler(32, target=handler)
    buffered.setFormatter(logger_formatter)
    # Accept everything here; real filtering is done by the logger.
    buffered.setLevel(DEBUG)
    MEM_HANDLERS.append(buffered)
    base_logger.addHandler(buffered)
示例#8
0
def get_logger(path):
    """Return the 'luftdaten' logger, writing to *path* and the console.

    File output rotates daily (7 backups) and is buffered through a
    large MemoryHandler to reduce I/O; console output is unbuffered.
    """
    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')

    file_handler = TimedRotatingFileHandler(path, when='d', backupCount=7)
    file_handler.setFormatter(formatter)
    buffered_file = MemoryHandler(capacity=512 * 1024, target=file_handler)

    console = logging.StreamHandler()
    console.setFormatter(formatter)

    logger = logging.getLogger("luftdaten")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(buffered_file)
    logger.addHandler(console)
    return logger
示例#9
0
def init() -> None:
    """Call this on first import. Don't call it again later.

    This sets up the default loggers and logging locations.
    """
    global _is_inited
    if _is_inited:
        raise RuntimeError('This is a "singleton module". Only init() once.')
    _is_inited = True

    root_logger = logging.getLogger()
    # We need to default to DEBUG in order to be able to filter downstream.
    root_logger.level = logging.DEBUG
    root_logger.name = 'pyodine'

    # Log to files in two separate locations.

    _setup_log_dir(PRIMARY_LOG_LOCATION)  # Will raise if primary logging can't work.
    _VALID_LOG_LOCATIONS.append(PRIMARY_LOG_LOCATION)
    try:
        _setup_log_dir(SECONDARY_LOG_LOCATION)
    except OSError:
        logging.error("Can't set up secondary log location!")
    else:
        _VALID_LOG_LOCATIONS.append(SECONDARY_LOG_LOCATION)
    # We need to specify 3600 seconds here instead of one hour, to force
    # detailed file name suffixes for manual log rotation.  This may lead to
    # problems if the program is started/stopped multiple times per second.
    writers = [TimedRotatingFileHandler(directory + PROGRAM_LOG_DIR + PROGRAM_LOG_FILE,
                                        when='s', interval=3600)
               for directory in _VALID_LOG_LOCATIONS]
    for writer in writers:
        writer.doRollover()  # Start a new file every time pyodine is run.
        writer.formatter = logging.Formatter(
            "{asctime} {name} {levelname} - {message} [{module}.{funcName}]",
            style='{')

    buffers = [MemoryHandler(200, target=writer) for writer in writers]

    for log_buffer in buffers:
        root_logger.addHandler(log_buffer)

    # Log to stderr.

    stderr = logging.StreamHandler()
    stderr.setLevel(logging.INFO)
    stderr.formatter = logging.Formatter(
        "{levelname:<7} {message} "
        "[{module}:{lineno}] ({name})", style='{')
    root_logger.addHandler(stderr)
示例#10
0
def add_module_logger_to_schoollib():
    """Install (once) a buffered handler that forwards records to the
    project's ModuleHandler; later calls only log a hint.

    :return: the installed MemoryHandler (module-global).
    """
    global _module_handler
    if _module_handler is None:
        module_handler = ModuleHandler(udebug_facility=ud.MODULE)
        # capacity=-1 makes the size check (len(buffer) >= -1) always
        # true, and flushLevel=DEBUG flushes on every record anyway, so
        # records pass straight through to the ModuleHandler.
        _module_handler = MemoryHandler(-1,
                                        flushLevel=logging.DEBUG,
                                        target=module_handler)
        _module_handler.setLevel(logging.DEBUG)
        logger.addHandler(_module_handler)
    else:
        logger.info(
            'add_module_logger_to_schoollib() should only be called once! Skipping...'
        )
    return _module_handler
示例#11
0
    def scan_package(
        self,
        parsed_args: argparse.Namespace,
        count: int,
        package: Package,
        num_packages: int,
    ) -> Optional[Dict[str, List[Issue]]]:
        """Scan each package in a separate process while buffering output.

        While the scan runs, the root logger's first handler (if any) is
        temporarily replaced by a MemoryHandler wrapping it, and
        stdout/stderr are redirected into a StringIO; both are restored
        afterwards.

        :param parsed_args: parsed command-line arguments for the run.
        :param count: 1-based index of this package in the run.
        :param package: the package to scan.
        :param num_packages: total number of packages being scanned.
        :return: mapping of tool name to issues, or None on failure.
        """
        logger = logging.getLogger()
        old_handler = None
        handler = None
        # Bug fix: the original indexed logger.handlers[0] without
        # checking the list was non-empty (IndexError on an unconfigured
        # root logger) and called addHandler(handler) unconditionally
        # (UnboundLocalError when no handler had been created).  Only
        # swap in the buffering handler when a real handler exists.
        if logger.handlers:
            old_handler = logger.handlers[0]
            handler = MemoryHandler(10000,
                                    flushLevel=logging.ERROR,
                                    target=old_handler)
            logger.removeHandler(old_handler)
            logger.addHandler(handler)

        logging.info("-- Scanning package %s (%d of %d) --", package.name,
                     count, num_packages)

        # Capture anything the scan writes directly to stdout/stderr.
        sio = io.StringIO()
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        sys.stdout = sio
        sys.stderr = sio

        issues, dummy = self.run(package.path, parsed_args)

        sys.stdout = old_stdout
        sys.stderr = old_stderr
        logging.info(sio.getvalue())

        if issues is not None:
            logging.info(
                "-- Done scanning package %s (%d of %d) --",
                package.name,
                count,
                num_packages,
            )
        else:
            logging.error("Failed to run statick on package %s!", package.name)

        if old_handler is not None:
            # Flush buffered records to the real handler and restore it.
            handler.flush()
            logger.removeHandler(handler)
            logger.addHandler(old_handler)

        return issues
示例#12
0
文件: main.py 项目: incumbent/sdrl
def runExperiment(opt, visualize_steps, visualize_learning, visualize_performance, q):
    """Run one experiment in a child process, streaming its log output
    back through the queue *q*.
    """
    # The Experiment must be created inside the child process; passing a
    # pre-built object in would interfere with the logger's normal
    # operation.
    exp = ExperimentFactory.get(**opt)

    # Attach a handler chain to the experiment logger:
    # child log -> MemoryHandler -> OutputHandler -> queue
    #   <- ExpOutputDialog.Receiver -> SIGNAL -> QTextEdit
    # Log records cross the process boundary via the queue; the main
    # process receives new messages from the queue on a worker thread.
    from logging.handlers import MemoryHandler
    handler = MemoryHandler(capacity=1024, flushLevel=logging.INFO, target=OutputHandler(q))
    exp.logger.addHandler(handler)

    exp.run(visualize_steps=visualize_steps,  # should each learning step be shown?
           visualize_learning=visualize_learning,  # show policy / value function?
           visualize_performance=visualize_performance)  # show performance runs?
    exp.plot()
示例#13
0
    def __init__(self, name=None, level=logging.DEBUG, formatter=None):
        """Start capturing records of logger *name* by replacing its
        handler list.

        The logger's previous handlers and level are saved (presumably
        for later restoration -- confirm a matching teardown exists).
        A capacity-0 MemoryHandler flushes every record straight to
        ``self``, so this class is expected to act as the target
        handler.

        :param name: logger to capture (root logger when None).
        :param level: level set on the captured logger.
        :param formatter: formatter to use; BASIC_FORMAT when None.
        """
        self.buffer = []
        self.level = level
        if formatter is None:
            self.formatter = logging.Formatter(logging.BASIC_FORMAT)
        else:
            self.formatter = formatter

        self.log = logging.getLogger(name)
        self.old_handlers = self.log.handlers[:]  # .copy()
        self.old_level = self.log.level
        self.log.setLevel(level)
        # Replace (not append to) the handler list so only we see records.
        self.log.handlers = [
            MemoryHandler(capacity=0, flushLevel=level, target=self)
        ]
示例#14
0
def create_logging_handler_for_collection(tempdir, prefix):
    """Yield a DEBUG-level MemoryHandler that writes into a timestamped
    per-collection debug log under *tempdir*; close both handlers on
    the way out.
    """
    from sys import maxsize
    from os import path
    from logging import FileHandler, DEBUG, Formatter
    from logging.handlers import MemoryHandler
    log_path = path.join(
        tempdir, "collection-logs",
        "{}.{}.debug.log".format(prefix, get_timestamp()))
    file_target = FileHandler(log_path)
    file_target.setFormatter(Formatter(**LOGGING_FORMATTER_KWARGS))
    # maxsize capacity: the buffer effectively never flushes on count.
    buffering = MemoryHandler(maxsize, target=file_target)
    buffering.setLevel(DEBUG)
    try:
        yield buffering
    finally:
        buffering.close()
        file_target.close()
示例#15
0
    def __init__(self, test=False):
        """Bare-bones initialization.

        The only thing done here is setting up the logging infrastructure.

        :param test: when True, records are swallowed into a large
            MemoryHandler that only flushes at CRITICAL into a
            NullHandler; otherwise basicConfig sets up normal console
            logging.
        """
        self.executable = basename(argv[0])
        self.test = test
        if self.test:
            nh = logging.NullHandler()
            mh = MemoryHandler(1000000, flushLevel=logging.CRITICAL, target=nh)
            logging.getLogger(__name__).addHandler(mh)
        else:  # pragma: no cover
            logging.basicConfig(format=self.executable +
                                ' [%(name)s] Log - %(levelname)s: %(message)s',
                                datefmt='%Y-%m-%dT%H:%M:%S')
        return
示例#16
0
    def __init__(self, tests, name="", level=DEBUG, stdout=sys.stdout):
        """Wrap *tests* in a suite whose log records are captured.

        :param tests: a single test/suite or an iterable of tests.
        :param name: logger name passed to the LogHandler fixture.
        :param level: capture level for the handler fixture.
        :param stdout: stream used in place of sys.stdout.
        """
        super(LoggingSuite, self).__init__()
        # No target is set, so records stay buffered inside the handler
        # until a target is attached (MemoryHandler.flush is a no-op
        # without one).
        self.handler = MemoryHandler(1)
        self._fixture = LogHandler(self.handler, name=name, level=level)
        self._stdout = stdout

        # Heuristically figure out if we're being passed a single test/suite
        # or a list of tests. In particular, in case of a single suite we
        # don't want addTests() to unwrap it by iterating through its tests,
        # since that would prevent its run() method from being run and by-pass
        # possible custom logic (e.g. testresources.OptimisingTestSuite).
        if safe_hasattr(tests, "run"):
            add = self.addTest
        else:
            add = self.addTests
        add(tests)
示例#17
0
def setup_file_handler(filename):
    """Build a buffered rotating-file handler for *filename*.

    Probes that the log file is writable first (this can fail after
    changing users) and repairs data-path permissions when it is not.

    :param filename: log file name, placed under client/logs/path.
    :return: a MemoryHandler buffering into a RotatingFileHandler.
    """
    # Compute the path once and reuse it below (the original rebuilt
    # the same os.path.join expression twice).
    log_file = os.path.join(Settings.get('client/logs/path'), filename)
    try:
        # Opening for append both probes write access and creates the
        # file if missing; the unused "as f" / "as e" bindings from the
        # original are dropped.
        with open(log_file, "a"):
            pass
    except IOError:
        # Could not write: repair permissions.  The handler below will
        # retry the open and surface any remaining error.
        set_data_path_permissions()
    rotate = RotatingFileHandler(log_file,
                                 maxBytes=int(
                                     Settings.get('client/logs/max_size')),
                                 backupCount=1)
    rotate.setFormatter(
        logging.Formatter(
            '%(asctime)s %(levelname)-8s %(name)-30s %(message)s'))
    return MemoryHandler(int(Settings.get('client/logs/buffer_size')),
                         target=rotate)
示例#18
0
    def __init__(self, name=None, level=logging.DEBUG, formatter=None):
        """
        Start capturing records of logger *name* by appending a
        pass-through MemoryHandler targeting ``self``.

        To get the logger name, execute this in `./manage.py shell` e.g.:

        import logging;print("\n".join(sorted(logging.Logger.manager.loggerDict.keys())))

        :param name: logger to capture (root logger when None).
        :param level: level set on the captured logger; the old level
            and handlers are saved for later restoration.
        :param formatter: formatter to use; BASIC_FORMAT when None.
        """
        self.buffer = []
        self.level = level
        if formatter is None:
            self.formatter = logging.Formatter(logging.BASIC_FORMAT)
        else:
            self.formatter = formatter

        self.log = logging.getLogger(name)
        self.old_handlers = self.log.handlers[:]  # .copy()
        self.old_level = self.log.level
        self.log.setLevel(level)
        # Append (unlike replacing) so existing handlers keep working.
        self.log.handlers.append(
            MemoryHandler(capacity=0, flushLevel=level, target=self))
示例#19
0
def log_register(log_name):
    """
    Acquire a logger object, initialized with the global level and output options

    :param log_name: logger name (as in `logging.getLogger`), will be prefixed to log lines
    :return: A `logging.Logger` instance.
    """
    logger = logging.getLogger(log_name)
    logger.setLevel(settings.LOG_LEVEL)

    if _enable_stdout:
        _add_stream_handler(logger)
    if _log_filename:
        _add_file_handler(logger)
    if not _log_filename and not _enable_stdout:
        # Prevent 'No handlers could be found' spam
        # (a target-less MemoryHandler buffers but never emits).
        logger.addHandler(MemoryHandler(0))

    _loggers.add(logger)
    return logger
示例#20
0
def set_up_root_logger(level=logging.DEBUG):
    """
    Sets up the root logger.

    Returns a tuple containing the root logger, the memory handler and
    the console handler.  Note that only the memory handler is attached
    here; the console handler is returned for the caller to attach.
    """
    root_logger = logging.getLogger("")
    root_logger.setLevel(level)

    console = logging.StreamHandler()
    console.setFormatter(
        logging.Formatter("%(name)-12s: %(levelname)-8s %(message)s"))

    memory_handler = MemoryHandler(MEMORY_LOGGER_CAPACITY)
    root_logger.addHandler(memory_handler)

    return (root_logger, memory_handler, console)
示例#21
0
def getLogger():
    """Return the omnipy logger, creating it lazily on first call
    (singleton via the module-level ``logger`` global).

    The logger gets a file handler buffered through a large
    MemoryHandler plus an unbuffered console handler, all at DEBUG.
    """
    global logger

    if logger is None:
        logger = logging.getLogger(OMNIPY_LOGGER)
        logger.setLevel(logging.DEBUG)
        fmt = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

        file_handler = logging.FileHandler(OMNIPY_LOGFILE)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(fmt)
        # Buffer file writes heavily to keep disk I/O down.
        logger.addHandler(MemoryHandler(capacity=256 * 1024,
                                        target=file_handler))

        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(fmt)
        logger.addHandler(console_handler)

    return logger
示例#22
0
def _get_qty_logger(name: str) -> logging.Logger:
    """Return the buffered, file-backed logger for quantity *name*,
    creating and caching it on first use.

    :param name: log ID; must be a valid python identifier.
    :raises ValueError: if *name* is not a valid identifier.
    """
    name = str(name)
    if not name.isidentifier():
        raise ValueError("Invalid log ID \"{}\". Only valid python "
                         "identifiers are allowed for log IDs.".format(name))

    logger_name = QTY_LOGGER_PREFIX + name

    # Actually the logging class provides a singleton behaviour of Logger
    # objects. We keep our own list however, as we need some specific
    # configuration and handlers attached.
    global _LOGGERS
    try:
        return _LOGGERS[logger_name]
    except KeyError:
        # Create the logger.

        # We need to specify 3600 seconds here instead of one hour, to force
        # detailed file name suffixes for manual log rotation.
        writers = [TimedRotatingFileHandler(directory + QTY_LOG_DIR + str(name) + '.log',
                                            when='s', interval=3600)
                   for directory in _VALID_LOG_LOCATIONS]
        for writer in writers:
            writer.formatter = logging.Formatter("{asctime}\t{message}", style='{')
            # Start a new file for each pyodine run.
            writer.doRollover()

        # Buffer file writes to keep I/O down. We will flush the buffer at
        # given time intervals. If that flushing should fail, however, we'll
        # flush at 100 entries (which is about 4kB of data).
        buffers = [MemoryHandler(100, target=writer) for writer in writers]

        logger = logging.getLogger(logger_name)
        for log_buffer in buffers:
            logger.addHandler(log_buffer)
        logger.propagate = False  # Don't pass messages to root logger.
        _LOGGERS[logger_name] = logger

        return logger
示例#23
0
def CustomLogger(filename,
                 category="",
                 rotate="",
                 buffer=10 * 1024,
                 utc=False,
                 backupCount=0):
    """Create (or fetch) a logger with a buffered stderr handler and a
    timed rotating file handler.

    :param filename: path of the rotating log file.
    :param category: logger name; "" for the root-level default,
        anything except the literal "root".
    :param rotate: rollover interval for TimedRotatingFileHandler
        (e.g. "d", "midnight").
    :param buffer: MemoryHandler capacity for the stderr handler.
    :param utc: use UTC times for rollover.
    :param backupCount: number of rotated files to keep.
    :return: ``(logger, memoryhandler)`` tuple.
    """
    assert isinstance(category, str)
    assert (category != "root") or (category == "")

    logger = logging.getLogger(category)

    ## Check if the handler is already registered...
    # http://stackoverflow.com/q/15870380
    if logger.handlers:
        # Bug fix: this branch used to return just ``logger`` while the
        # branch below returns ``(logger, memoryhandler)`` -- callers
        # could not unpack the result consistently.  Return the same
        # tuple shape, recovering the MemoryHandler attached on the
        # first call (None if somehow absent).
        memoryhandler = next(
            (h for h in logger.handlers if isinstance(h, MemoryHandler)),
            None)
        return logger, memoryhandler
    else:
        # http://stackoverflow.com/a/34125235
        logLevel = logging.DEBUG
        formatter = logging.Formatter('%(message)s')
        streamhandler = logging.StreamHandler(sys.stderr)
        streamhandler.setLevel(logLevel)
        streamhandler.setFormatter(formatter)
        memoryhandler = MemoryHandler(capacity=buffer,
                                      flushLevel=logging.ERROR,
                                      target=streamhandler)
        filehandler = TimedRotatingFileHandler(filename,
                                               when=rotate,
                                               utc=utc,
                                               backupCount=backupCount)
        filehandler.suffix = "%Y-%m-%d"  # http://stackoverflow.com/a/338566
        filehandler.setLevel(logLevel)
        filehandler.setFormatter(formatter)

        logger.setLevel(logLevel)
        logger.addHandler(memoryhandler)
        logger.addHandler(filehandler)

        return logger, memoryhandler
 def __update_logs_configuration(self):
     """Rewrite logs.conf and re-wire the gateway's remote logging,
     preserving the remote handler's activation state and level.

     NOTE(review): the guard only proceeds when the old and new
     configurations are EQUAL -- updating only when nothing changed
     looks inverted; confirm whether ``!=`` was intended.
     """
     try:
         if self.__old_logs_configuration == self.__new_logs_configuration:
             # Remember the remote handler's state so it survives the
             # rebuild below.
             remote_handler_current_state = self.__gateway.remote_handler.activated
             remote_handler_current_level = self.__gateway.remote_handler.current_log_level
             logs_conf_file_path = self.__gateway._config_dir + 'logs.conf'
             with open(logs_conf_file_path, 'w') as logs:
                 logs.write(self.__new_logs_configuration + "\r\n")
             # Re-read the freshly written file so logging picks it up.
             fileConfig(logs_conf_file_path)
             # capacity=-1: size check (len(buffer) >= -1) is always
             # true, so records pass straight through to the target.
             self.__gateway.main_handler = MemoryHandler(-1)
             self.__gateway.remote_handler = TBLoggerHandler(self.__gateway)
             self.__gateway.main_handler.setTarget(
                 self.__gateway.remote_handler)
             if remote_handler_current_level != 'NOTSET':
                 self.__gateway.remote_handler.activate(
                     remote_handler_current_level)
             if not remote_handler_current_state:
                 self.__gateway.remote_handler.deactivate()
             # fileConfig replaced existing loggers; re-acquire ours.
             global log
             log = getLogger('service')
             log.debug("Logs configuration has been updated.")
     except Exception as e:
         log.exception(e)
示例#25
0
def set_up_logger(log_level=logging.DEBUG):
    """Configure the module-level logger.

    Outside the 'Product' environment, records go straight to the
    console.  In production, console output is buffered through a
    MemoryHandler (64 records, flushed on ERROR) and flushed once more
    at interpreter exit so nothing buffered is lost.
    """
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(log_level)
    stream_handler.setFormatter(logging.Formatter(fmt=LOGGING_MSG_FORMAT,
                                                  datefmt=LOGGING_DATE_FORMAT))
    logger.setLevel(log_level)
    if CURRENT_ENV != 'Product':
        logger.addHandler(stream_handler)
        return

    memory_handler = MemoryHandler(
        capacity=64,
        flushLevel=logging.ERROR,
        target=stream_handler
    )
    memory_handler.setFormatter(logging.Formatter(fmt=LOGGING_MSG_FORMAT,
                                                  datefmt=LOGGING_DATE_FORMAT))
    logger.addHandler(memory_handler)

    # Drain any still-buffered records when the process exits.
    def flush():
        memory_handler.flush()
    atexit.register(flush)
    logger.debug("Logger init")
示例#26
0
def log_if_errors(logger, target_handler=None, flush_level=None, capacity=None):
    """Decorator factory: buffer *logger*'s records during the call and
    only emit them when a record at or above *flush_level* occurs.

    Records below the flush level are silently discarded when the
    wrapped function finishes.  Exceptions are logged (ERROR level
    triggers the flush) and re-raised.

    :param logger: logger to attach the buffering handler to.
    :param target_handler: destination handler (StreamHandler default).
    :param flush_level: level that triggers emission (ERROR default).
    :param capacity: buffer size in records (100 default).
    :return: the decorator.
    """
    # Local import keeps this snippet self-contained.
    from functools import wraps

    if target_handler is None:
        target_handler = logging.StreamHandler()
    if flush_level is None:
        flush_level = logging.ERROR
    if capacity is None:
        capacity = 100
    handler = MemoryHandler(capacity, flushLevel=flush_level, target=target_handler)

    def decorator(fn):
        # Bug fix: the wrapper previously clobbered the wrapped
        # function's __name__/__doc__ metadata; functools.wraps
        # preserves it.
        @wraps(fn)
        def wrapper(*args, **kwargs):
            logger.addHandler(handler)
            try:
                return fn(*args, **kwargs)
            except Exception:
                logger.exception('call failed')
                raise
            finally:
                # Deliberately bypass MemoryHandler.flush:
                # BufferingHandler.flush clears the buffer WITHOUT
                # emitting, discarding records that never reached the
                # flush level.
                super(MemoryHandler, handler).flush()
                logger.removeHandler(handler)
        return wrapper

    return decorator
示例#27
0
import logging
from logging.handlers import MemoryHandler
from IPython.core.magic import (Magics, magics_class, line_magic)

from collections import deque
logMessageBufferSize = 200
logMessages=deque([], logMessageBufferSize)

class PixiedDustLoggingHandler(logging.Handler):
    """Handler that appends ``(levelno, formatted message)`` tuples to
    the module-level ``logMessages`` deque (a bounded ring buffer)."""
    def emit(self, record):
        logMessages.append((record.levelno, self.format(record)))

# Init pixiedust logging: route DEBUG+ records through a capacity-1
# MemoryHandler (flushes each record immediately) into the in-memory
# ring-buffer handler above.
pixiedustHandler = PixiedDustLoggingHandler()
pixiedustHandler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
memHandler = MemoryHandler(1, target=pixiedustHandler)
memHandler.setLevel(logging.DEBUG)

pixiedustLogger = logging.getLogger("PixieDust")
pixiedustLogger.addHandler(memHandler)
pixiedustLogger.setLevel(logging.DEBUG)

@magics_class
class PixiedustLoggingMagics(Magics):
    @line_magic
    def pixiedustLog(self, arg_s):
        try:
            opts,args = self.parse_options( arg_s, "l:f:m:")
            level = logging.getLevelName( opts.get("l", "INFO").upper() )
            if not isinstance(level, int):
                level = logging.getLevelName("INFO")
# along with Case Conductor.  If not, see <http://www.gnu.org/licenses/>.
import httplib
from itertools import ifilter
import json
import logging
from logging import Formatter, Filter
from logging.handlers import MemoryHandler

from django.template.loader import render_to_string

from ..core.conf import conf


log = logging.getLogger("ccui.core.log.api")

# Buffer API-log records in memory; capacity comes from configuration.
handler = MemoryHandler(capacity=conf.CC_DEBUG_API_LOG_RECORDS)



class NoDebugFilter(Filter):
    """Filter that drops records whose "url" argument points at the
    /debug/ endpoints."""
    def filter(self, record):
        url = record.args.get("url", "")
        return not url.startswith("/debug/")



if conf.CC_DEBUG_API_LOG:
    log.setLevel(logging.DEBUG)
    log.addHandler(handler)
    ui_req_log = logging.getLogger("ccui.core.middleware.RequestLogMiddleware")
示例#29
0
            sys.stderr = self
        else:
            sys.stdout = sys.__stdout__  #old_stdout
            sys.stderr = sys.__stderr__


# Adds a stream catcher for display and a memory handler for saving.
log_stream = StreamCatch()
logger = getLogger()
display_handler = StreamHandler(stream=log_stream)
display_handler.setLevel(LOGLEVEL)
display_handler.setFormatter(Formatter(LOGFORMATTER))
display_handler.name = "StreamCatch"
logger.addHandler(display_handler)

# Target-less MemoryHandler: records accumulate in memory until
# make_log_file() points it at a file via setTarget().
memory_handler = MemoryHandler(MEMBUFFER)
memory_handler.setLevel(LOGLEVEL)
memory_handler.setFormatter(Formatter(LOGFORMATTER))
memory_handler.name = "MemoryLog"
logger.addHandler(memory_handler)

log_info("Started logging")


def make_log_file(log_path, mode='a'):
    """Point the module-level memory handler at *log_path* so buffered
    log records get written to that file.

    :param log_path: destination file for the saved log.
    :param mode: file open mode (append by default).
    """
    sink = FileHandler(filename=log_path, mode=mode)
    sink.setLevel(LOGLEVEL)
    sink.setFormatter(Formatter(LOGFORMATTER))
    memory_handler.setTarget(sink)
示例#30
0
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(formatter)

id_handler = logging.FileHandler("log/detail.log", mode='w')
id_handler.setLevel(logLevel)

main_formatter = logging.Formatter(FORMAT)
main_handler = logging.FileHandler("log/main.log", mode='w')
main_handler.setLevel(logLevel)
main_handler.setFormatter(formatter)

result_handler = logging.FileHandler("log/report.log", mode='w')
result_handler.setFormatter(formatter_short)

id_mhandler = MemoryHandler(MemoryHandlerCapacity, target=id_handler)

main_mhandler = MemoryHandler(10, target=main_handler)

result_mhandler = MemoryHandler(1000, target=result_handler)
id_handler.setFormatter(formatter_short)

root_logger = logging.getLogger()
if DEBUG:
    pass
    # add the handler to the root logger
    # logging.getLogger().addHandler(console_handler)
    # logging.getLogger().addHandler(main_mhandler)
    # logging.getLogger().setLevel(logLevel)
else:
    #     logging.getLogger().setLevel(logLevel)