Code example #1
    def __init__(self,
                 endpoint,
                 capacity=1000,
                 flushLevel=logging.ERROR,
                 threadCount=1,
                 logger=None):
        """
        Initializes PulseHandler using the REST API endpoint, buffer capacity,
        buffer flush level and thread count.

        :param endpoint: The REST API endpoint for the Pulse Log Collector
        :type endpoint: str
        :param capacity: Number of records to buffer before flushing.
                         Defaults to 1000.
        :type capacity: int
        :param flushLevel: Log level at which to flush the buffer.
        :type flushLevel: Log Level
        :param threadCount: Number of threads to handle POST requests.
        :type threadCount: int
        :param logger: :class:`logging.Logger` object for debug logging. If none
                       is provided, a StreamHandler will be created to log to
                       sys.stdout. It is recommended that you provide a
                       logger if you plan to use more than one instance of this
                       class.
        :type logger: logging.Logger
        :rtype: PulseHandler
        """
        PulseBatcher.__init__(self, endpoint, capacity, threadCount, logger)
        MemoryHandler.__init__(self, capacity, flushLevel)
        # Cleanup when Python terminates
        atexit.register(self.close)
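
A minimal usage sketch for the handler above. The import path and endpoint URL are placeholders; only the constructor parameters come from the docstring:

import logging
from pulse_handler import PulseHandler  # hypothetical module path

app_logger = logging.getLogger("my_app")
app_logger.setLevel(logging.DEBUG)
# Records are buffered in memory and posted to the collector once the buffer
# fills or a record at flushLevel (ERROR) or above arrives.
app_logger.addHandler(PulseHandler("https://pulse.example.com/collect",  # placeholder endpoint
                                   capacity=500,
                                   flushLevel=logging.ERROR,
                                   threadCount=2))
app_logger.info("buffered until capacity or flushLevel is reached")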
Code example #2
    def __init__(self, when='h', interval=1, backupCount=0):
        """
        Constructor for the logging handler.
        """
        # Formatting string
        format_str = '%(asctime)s - %(levelname)s'
        if TvbProfile.current.cluster.IN_OPERATION_EXECUTION_PROCESS:
            log_file = self.CLUSTER_NODES_LOG_FILE
            if TvbProfile.current.cluster.IS_RUNNING_ON_CLUSTER_NODE:
                node_name = TvbProfile.current.cluster.CLUSTER_NODE_NAME
                if node_name is not None:
                    format_str += ' [node:' + str(node_name) + '] '
            else:
                format_str += ' [proc:' + str(os.getpid()) + '] '
        else:
            log_file = self.WEB_LOG_FILE

        format_str += ' - %(name)s - %(message)s'

        rotating_file_handler = SimpleTimedRotatingFileHandler(log_file, when, interval, backupCount)
        rotating_file_handler.setFormatter(logging.Formatter(format_str))

        MemoryHandler.__init__(self, capacity=self.BUFFER_CAPACITY, target=rotating_file_handler)
Code example #3
File: log.py Project: you285168/slgweb
 def __init__(self,
              capacity,
              mailhost,
              fromaddr,
              toaddrs,
              subject,
              credentials=None,
              secure=None,
              timeout=5.0):
     MemoryHandler.__init__(self,
                            capacity,
                            flushLevel=logging.INFO,
                            target=None)
     if isinstance(mailhost, (list, tuple)):
         self.mailhost, self.mailport = mailhost
     else:
         self.mailhost, self.mailport = mailhost, None
     if isinstance(credentials, (list, tuple)):
         self.username, self.password = credentials
     else:
         self.username = None
     self.fromaddr = fromaddr
     if isinstance(toaddrs, str):
         toaddrs = [toaddrs]
     self.toaddrs = toaddrs
     self.subject = subject
     self.secure = secure
     self.timeout = timeout
Code example #4
File: log.py Project: kangta123/easy-websocket
 def __init__(self, capacity, mail_subject):
     MemoryHandler.__init__(self,
                            capacity,
                            flushLevel=logging.ERROR,
                            target=None)
     self.mail_subject = mail_subject
     self.flushed_buffers = []
Code example #5
 def enable_memory_log(self):
     from logging.handlers import MemoryHandler
     if self._memory_handler is None:
         h = MemoryHandler(10 * 1024 * 1024)  # capacity counts records, not bytes
         h.setFormatter(self._get_formatter())
         self._log.addHandler(h)
         self._memory_handler = h
Code example #6
def full_context_error_logger():
    """ capture all log specific to a context
    :return:
    """
    from logging.handlers import MemoryHandler
    from StringIO import StringIO
    buffer = StringIO()
    logLevel = logging.DEBUG
    streamhandler = logging.StreamHandler(buffer)
    streamhandler.setLevel(logLevel)
    streamhandler.setFormatter(formatter)
    memory_handler = MemoryHandler(capacity=1024 * 100,
                                   flushLevel=logging.ERROR,
                                   target=streamhandler)
    memory_handler.setLevel(logLevel)
    memory_handler.setFormatter(formatter)
    rootLogger = logging.getLogger()
    rootLogger.addHandler(memory_handler)
    result = {"error_log": None}
    try:
        yield result
    except:
        memory_handler.flush()
        buffer.flush()
        result["error_log"] = buffer.getvalue() + traceback.format_exc()
    finally:
        rootLogger.removeHandler(memory_handler)
        memory_handler.close()
        buffer.close()
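
A hedged usage sketch for the context capture above, assuming the generator is wrapped with contextlib.contextmanager and that formatter, traceback and logging are defined in the surrounding module (on Python 3 the StringIO import would come from io):

from contextlib import contextmanager

capture_errors = contextmanager(full_context_error_logger)  # skip if already decorated

with capture_errors() as result:
    logging.getLogger(__name__).debug("recorded in the memory buffer only")
    do_risky_work()  # hypothetical call that may raise

if result["error_log"] is not None:
    print(result["error_log"])  # full DEBUG context plus the traceback of the failure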
Code example #7
 def close(self):
     """
     Flush remaining records and terminate all threads
     """
      # Note: Overrides PulseBatcher.close, which overrides MemoryHandler.close
      #       when inherited by this class.
     MemoryHandler.close(self)
     PulseBatcher.close(self)
Code example #8
File: logger.py Project: juanchitot/jaimeboot
    def __init__(self):
        memory_handler = MemoryHandler(self.MEMORY_HANDLER_SIZE)
        memory_handler.setFormatter(Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))

        self.handlers = []
        self.handlers.append(memory_handler)

        self.registered_loggers = {}
Code example #9
 def __init__(self, level=logging.NOTSET, tag=None):
     MemoryHandler.__init__(self, capacity=400)
     if tag is not None:
         self.tag = tag
     self.level = level
     if py32:
         self.addFilter(self._filter)
     else:
         self.addFilter(FilterCallback(self._filter))
Code example #10
    def __init__(self,
                 capacity,
                 mailhost,
                 toaddrs,
                 subject=None,
                 flushLevel=ERROR,
                 *,
                 credentials=None,
                 fromaddr=None,
                 secure=None,
                 mailport=None,
                 timeout=5.0):
        flushLevel = validate_log_level_int(flushLevel)

        if isinstance(credentials, str):
            credentials = (
                credentials,
                getpass(
                    "Please enter a password for {}: ".format(credentials)),
            )

        if fromaddr is None:
            if not isinstance(credentials,
                              (list, tuple)) or len(credentials) != 2:
                raise ValueError(
                    "you must supply either fromaddr or credentials=(uername, password); "
                    "fromaddr is None but credentials = {}".format(
                        credentials))
            fromaddr = credentials[0]

        if isinstance(toaddrs, str):
            toaddrs = [toaddrs]
        elif not toaddrs:
            raise ValueError(
                "you must supply toaddrs, either a single email address or a list thereof"
            )

        if mailport is not None:
            # SMTPHandler uses a tuple for this
            mailhost = (mailhost, mailport)
        elif not isinstance(mailhost, (list, tuple)) or len(mailhost) != 2:
            raise ValueError(
                "If mailport is not explicitly passed, mailhost must be a (host, port) tuple; got {}"
                .format(mailhost))

        MemoryHandler.__init__(self, capacity, flushLevel=flushLevel)
        SMTPHandler.__init__(
            self,
            mailhost=mailhost,
            fromaddr=fromaddr,
            toaddrs=toaddrs,
            subject=subject,
            credentials=credentials,
            secure=secure,
            timeout=timeout,
        )
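
A usage sketch under assumed names; the class name BufferedSMTPHandler and the mail server details are placeholders, while the argument handling (string credentials trigger a getpass prompt, fromaddr falls back to the credential user, mailport is folded into a (host, port) tuple) follows the constructor above:

import logging

handler = BufferedSMTPHandler(          # placeholder name for the class defined above
    capacity=200,
    mailhost="smtp.example.com",        # placeholder host
    mailport=587,
    toaddrs="ops@example.com",          # a single address is wrapped into a list
    subject="buffered log report",
    credentials="reports@example.com",  # bare string: the password is prompted via getpass()
    flushLevel=logging.ERROR,           # buffered records are mailed once an ERROR arrives
)
logging.getLogger("jobs").addHandler(handler)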
Code example #11
File: Logs.py Project: couchjd/playground
def make_mem_logger(name,to_log,size=8192):
	from logging.handlers import MemoryHandler
	logger=logging.getLogger(name)
	hdlr=MemoryHandler(size,target=to_log)
	formatter=logging.Formatter('%(message)s')
	hdlr.setFormatter(formatter)
	logger.addHandler(hdlr)
	logger.memhandler=hdlr
	logger.setLevel(logging.DEBUG)
	return logger
Code example #12
File: Logs.py Project: Mokl/SP4L
def make_mem_logger(name, to_log, size=8192):
    from logging.handlers import MemoryHandler
    logger = logging.getLogger(name)
    hdlr = MemoryHandler(size, target=to_log)
    formatter = logging.Formatter('%(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.memhandler = hdlr
    logger.setLevel(logging.DEBUG)
    return logger
Code example #13
File: lightning_logger_patch.py Project: wdika/mridc
def add_memory_handlers_to_pl_logger():
    """
    Adds two MemoryHandlers to pytorch_lightning's logger. These two handlers are essentially message buffers. This
    function is called in mridc.utils.__init__.py. These handlers are used in add_filehandlers_to_pl_logger to flush
    buffered messages to files.
    """
    if not HANDLERS:
        HANDLERS["memory_err"] = MemoryHandler(-1)
        HANDLERS["memory_err"].addFilter(lambda record: record.levelno > _logging.INFO)
        HANDLERS["memory_all"] = MemoryHandler(-1)
        pl._logger.addHandler(HANDLERS["memory_err"])
        pl._logger.addHandler(HANDLERS["memory_all"])
Code example #14
def create_logging_handler_for_collection(tempdir, prefix):
    from sys import maxsize
    from os import path
    from logging import FileHandler, DEBUG, Formatter
    from logging.handlers import MemoryHandler
    target = FileHandler(path.join(tempdir, "{}.{}.debug.log".format(prefix, get_timestamp())))
    target.setFormatter(Formatter(**LOGGING_FORMATTER_KWARGS))
    handler = MemoryHandler(maxsize, target=target)
    handler.setLevel(DEBUG)
    try:
        yield handler
    finally:
        handler.close()
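
A hedged usage sketch, assuming the generator above is decorated with contextlib.contextmanager and that get_timestamp and LOGGING_FORMATTER_KWARGS exist in the module:

import logging
import tempfile

with create_logging_handler_for_collection(tempfile.mkdtemp(), "metadata") as handler:
    collection_logger = logging.getLogger("collection")
    collection_logger.addHandler(handler)
    try:
        collection_logger.debug("buffered in memory, written in one burst")
        # ... run the collection ...
    finally:
        collection_logger.removeHandler(handler)
# handler.close() in the generator's finally block flushes the buffer to the debug log file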
Code example #15
File: Logs.py Project: solarblue/waf
def make_mem_logger(name, to_log, size=8192):
    """
	Creates a memory logger to avoid writing concurrently to the main logger
	"""
    from logging.handlers import MemoryHandler
    logger = logging.getLogger(name)
    hdlr = MemoryHandler(size, target=to_log)
    formatter = logging.Formatter('%(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.memhandler = hdlr
    logger.setLevel(logging.DEBUG)
    return logger
Code example #16
File: Logs.py Project: afeldman/waf
def make_mem_logger(name, to_log, size=8192):
	"""
	Creates a memory logger to avoid writing concurrently to the main logger
	"""
	from logging.handlers import MemoryHandler
	logger = logging.getLogger(name)
	hdlr = MemoryHandler(size, target=to_log)
	formatter = logging.Formatter('%(message)s')
	hdlr.setFormatter(formatter)
	logger.addHandler(hdlr)
	logger.memhandler = hdlr
	logger.setLevel(logging.DEBUG)
	return logger
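
A usage sketch for the waf-style memory logger above; to_log can be any ordinary handler, and records stay in memory until the buffer fills, an ERROR-level record arrives, or the handler is flushed explicitly:

import logging

target = logging.StreamHandler()                     # the "main" handler being protected
task_log = make_mem_logger("task-42", target, size=8192)
task_log.debug("buffered, not yet written to the target")
task_log.memhandler.flush()                          # push buffered records to the target
task_log.memhandler.close()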
Code example #17
File: __init__.py Project: yangaound2017/logutil
 def __init__(self,
              filename,
              capacity,
              flushLevel,
              flushInterval,
              target=None,
              **kwargs):
     MemoryHandler.__init__(
         self, capacity, flushLevel, target
         or make_handler(filename, capacity=1, **kwargs))
     self.__flushInterval = flushInterval
     self.__lastFlushTime = time.time()
     self.__condition = threading.Condition()
     self.__flusher = None
Code example #18
File: qclogging.py Project: ucgmsim/qcore
def duplicate_handlers(old_handlers, formatter):
    log_files = []
    new_handlers = []
    for handler in old_handlers:
        if isinstance(handler, logging.FileHandler):
            log_name = handler.baseFilename
            if log_name in log_files:
                continue
            log_files.append(log_name)
            task_file_out_handler = logging.FileHandler(log_name)
            task_file_out_handler.setFormatter(formatter)
            task_file_out_handler.setLevel(handler.level)
            new_handlers.append(task_file_out_handler)

        if isinstance(handler, MemoryHandler) and isinstance(
                handler.target, logging.FileHandler):
            log_name = handler.target.baseFilename
            if log_name in log_files:
                continue
            log_files.append(log_name)

            task_file_out_handler = logging.FileHandler(log_name, delay=True)
            task_file_out_handler.setFormatter(formatter)
            task_file_out_handler.setLevel(handler.level)

            task_mem_handler = MemoryHandler(handler.capacity,
                                             flushLevel=handler.flushLevel)
            task_mem_handler.setFormatter(formatter)
            task_mem_handler.setLevel(handler.level)
            task_mem_handler.setTarget(task_file_out_handler)
            new_handlers.append(task_mem_handler)

    return new_handlers
Code example #19
File: qclogging.py Project: ucgmsim/qcore
def add_buffer_handler(
    logger: logging.Logger,
    buffer_size: int = 100,
    flush_level: int = 1000,
    file_name: str = None,
):
    """
        Adds a buffer handler to the logger.
        Useful for log files that are written to the output directory if that directory does not exist yet
        :param logger: The logger object
        :param buffer_size: The number of messages to buffer before a flush is forced
        :param flush_level: The minimum level of a message to cause the handler to be flushed early.
        Defaults to a value that won't be reached by normal log levels to prevent premature flushing
        :param file_name: The name of the log file to be used when it is available
        """
    # Flush level should be high enough that flush is not called for regular messages
    buffer_handler = MemoryHandler(buffer_size, flushLevel=flush_level)
    if logger.name.startswith(THREADED):
        buffer_handler.setFormatter(general_threaded_formatter)
    else:
        buffer_handler.setFormatter(general_formatter)

    if file_name is not None:
        file_out_handler = logging.FileHandler(file_name, delay=True)
        if logger.name.startswith(THREADED):
            file_out_handler.setFormatter(general_threaded_formatter)
        else:
            file_out_handler.setFormatter(general_formatter)

        buffer_handler.setTarget(file_out_handler)

    logger.addHandler(buffer_handler)
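
A sketch of the deferred-file pattern the docstring describes, with a placeholder logger name and path; the module-level formatters and THREADED prefix are assumed to exist as above:

import logging
from logging.handlers import MemoryHandler

task_logger = logging.getLogger("sim_task")
add_buffer_handler(task_logger, buffer_size=100, file_name="out/run_01/sim_task.log")
task_logger.info("buffered; out/run_01/ does not have to exist yet")
# ... later, once the output directory has been created ...
for h in task_logger.handlers:
    if isinstance(h, MemoryHandler):
        h.flush()  # the delayed FileHandler opens the file only now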
Code example #20
File: nemo_logging.py Project: askaydevs/ITN_Phore
    def _define_logger(self, capture_warnings=True):
        """ Creates the logger if not already created. Called in init"""

        # Use double-checked locking to avoid taking lock unnecessarily.
        if self._logger is not None:
            return self._logger

        with self._logger_lock:
            try:
                self._logger = _logging.getLogger("nemo_logger")
                # By default, silence all loggers except the logger for rank 0
                self.remove_stream_handlers()
                # If NEMO_TESTING is set, add a streamhandler to all ranks
                if get_envbool(NEMO_ENV_VARNAME_TESTING, False):
                    old_factory = _logging.getLogRecordFactory()

                    def record_factory(*args, **kwargs):
                        record = old_factory(*args, **kwargs)
                        record.rank = self.rank
                        return record

                    _logging.setLogRecordFactory(record_factory)
                    self.add_stream_handlers(formatter=DebugNeMoFormatter)
                elif is_global_rank_zero():
                    self.add_stream_handlers()

                # Add memoryhandlers, essentially buffers. They are used to save messages that we will flush to file
                # once the appropriate file handlers are added.
                if is_global_rank_zero():
                    # Add a memoryhandler for error messages. Only logged on rank 0
                    self._handlers["memory_err"] = MemoryHandler(-1)
                    self._handlers["memory_err"].addFilter(
                        lambda record: record.levelno > _logging.INFO)
                    formatter = BaseNeMoFormatter
                    self._handlers["memory_err"].setFormatter(formatter())
                    self._logger.addHandler(self._handlers["memory_err"])
                # Add a memoryhandler for all messages on all ranks
                self._handlers["memory_all"] = MemoryHandler(-1)
                formatter = BaseNeMoFormatter
                self._handlers["memory_all"].setFormatter(formatter())
                self._logger.addHandler(self._handlers["memory_all"])

            finally:
                level = Logger.INFO
                if get_envbool(NEMO_ENV_VARNAME_TESTING, False):
                    level = Logger.DEBUG
                self.set_verbosity(verbosity_level=level)
                self.captureWarnings(capture_warnings)

        self._logger.propagate = False
Code example #21
File: utils.py Project: spaceone/ucs-school
def add_module_logger_to_schoollib():
    global _module_handler
    if _module_handler is None:
        module_handler = ModuleHandler(udebug_facility=ud.MODULE)
        _module_handler = MemoryHandler(-1,
                                        flushLevel=logging.DEBUG,
                                        target=module_handler)
        _module_handler.setLevel(logging.DEBUG)
        logger.addHandler(_module_handler)
    else:
        logger.info(
            'add_module_logger_to_schoollib() should only be called once! Skipping...'
        )
    return _module_handler
Code example #22
    def scan_package(
        self,
        parsed_args: argparse.Namespace,
        count: int,
        package: Package,
        num_packages: int,
    ) -> Optional[Dict[str, List[Issue]]]:
        """Scan each package in a separate process while buffering output."""
        logger = logging.getLogger()
        old_handler = None
        if logger.handlers:  # guard against an empty handler list
            old_handler = logger.handlers[0]
            handler = MemoryHandler(10000,
                                    flushLevel=logging.ERROR,
                                    target=old_handler)
            logger.removeHandler(old_handler)
        logger.addHandler(handler)

        logging.info("-- Scanning package %s (%d of %d) --", package.name,
                     count, num_packages)

        sio = io.StringIO()
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        sys.stdout = sio
        sys.stderr = sio

        issues, dummy = self.run(package.path, parsed_args)

        sys.stdout = old_stdout
        sys.stderr = old_stderr
        logging.info(sio.getvalue())

        if issues is not None:
            logging.info(
                "-- Done scanning package %s (%d of %d) --",
                package.name,
                count,
                num_packages,
            )
        else:
            logging.error("Failed to run statick on package %s!", package.name)

        if old_handler is not None:
            handler.flush()
            logger.removeHandler(handler)
            logger.addHandler(old_handler)

        return issues
Code example #23
File: tests.py Project: zawszaws/sms-rapidsms
def test_logger_mixin():
    obj = LoggableStub()

    from logging.handlers import MemoryHandler
    import logging

    log = logging.getLogger()
    handler = MemoryHandler(999)
    log.setLevel(logging.DEBUG)
    log.addHandler(handler)

    obj.debug("This is a DEBUG message")
    obj.info("This is an INFORMATIVE message")
    obj.warning("This is a WARNING")
    obj.error("This is an ERROR")
    obj.critical("This is a CRITICAL error")
    obj.exception("This is an exception")
    obj.exception()

    # There should be 8 messages: 7 from above, plus
    # one more for LoggerMixin's own deprecation warning
    assert_equals(len(handler.buffer), 7 + 1)
    assert_equals(handler.buffer[3].name, "loggablestub")
    assert_equals(handler.buffer[3].msg, "This is a WARNING")

    log.removeHandler(handler)
Code example #24
def initialize_root_logger(desc: str) -> None:
    # add TqdmLoggingHandler and MemoryHandler targeting FileHandler
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logging.getLogger("matplotlib").setLevel(logging.ERROR)

    tqdm_handler = TqdmLoggingHandler(level=logging.INFO)
    tqdm_handler.setFormatter(logging.Formatter("{message}", style="{"))
    logger.addHandler(tqdm_handler)

    dt_now = datetime.datetime.now()
    filename = f"./log/{dt_now.strftime('%Y%m%d_%H%M%S')}_{desc}.log"
    make_parent_dir(filename)
    # If encoding is not specified, the file is written as Shift-JIS on Windows
    # delay=True postpones creating the file until the first write
    file_handler = logging.FileHandler(filename, encoding="utf-8", delay=True)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(
        logging.Formatter("{levelname:<5}| {message}", style="{"))
    # Writing to the file on every record is slow, so buffer through a MemoryHandler
    # Once the number of logger.info()/logger.debug() calls exceeds capacity, the records are written to the target
    # flushLevel is set relatively high (ERROR) so routine records do not force a flush
    memory_handler = MemoryHandler(capacity=100,
                                   flushLevel=logging.ERROR,
                                   target=file_handler)
    logger.addHandler(memory_handler)

    logger.info(f"{desc} @ {dt_now.strftime('%Y/%m/%d %H:%M:%S')}")
    logger.debug(f"Args: {sys.argv}")
Code example #25
File: bench_decorator.py Project: alan-mushi/KSI
    def wrapped_func(*args, **kwargs):
        if PERFORM_BENCHMARKS and func.__module__.startswith(BENCHMARK_MOTIF):
            start_t = datetime.now()
            ret = func(*args, **kwargs)
            end_t = datetime.now()

            # Use our custom logger
            logger = logging.getLogger(LOGGER_NAME)
            # Don't propagate to the parent(s)
            logger.propagate = False

            # Add the handler if not already present
            if not logger.hasHandlers():
                # Log to a buffer in memory, if the buffer is full flush it to the target
                logger_outfile = LOGGER_DIR + LOGGER_NAME + '.log'
                logger.addHandler(
                    MemoryHandler(4096,
                                  target=logging.FileHandler(logger_outfile,
                                                             mode='w+')))

            # Log with the custom logger at a custom level
            logger.log(LOGGER_LEVEL, '%s|%s',
                       func.__module__ + "." + func.__name__,
                       str(end_t - start_t))
            return ret

        else:
            return func(*args, **kwargs)
Code example #26
File: LLog.py Project: shixm/cicd
 def __init__(self,
              name,
              size=10,
              backupCount=50,
              cacheRecords=1,
              print_to_console=True):
     '''  size : size of a single log file, in MB.
          backupCount : maximum number of backup log files.
          cacheRecords : number of records to cache; records are written to disk only once this count is reached, except for records at or above flushLevel.'''
     self.logger = logging.getLogger(name)
     self.logger.setLevel(logging.DEBUG)
     logdir = os.path.join(os.path.dirname(__name__), "logs")
     #         logdir = os.path.join(Path.LOG, name)
     fileutil.ensure_dir_exists(logdir)
     logfile = os.path.join(logdir, '%s.log' % name)
     hdlr = RotatingFileHandler(logfile,
                                'a',
                                maxBytes=1024 * 1024 * size,
                                backupCount=backupCount)
     hdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
     mh = MemoryHandler(cacheRecords, flushLevel=logging.INFO,
                        target=hdlr)  # records at or above flushLevel are written to disk immediately
     self.logger.addHandler(mh)
     if print_to_console:
          # Send messages at or above DEBUG level to the console
         hdlr = logging.StreamHandler(sys.stdout)
         hdlr.setFormatter(logging.Formatter("%(message)s", ""))
         hdlr.setLevel(logging.DEBUG)
         self.logger.addHandler(hdlr)
     self.logger.print = self.logger.debug
Code example #27
def initialize_root_logger(enable_logfile, log_filename=None, sim_case_description=""):
    date = datetime.now()
    if enable_logfile and log_filename is None:
        log_filename = (
            f"./logs/{date.strftime('%Y-%m-%d_%H-%M-%S')}_{sim_case_description}.log"
        )

    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logging.getLogger("matplotlib").setLevel(level=logging.ERROR)
    tqdm_handler = TqdmLoggingHandler(logging.INFO)
    tqdm_handler.setFormatter(logging.Formatter("{message}", style="{"))
    logger.addHandler(tqdm_handler)
    if enable_logfile:
        make_parent_dir(log_filename)
        file_handler = logging.FileHandler(
            filename=log_filename, encoding="utf-8", delay=True
        )
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(
            logging.Formatter("{levelname:<5} | {message}", style="{")
        )
        memory_handler = MemoryHandler(
            capacity=1000, flushLevel=logging.ERROR, target=file_handler
        )
        logger.addHandler(memory_handler)
Code example #28
def log_if_errors(logger,
                  target_handler=None,
                  flush_level=None,
                  capacity=None):
    if target_handler is None:
        target_handler = logging.StreamHandler()
    if flush_level is None:
        flush_level = logging.ERROR
    if capacity is None:
        capacity = 100
    handler = MemoryHandler(capacity,
                            flushLevel=flush_level,
                            target=target_handler)

    def decorator(fn):
        def wrapper(*args, **kwargs):
            logger.addHandler(handler)
            try:
                return fn(*args, **kwargs)
            except Exception:
                logger.exception('call failed')
                raise
            finally:
                super(MemoryHandler, handler).flush()
                logger.removeHandler(handler)

        return wrapper

    return decorator
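
A usage sketch for the decorator above, with a placeholder logger: buffered records are silently discarded when the wrapped call succeeds (the plain BufferingHandler.flush() in the finally block just clears the buffer), and are written to the target the moment the ERROR-level logger.exception() record arrives:

import logging

job_logger = logging.getLogger("batch")  # placeholder logger
job_logger.setLevel(logging.DEBUG)

@log_if_errors(job_logger)
def step(n):
    job_logger.debug("processing %s", n)  # buffered; dropped if step() succeeds
    if n < 0:
        raise ValueError(n)

step(3)        # nothing reaches the StreamHandler
try:
    step(-1)   # the buffered DEBUG record and the traceback are flushed to stderr
except ValueError:
    pass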
Code example #29
File: utils.py Project: xlsdnx/zenodo
def get_file_logger(logfile, audit_type, audit_id):
    """Return a buffered file logger."""
    logger = logging.getLogger('zenodo.auditor.{type}.{id}'.format(
        type=audit_type, id=audit_id))
    if logfile:
        file_handler = logging.FileHandler(logfile, mode='w')
        logger.addHandler(MemoryHandler(100, target=file_handler))
    return logger
Code example #30
    def setup_memory_logging_handler(cls) -> None:
        if cls._pytest_active() and cls.memory_handler:
            return

        cls.memory_handler = MemoryHandler(capacity=maxsize,
                                           flushLevel=maxsize)

        root = getLogger()
        root.setLevel(NOTSET)
        root.addHandler(cls.memory_handler)
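
Because capacity and flushLevel are both sys.maxsize, the handler above never flushes on its own; a sketch of how the captured records might be inspected in a test (the owning class name is a placeholder):

import logging

LoggingSetup.setup_memory_logging_handler()           # placeholder owning class
logging.getLogger("unit.under.test").warning("captured, never auto-flushed")

records = LoggingSetup.memory_handler.buffer          # BufferingHandler keeps records here
assert any("captured" in r.getMessage() for r in records)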
Code example #31
 def shouldFlush(self, record):
     """
     Check for buffer full or a record at the flushLevel or higher.
     """
      # Note: Calling the base-class method directly to bypass the MRO.
      #
      # Note: Overrides PulseBatcher.shouldFlush, which overrides
      #       MemoryHandler.shouldFlush when inherited by this class.
     return MemoryHandler.shouldFlush(self, record)
Code example #32
    def make_memory(capacity, flushlevel, target):
        """
        Same as make(), but with a memory buffer, flushed when full or
        on critical events.

        :param capacity: memory handler capacity
        :param flushlevel: memory handler flush level
        :param target: handler to which buffered records are flushed
        :return: memory handler
        :rtype: MemoryHandler
        """
        return MemoryHandler(capacity, flushLevel=flushlevel, target=target)
Code example #33
def get_logger(path):
    rotating_handler = TimedRotatingFileHandler(path, when='d', backupCount=7)
    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
    rotating_handler.setFormatter(formatter)
    memory_handler = MemoryHandler(capacity=512 * 1024,
                                   target=rotating_handler)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(formatter)
    logger = logging.getLogger("luftdaten")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(memory_handler)
    logger.addHandler(console_handler)
    return logger
Code example #34
 def __update_logs_configuration(self):
     try:
         logs_conf_file_path = self.__gateway._config_dir + 'logs.conf'
         with open(logs_conf_file_path, 'w') as logs:
             logs.write(self.__new_logs_configuration + "\r\n")
         fileConfig(logs_conf_file_path)
         self.__gateway.main_handler = MemoryHandler(-1)
         self.__gateway.remote_handler = TBLoggerHandler(self.__gateway)
         self.__gateway.main_handler.setTarget(
             self.__gateway.remote_handler)
         log.debug("Logs configuration has been updated.")
     except Exception as e:
         log.exception(e)
Code example #35
def set_up_logger(log_level=logging.DEBUG):
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(log_level)
    stream_handler.setFormatter(logging.Formatter(fmt=LOGGING_MSG_FORMAT,
                                                  datefmt=LOGGING_DATE_FORMAT))
    logger.setLevel(log_level)
    if CURRENT_ENV != 'Product':
        logger.addHandler(stream_handler)
        return

    memory_handler = MemoryHandler(
        capacity=64,
        flushLevel=logging.ERROR,
        target=stream_handler
    )
    memory_handler.setFormatter(logging.Formatter(fmt=LOGGING_MSG_FORMAT,
                                                  datefmt=LOGGING_DATE_FORMAT))
    logger.addHandler(memory_handler)

    def flush():
        memory_handler.flush()
    atexit.register(flush)
    logger.debug("Logger init")
Code example #36
File: test.py Project: franciscomello/grammar
        # Original punctuation should be out of the brackets
        self.positive("I am hear!", "I am [here]!")
        # Drop leading and trailing spaces
        self.positive(" their is ", "[there] is")
        # Retain double spaces
        self.positive("their  is", "[there]  is")
        # Drop periods, commas, and semicolons
        self.positive("their is.", "[there] is")
        self.positive("their is,", "[there] is")
        self.positive("their is;", "[there] is")

    def test_wording(self):
        """Verify that wording can be generated without failing"""
        self.positive(
            "Their is and your don't supposed to! (blah) They think their is.")
        logging.debug(self.parser.generate_wording('@@')
                      .encode('ascii', 'replace'))
        # not implemented yet
        self.assertRaises(NotImplementedError,
                          self.parser.generate_wording_long, '')

if __name__ == '__main__':
    stream_null = StringIO()
    logging.basicConfig(stream=stream_null, level=logging.DEBUG)
    handler_stream = StreamHandler(stream=sys.stderr)
    handler_mem = MemoryHandler(1024, target=handler_stream)
    handler_mem.setLevel(logging.DEBUG)
    handler_mem.setFormatter(logging.Formatter())
    logging.getLogger().addHandler(handler_mem)
    unittest.main()
Code example #37
File: ilog.py Project: opsteev/iUTF
 def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
     MemoryHandler.__init__(self, capacity, flushLevel=flushLevel, target=target)
Code example #38
File: pdLogging.py Project: ibm-cds-labs/pixiedust
import logging
from logging.handlers import MemoryHandler
from IPython.core.magic import (Magics, magics_class, line_magic)

from collections import deque
logMessageBufferSize = 200
logMessages=deque([], logMessageBufferSize)

class PixiedDustLoggingHandler(logging.Handler):
    def emit(self, record):
        logMessages.append((record.levelno, self.format(record)))

#init pixiedust logging
pixiedustHandler = PixiedDustLoggingHandler()
pixiedustHandler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
memHandler = MemoryHandler(1, target=pixiedustHandler)
memHandler.setLevel(logging.DEBUG)

pixiedustLogger = logging.getLogger("PixieDust")
pixiedustLogger.addHandler(memHandler)
pixiedustLogger.setLevel(logging.DEBUG)

@magics_class
class PixiedustLoggingMagics(Magics):
    @line_magic
    def pixiedustLog(self, arg_s):
        try:
            opts,args = self.parse_options( arg_s, "l:f:m:")
            level = logging.getLevelName( opts.get("l", "INFO").upper() )
            if not isinstance(level, int):
                level = logging.getLevelName("INFO")
Code example #39
import os
import os.path
import pkg_resources
from pyphant.core.Helpers import getPyphantPath
LOGDIR = getPyphantPath()
import logging
from logging.handlers import MemoryHandler
logging.basicConfig(level=logging.NOTSET,
                    filename=os.path.join(LOGDIR, u'pyphant.log'),
                    filemode='w',
                    format="%(asctime)s - %(levelname)s:%(name)s:%(thread)"\
                    "d:%(module)s.%(funcName)s(l %(lineno)d):%(message)s")
console = logging.StreamHandler()
console.setLevel(logging.WARNING)
pdmh = MemoryHandler(1000, flushLevel=logging.CRITICAL + 1)
pdmh.setLevel(logging.WARNING)
logging.getLogger('').addHandler(pdmh)
logging.getLogger('').addHandler(console)

import sys
import wx
import wx.aui
import sogl
import pyphant.wxgui2.paramvisualization.ParamVisReg as ParamVisReg
from pyphant.core.H5FileHandler import H5FileHandler
from pyphant.wxgui2 import WorkerRepository
from pyphant.wxgui2 import ConfigureFrame
import platform
from pyphant.core.KnowledgeManager import KnowledgeManager
import webbrowser
Code example #40
File: openapc_toolkit.py Project: uhahn/openapc-de
 def __init__(self, target):
     MemoryHandler.__init__(self, 100000, target=target)
     self.setLevel(logging.ERROR)
Code example #41
            sys.stdout=self
            sys.stderr=self
        else:
            sys.stdout=sys.__stdout__ #old_stdout
            sys.stderr=sys.__stderr__

#adds a stream catcher for display and a memory handler for saving
log_stream=StreamCatch()
logger=getLogger()
display_handler=StreamHandler(stream=log_stream)
display_handler.setLevel(LOGLEVEL)
display_handler.setFormatter(Formatter(LOGFORMATTER))
display_handler.name="StreamCatch"
logger.addHandler(display_handler)

memory_handler=MemoryHandler(MEMBUFFER)
memory_handler.setLevel(LOGLEVEL)
memory_handler.setFormatter(Formatter(LOGFORMATTER))
memory_handler.name="MemoryLog"
logger.addHandler(memory_handler)

log_debug("Started logging")

def make_log_file(log_path, mode='a'):
    """Points memory handler at a particular file to save the log."""
    file_handler = FileHandler(filename=log_path, mode=mode)
    file_handler.setLevel(LOGLEVEL)
    file_handler.setFormatter(Formatter(LOGFORMATTER))
    memory_handler.setTarget(file_handler)

def remove_log_file():
Code example #42
File: test_scan.py Project: weaverba137/hpsspy
 def __init__(self, capacity=1000000, flushLevel=logging.CRITICAL):
     nh = logging.NullHandler()
     MemoryHandler.__init__(self, capacity,
                            flushLevel=flushLevel, target=nh)