Example #1
 def __init__(self, key='python.logging', redis=None, limit=0):
     # Fall back to a local Redis connection when none is supplied
     if redis is None:
         from redis import Redis
         redis = Redis()
     self.key = key
     assert limit >= 0
     self.limit = limit
     # The Redis client stands in for the queue expected by QueueHandler
     QueueHandler.__init__(self, redis)
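The excerpt above only shows the constructor; the enqueue override that actually pushes records to Redis is not part of it. A plausible, self-contained completion (the rpush/ltrim behaviour is an assumption, not shown in the source) might look like:

from logging.handlers import QueueHandler

from redis import Redis


class RedisQueueHandler(QueueHandler):
    # Hypothetical subclass built around the __init__ above; only the
    # constructor comes from the example, enqueue is an assumption.

    def __init__(self, key='python.logging', redis=None, limit=0):
        if redis is None:
            redis = Redis()
        self.key = key
        assert limit >= 0
        self.limit = limit
        QueueHandler.__init__(self, redis)

    def enqueue(self, record):
        # self.queue is the Redis client passed to QueueHandler.__init__;
        # push the formatted record and trim the list to the size limit.
        self.queue.rpush(self.key, self.format(record))
        if self.limit:
            self.queue.ltrim(self.key, -self.limit, -1)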
Example #2
def worker_configurer(queue):
    # Create the logger and attach the queue handler
    h = QueueHandler(queue)  # Just the one handler needed
    root = logging.getLogger()
    root.addHandler(h)
    # send all messages, for demo; no other level or filter logic applied.
    root.setLevel(logging.DEBUG)
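This covers only the worker side; a minimal sketch of the matching main-process side, following the stdlib logging cookbook pattern (the listener_process name and handler choice are illustrative), could be:

import logging
import multiprocessing


def listener_process(queue):
    # Drain records sent by the workers and handle them locally.
    root = logging.getLogger()
    root.addHandler(logging.StreamHandler())
    while True:
        record = queue.get()
        if record is None:  # sentinel: shut the listener down
            break
        logging.getLogger(record.name).handle(record)


if __name__ == '__main__':
    queue = multiprocessing.Queue(-1)
    listener = multiprocessing.Process(target=listener_process, args=(queue,))
    listener.start()
    # ... start worker processes that call worker_configurer(queue) ...
    queue.put_nowait(None)  # stop the listener once the workers are done
    listener.join()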
Example #3
def get_logger(logger_name, queue=False, log_level="debug"):
    """Send all logs to the main process.

    The worker configuration is done at the start of the worker process run.
    Note that on Windows you can't rely on fork semantics, so each process
    will run the logging configuration code when it starts.
    """
    # pylint: disable=redefined-variable-type

    loglevel = log_level.lower()

    if queue:
        # Create the logger and attach the queue handler
        handler = QueueHandler(queue)  # Just the one handler needed
        logger = logging.getLogger(logger_name)
        logger.propagate = False
        logger.addHandler(handler)

        if loglevel == "debug":
            logger.setLevel(logging.DEBUG)
        elif loglevel == "info":
            logger.setLevel(logging.INFO)
        elif loglevel == "warning":
            logger.setLevel(logging.WARNING)
        elif loglevel == "error":
            logger.setLevel(logging.ERROR)
        elif loglevel == "critical":
            logger.setLevel(logging.CRITICAL)
    else:
        logger = LoggingFunction(loglevel)

    return logger
Example #4
    def __init__(self):
        self.logger = colorlog.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)

        logging.addLevelName(logging.INFO, 'I')
        # colorlog.default_log_colors['I'] = "bold_green"
        logging.addLevelName(logging.CRITICAL, 'C')
        colorlog.default_log_colors['C'] = "bold_red"
        logging.addLevelName(logging.DEBUG, 'D')
        logging.addLevelName(logging.WARNING, 'W')

        SUCCESS = logging.DEBUG + 1
        logging.addLevelName(SUCCESS, 'success')
        colorlog.default_log_colors['success'] = "bold_green"
        setattr(self.logger, 'success', lambda message, *args: self.logger._log(SUCCESS, message, args))

        # Console log msg setting
        sh = colorlog.StreamHandler()
        sh.setLevel(logging.DEBUG + 1)
        sh_fmt = colorlog.ColoredFormatter('%(log_color)s> %(message)s')
        sh.setFormatter(sh_fmt)
        self.logger.addHandler(sh)

        # File log msg setting
        self.config = Config()
        product_name = self.config.get_product_name()
        folder_name = "{}_Log_{}".format(product_name,
                                         datetime.now().year)
        folder_path = os.path.join(os.getcwd(), folder_name)
        self._make_sure_dir_exists(folder_path)

        filename = '{}.txt'.format(datetime.now().strftime("Log %Y%m%d"))
        self.log_path = os.path.join(folder_path, filename)

        fh = logging.FileHandler(self.log_path)
        fmt = logging.Formatter('%(asctime)s, %(levelname)s, %(module)s, %(station)s, %(serial)s, "%(message)s"',
                                datefmt='%Y-%m-%d %H:%M:%S')
        fh.setFormatter(fmt)
        que = Queue.Queue(-1)
        queue_handler = QueueHandler(que)
        queue_handler.setLevel(logging.INFO)
        self.logger.addHandler(queue_handler)
        self.listener = QueueListener(que, fh)

        self.latest_filter = None
Example #5
 def setUp(self):
     self.handler = h = TestHandler(Matcher())
     self.logger = temp_logger = logging.getLogger()
     self.queue = q = queue.Queue(-1)
     self.qh = qh = QueueHandler(q)
     self.ql = ql = QueueListener(q, h)
     ql.start()
     temp_logger.addHandler(qh)
Example #6
 def add_queue_handler(self, mp_queue):
     # all records from worker processes go to
     # queue_handler and then into mp_queue.
     self.queue_handler = QueueHandler(mp_queue)
     # attach the queue handler to this class's logger
     logger = logging.getLogger(self.logger_name)
     logger.addHandler(self.queue_handler)
Example #7
def wrap_target_function(target, log_queue, result_queue, *args, **kwargs):
    try:
        setup_logger(handler=QueueHandler(log_queue))
        result = target(*args, **kwargs)
        result_queue.put(result)
    except:
        result = Exception(''.join(traceback.format_exception(*sys.exc_info())))
        result_queue.put(result)
        raise
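wrap_target_function looks like the entry point of a child process: it installs a QueueHandler, runs the real target, and ships the result (or the formatted traceback) back through result_queue. A hedged usage sketch (setup_logger is this project's own helper; double_it stands in for a real target):

import multiprocessing


def double_it(x):
    return x * 2


if __name__ == '__main__':
    log_queue = multiprocessing.Queue()
    result_queue = multiprocessing.Queue()
    p = multiprocessing.Process(
        target=wrap_target_function,
        args=(double_it, log_queue, result_queue, 21))
    p.start()
    p.join()
    result = result_queue.get()
    if isinstance(result, Exception):
        raise result
    print(result)  # 42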
Example #8
def initialize_logging(config):
    multiprocessing.current_process().name = 'Stack'
    cfg = config.get(A.LOGGING, {})

    # log to s3 if there's a destination specified in the config
    bucket = cfg.get(A.logging.S3_BUCKET)
    if bucket:
        json_formatter = JSONFormatter(config)
        s3_handler = S3Handler(bucket, cfg.get(A.logging.S3_PREFIX, ''))
        s3_handler.setFormatter(json_formatter)
        s3_handler.setLevel(logging.INFO)

        # The parent process is the only one that actually buffers the log
        # records in memory and writes them out to s3.  The child processes
        # send all of their log records to the parent's queue.
        #
        # Using the QueueHandler and QueueListener classes from logutils-0.3.2
        # here since they're the implementations in future versions of stdlib
        # logging anyway (logutils is the "backports from Py3k logging"
        # library).
        queue = multiprocessing.Queue()
        ql = QueueListener(queue, s3_handler)

        def cleanup():
            ql.stop()
            s3_handler.flush()

        atexit.register(cleanup)
        ql.start()

        qh = QueueHandler(queue)
        log.addHandler(qh)

    # set local_file to an empty string or some other false value to deactivate
    local_file = cfg.get(A.logging.LOCAL_FILE, 'bang.log')
    if local_file:
        local_handler = logging.FileHandler(local_file)
        local_handler.setFormatter(logging.Formatter(CONSOLE_LOGGING_FORMAT))
        level = sanitize_config_loglevel(
            cfg.get(A.logging.LOCAL_FILE_LEVEL, logging.DEBUG))
        local_handler.setLevel(level)
        log.addHandler(local_handler)

    # also log to stderr
    if sys.stderr.isatty():
        formatter = ColoredConsoleFormatter(CONSOLE_LOGGING_FORMAT)
    else:
        formatter = logging.Formatter(CONSOLE_LOGGING_FORMAT)
    handler = logging.StreamHandler()  # default stream is stderr
    handler.setFormatter(formatter)
    console_level = sanitize_config_loglevel(
        cfg.get(A.logging.CONSOLE_LEVEL, 'INFO'))
    handler.setLevel(console_level)
    log.setLevel(logging.DEBUG)
    log.addHandler(handler)
    log.debug('Logging initialized.')
Example #9
def worker_configurer(queue):
    """
    The worker configuration is done at the start of the worker process run.
    Note that on Windows you can't rely on fork semantics, so each process
    will run the logging configuration code when it starts.
    """

    handler = QueueHandler(queue)  # Just the one handler needed
    root = logging.getLogger()
    root.addHandler(handler)
    # send all messages, for demo; no other level or filter logic applied.
    root.setLevel(logging.DEBUG)
Example #10
def initMultiprocessing():
    """
    Remove all handlers and add QueueHandler on top. This should only be called
    inside a multiprocessing worker process, since it changes the logger
    completely.
    """

    # Multiprocess logging may be disabled.
    if not queue:
        return

    # Remove all handlers and add the Queue handler as the only one.
    for handler in logger.handlers[:]:
        logger.removeHandler(handler)

    queue_handler = QueueHandler(queue)
    queue_handler.setLevel(logging.DEBUG)

    logger.addHandler(queue_handler)

    # Change current thread name for log record
    threading.current_thread().name = multiprocessing.current_process().name
Example #11
def configureWorkerProcess(config, queue):
    global logQueue
    global logConfig

    logQueue = queue
    logConfig = config

    rootLogger = logging.getLogger()
    # Clear the handlers.
    # From the Python docs (http://docs.python.org/2/library/multiprocessing.html):
    #   "Note that on Windows child processes will only inherit the level of the
    #   parent process's logger; any other customisation of the logger will not
    #   be inherited."
    #   So on Windows the root logger starts with no handlers, while on Unix it
    #   inherits the handlers from the main process. If the handlers are not
    #   cleared, each log line in the child process will be duplicated, so just
    #   clear them to be safe.
    rootLogger.handlers = []
    rootLogger.addHandler(QueueHandler(queue))
    rootLogger.setLevel(getLogLevel(config.loglevel))

    return
Example #12
def std_logging_queue_handler():
    start = datetime.datetime.now()
    q = Queue(-1)

    hdlr = logging.FileHandler('qtest.out', 'w')
    ql = QueueListener(q, hdlr)

    # Configure the root logger to route records through the queue
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)  # Log level = DEBUG
    qh = QueueHandler(q)
    root.addHandler(qh)

    ql.start()

    for i in range(100000):
        logging.info("msg:%d" % i)
    ql.stop()
    print(datetime.datetime.now() - start)
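The function above times 100,000 records routed through the queue; a direct-to-file baseline for comparison (the function and output file names are assumptions) would differ only in skipping the queue:

import datetime
import logging


def std_logging_direct_handler():
    # Same workload, but records go straight to the FileHandler on the
    # calling thread, so the timing can be compared with the queued run.
    start = datetime.datetime.now()
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(logging.FileHandler('dtest.out', 'w'))
    for i in range(100000):
        logging.info("msg:%d" % i)
    print(datetime.datetime.now() - start)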
Example #13
def initialize_logging(level, queue):
    """Setup logging for a process.

    Creates a base logger for pywall.  Installs a single handler, which will
    send packets across a queue to the logger process.  This function should be
    called by each of the three worker processes before they start.

    """
    formatter = _get_formatter()

    logger = logging.getLogger('pywall')
    logger.setLevel(level)

    handler = QueueHandler(queue)
    handler.setLevel(level)
    handler.setFormatter(formatter)

    logger.addHandler(handler)
Example #14
    def _init_logging(self, loglevel=LOGLEVEL):
        """Initialize log listener and log queue.

        Args:
            loglevel: The log level with which the StreamHandler is started.
        """

        loglevel = loglevel.lower()

        # Create handler
        handler = utils.get_stream_log_handler(loglevel=loglevel)

        # Start queue listener using the stream handler above
        self.log_queue = Queue(-1)
        self.listener = utils.CustomQueueListener(self.log_queue, handler)
        self.listener.start()

        # Create the logger and attach the queue handler
        root = logging.getLogger()
        qhandler = QueueHandler(self.log_queue)
        root.addHandler(qhandler)

        #        self.log = utils.get_logger("test_datafetcher", self.log_queue)
        self.log = MockLogging()
Example #15
def get_logger(logger_name, queue=False, log_level="debug"):
    """Send all logs to the main process.

    The worker configuration is done at the start of the worker process run.
    Note that on Windows you can't rely on fork semantics, so each process
    will run the logging configuration code when it starts.
    """
    # pylint: disable=redefined-variable-type

    loglevel = log_level.lower()

    if queue:
        # Create the logger and attach the queue handler
        handler = QueueHandler(queue)  # Just the one handler needed
        logger = logging.getLogger(logger_name)
        logger.propagate = False
        logger.addHandler(handler)

        logging_lvl = convert_str_to_log_level(loglevel)
        logger.setLevel(logging_lvl)
    else:
        logger = LoggingFunction(loglevel)

    return logger
Example #16
def worker_init(q, level=logging.INFO):
    # all records from worker processes go to qh and then into q
    qh = QueueHandler(q)
    logger = logging.getLogger()
    logger.setLevel(level)
    logger.addHandler(qh)
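worker_init has exactly the signature multiprocessing.Pool expects of an initializer; a hedged usage sketch (the job function is illustrative):

import logging
import multiprocessing


def job(n):
    logging.getLogger(__name__).info("processing %d", n)
    return n * n


if __name__ == '__main__':
    # A manager queue can be handed to pool workers via initargs;
    # a plain multiprocessing.Queue cannot be passed that way.
    q = multiprocessing.Manager().Queue(-1)
    with multiprocessing.Pool(2, initializer=worker_init, initargs=(q,)) as pool:
        print(pool.map(job, range(4)))
    # The workers' records are now on q, ready for a QueueListener.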
Example #17
        if not os.path.exists(ipc_path):
            os.mkdir(ipc_path)
            os.chmod(ipc_path, 0o777)
            logging.info(
                "Creating directory for IPC communication: {0}".format(
                    ipc_path))

        return ipc_path

    log_queue, log_queue_listener = start_logging("xfelDetector.log")

    # Create the logger and attach the queue handler
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)  # Log level = DEBUG
    qh = QueueHandler(log_queue)
    root.addHandler(qh)

    ipc_path = set_up_ipc_path()

    context = zmq.Context()
    #    current_pid = os.getpid()
    current_pid = 12345

    hostname = "10.253.0.52"
    n_channels = 4
    xfel_connections = []
    for i in range(n_channels):
        port = 4600 + i
        xfel_connections.append((hostname, port))
    print("Connecting to", xfel_connections)
Example #18
    def add_pipeline(self, ppl):
        self._pipelines[ppl.name] = ppl
        ppl.queue_handler = QueueHandler(self.queue)

        # direct messages from this pipeline to the main bus
        ppl.logger.addHandler(ppl.queue_handler)
Example #19
 def __init__(self):
     self.q = Queue(-1)
     self.ql = QueueListener(self.q, *tuple(getLogger('PacPac').handlers))
     self.qh = QueueHandler(self.q)
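This constructor only builds the plumbing: a queue, a listener wrapping whatever handlers the 'PacPac' logger already has, and the queue handler; nothing is started or rewired yet. A hedged sketch of the missing driving code, in the same excerpt style (the start/stop method names are illustrative, not from the source):

 def start(self):
     # Swap the logger's direct handlers for the queue handler so the
     # original handlers run on the listener's thread, not the caller's.
     logger = getLogger('PacPac')
     for handler in list(logger.handlers):
         logger.removeHandler(handler)
     logger.addHandler(self.qh)
     self.ql.start()

 def stop(self):
     self.ql.stop()  # flushes remaining records and joins the thread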