Example #1
0
def listener():
    """
    Wrapper that creates a QueueListener, starts it and automatically stops it.
    To be used in a with statement in the main process, for multiprocessing.
    """

    global queue

    # Lazily initialize the shared queue the first time we're called.
    if queue is None:
        try:
            queue = multiprocessing.Queue()
        except OSError as e:
            # Mark the queue as unusable so we don't retry on every call.
            queue = False

            # Some machines don't have access to /dev/shm. See
            # http://stackoverflow.com/questions/2009278 for more information.
            if e.errno == errno.EACCES:
                # Fixed: the original implicit concatenation was missing a
                # space ("any'logging") and said "processed" for "processes".
                logger.warning('Multiprocess logging disabled, because '
                               'current user cannot map shared memory. '
                               "You won't see any logging generated by "
                               'the worker processes.')

    # Multiprocess logging may be disabled.
    if not queue:
        yield
    else:
        queue_listener = QueueListener(queue, *logger.handlers)

        try:
            queue_listener.start()
            yield
        finally:
            # Always stop the listener, even if the body raised.
            queue_listener.stop()
Example #2
0
def log_server(level, queue, filename, mode='w'):
    """Run the logging server.

    Drains the queue of log records and dispatches them through Python's
    logging handlers: always to stderr, and additionally to *filename*
    when one is given.
    """
    formatter = _get_formatter()

    console = StreamHandler()
    console.setFormatter(formatter)
    console.setLevel(level)
    handlers = [console]

    if filename:
        file_handler = FileHandler(filename, mode)
        file_handler.setFormatter(formatter)
        file_handler.setLevel(level)
        handlers.append(file_handler)

    listener = QueueListener(queue, *handlers)
    listener.start()

    # The QueueListener does its work on a background thread, so keep the
    # main thread alive ("busy wait") until we are interrupted.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    finally:
        listener.stop()
def logger_init(level=logging.INFO):
    """Set up queue-based logging for the current process.

    Creates a queue and a QueueListener that forwards records to a stream
    handler and a file handler (LOG_FILE, append mode).  The same handlers
    are also attached directly to the root logger so records emitted in
    this process are handled locally.

    Returns a (listener, queue) tuple; child processes should send their
    records through the queue (e.g. via a QueueHandler).
    """
    q = Queue()

    # One formatter suffices for both handlers (the original built two
    # identical instances).
    f = logging.Formatter(
        '%(asctime)s %(processName)-10s %(name)s %(levelname)-8s %(message)s')

    # this is the handler for all log records
    handler = logging.StreamHandler()
    handler.setFormatter(f)

    file_handler = logging.FileHandler(LOG_FILE, 'a')
    file_handler.setFormatter(f)

    # ql gets records from the queue and sends them to the handlers
    ql = QueueListener(q, handler, file_handler)
    ql.start()

    logger = logging.getLogger()
    logger.setLevel(level)
    # add the handlers to the logger so records from this process are handled
    logger.addHandler(handler)
    logger.addHandler(file_handler)

    return ql, q
Example #4
0
 def setUp(self):
     # Route records from the root logger through a queue into a
     # TestHandler so tests can inspect what was logged.
     matcher_handler = TestHandler(Matcher())
     self.handler = matcher_handler
     self.logger = logging.getLogger()
     self.queue = queue.Queue(-1)
     self.qh = QueueHandler(self.queue)
     self.ql = QueueListener(self.queue, matcher_handler)
     self.ql.start()
     self.logger.addHandler(self.qh)
Example #5
0
def initialize_logging(config):
    """Configure logging for the whole process tree from *config*.

    Builds up to three sinks, in order: an S3 handler fed via a
    multiprocessing queue (drained by the parent process only), a local
    file handler, and a stderr handler (colored when stderr is a tty).
    """
    multiprocessing.current_process().name = 'Stack'
    cfg = config.get(A.LOGGING, {})

    # log to s3 if there's a destination specified in the config
    bucket = cfg.get(A.logging.S3_BUCKET)
    if bucket:
        json_formatter = JSONFormatter(config)
        s3_handler = S3Handler(bucket, cfg.get(A.logging.S3_PREFIX, ''))
        s3_handler.setFormatter(json_formatter)
        s3_handler.setLevel(logging.INFO)

        # The parent process is the only one that actually buffers the log
        # records in memory and writes them out to s3.  The child processes
        # send all of their log records to the parent's queue.
        #
        # Using the QueueHandler and QueueListener classes from logutils-0.3.2
        # here since they're the implementations in future versions of stdlib
        # logging anyway (logutils is the "backports from Py3k logging"
        # library).
        queue = multiprocessing.Queue()
        ql = QueueListener(queue, s3_handler)

        def cleanup():
            # Stop the listener, then flush whatever it buffered to S3.
            ql.stop()
            s3_handler.flush()

        # Registered before start() so shutdown always drains the queue.
        atexit.register(cleanup)
        ql.start()

        qh = QueueHandler(queue)
        log.addHandler(qh)

    # set local_file to an empty string or some other false value to deactivate
    local_file = cfg.get(A.logging.LOCAL_FILE, 'bang.log')
    if local_file:
        local_handler = logging.FileHandler(local_file)
        local_handler.setFormatter(logging.Formatter(CONSOLE_LOGGING_FORMAT))
        level = sanitize_config_loglevel(
            cfg.get(A.logging.LOCAL_FILE_LEVEL, logging.DEBUG))
        local_handler.setLevel(level)
        log.addHandler(local_handler)

    # also log to stderr
    if sys.stderr.isatty():
        formatter = ColoredConsoleFormatter(CONSOLE_LOGGING_FORMAT)
    else:
        formatter = logging.Formatter(CONSOLE_LOGGING_FORMAT)
    handler = logging.StreamHandler()  # default stream is stderr
    handler.setFormatter(formatter)
    console_level = sanitize_config_loglevel(
        cfg.get(A.logging.CONSOLE_LEVEL, 'INFO'))
    handler.setLevel(console_level)
    # Logger level stays DEBUG; the per-handler levels do the filtering.
    log.setLevel(logging.DEBUG)
    log.addHandler(handler)
    log.debug('Logging initialized.')
Example #6
0
    def remove_client(self, id):
        """Detach the handler registered for *id* and rebuild the listener."""
        log_handler = self._clients.pop(id)

        # A QueueListener's handler set can't be mutated in place, so stop
        # the current one and start a fresh listener without this handler.
        self.queue_listener.stop()
        remaining = list(self.queue_listener.handlers)
        remaining.remove(log_handler)
        self.queue_listener = QueueListener(self.queue, *remaining)
        self.queue_listener.start()
Example #7
0
    def add_client(self, id, log_handler):
        """Register *id*'s handler and rebuild the listener to include it."""
        self._clients[id] = log_handler

        # TODO: this is bad because all other clients have to wait until this
        # function returns
        self.queue_listener.stop()
        new_handlers = list(self.queue_listener.handlers) + [log_handler]
        self.queue_listener = QueueListener(self.queue, *new_handlers)
        self.queue_listener.start()
Example #8
0
    def __init__(self):
        """Set up colored console logging plus queued file logging.

        NOTE(review): self.listener (the QueueListener feeding the file
        handler) is created here but never started in this method --
        presumably a caller starts it; confirm.
        """

        self.logger = colorlog.getLogger(__name__)
        self.logger.setLevel(logging.DEBUG)

        # Single-letter level names keep console lines short.
        logging.addLevelName(logging.INFO, 'I')
        # colorlog.default_log_colors['I'] = "bold_green"
        logging.addLevelName(logging.CRITICAL, 'C')
        colorlog.default_log_colors['C'] = "bold_red"
        logging.addLevelName(logging.DEBUG, 'D')
        logging.addLevelName(logging.WARNING, 'W')

        # Custom level just above DEBUG for "success" messages.
        SUCCESS = logging.DEBUG + 1
        logging.addLevelName(SUCCESS, 'success')
        colorlog.default_log_colors['success'] = "bold_green"
        # NOTE(review): this lambda forwards positional args only; keyword
        # arguments such as exc_info are silently dropped.
        setattr(self.logger, 'success', lambda message, *args: self.logger._log(SUCCESS, message, args))

        # Console log msg setting
        sh = colorlog.StreamHandler()
        # DEBUG + 1 == SUCCESS, so plain DEBUG records stay off the console.
        sh.setLevel(logging.DEBUG + 1)
        sh_fmt = colorlog.ColoredFormatter('%(log_color)s> %(message)s')
        sh.setFormatter(sh_fmt)
        self.logger.addHandler(sh)

        # File log msg setting
        self.config = Config()
        product_name = self.config.get_product_name()
        folder_name = "{}_Log_{}".format(product_name,
                                         datetime.now().year)
        folder_path = os.path.join(os.getcwd(), folder_name)
        self._make_sure_dir_exists(folder_path)

        filename = '{}.txt'.format(datetime.now().strftime("Log %Y%m%d"))
        self.log_path = os.path.join(folder_path, filename)

        fh = logging.FileHandler(self.log_path)
        # Format string references %(station)s and %(serial)s, so records
        # must carry those extra fields (e.g. via the `extra` kwarg).
        fmt = logging.Formatter('%(asctime)s, %(levelname)s, %(module)s, %(station)s, %(serial)s, "%(message)s"',
                                datefmt='%Y-%m-%d %H:%M:%S')
        fh.setFormatter(fmt)
        # Python 2-style Queue module; -1 means unbounded.
        que = Queue.Queue(-1)
        queue_handler = QueueHandler(que)
        queue_handler.setLevel(logging.INFO)
        self.logger.addHandler(queue_handler)
        self.listener = QueueListener(que, fh)

        self.latest_filter = None
Example #9
0
    def logger_init(self):
        """Start a QueueListener that relays records to this logger's
        stream handler.

        Returns a (listener, queue) pair; child processes should send
        their records through the queue.
        """
        mp_queue = multiprocessing.Queue()
        # extract stream handler from logger, temp. remove from logger but send
        # to queue listener - on closing Pool stream handler is added again
        logger = logging.getLogger(self.logger_name)
        # Exact type check (not isinstance) -- presumably deliberate so that
        # FileHandler, a StreamHandler subclass, does not match; confirm.
        stream_handlers = [
            x for x in logger.handlers if type(x) == logging.StreamHandler
        ]
        # this is necessary if we don't want to setup the handler in tests
        if stream_handlers:
            self.stream_handler = stream_handlers[0]

        # NOTE(review): if no stream handler was found and self.stream_handler
        # was never set elsewhere, the next line raises AttributeError.
        logger.removeHandler(self.stream_handler)
        # queue_listener gets records from the queue
        # and sends them to the logger stream handler.
        mp_queue_listener = QueueListener(mp_queue, self.stream_handler)
        mp_queue_listener.start()

        return mp_queue_listener, mp_queue
Example #10
0
def std_logging_queue_handler():
    """Benchmark: push 100000 records through a QueueHandler/QueueListener
    pair into a file handler ('qtest.out'), printing the elapsed time.

    Cleans up after itself: the queue handler is detached from the root
    logger and the file handler is closed (the original leaked both).
    """
    start = datetime.datetime.now()
    q = Queue(-1)

    hdlr = logging.FileHandler('qtest.out', 'w')
    ql = QueueListener(q, hdlr)

    # Create log and set handler to queue handler.  (The original fetched
    # the root logger twice into an unused duplicate local.)
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)  # Log level = DEBUG
    qh = QueueHandler(q)
    root.addHandler(qh)

    ql.start()

    try:
        for i in range(100000):
            # Lazy %-args: the message is rendered by the handler thread,
            # not eagerly on every call.
            logging.info("msg:%d", i)
    finally:
        ql.stop()
        # Don't leave the queue handler attached to the root logger.
        root.removeHandler(qh)
        hdlr.close()
    print(datetime.datetime.now() - start)
    def __init__(self, parent=None, target=None, args=(), kwargs={}, timer_check_interval=1000, log_handler=None):
        """Create a worker that runs *target* in a subprocess on its own QThread.

        Log records from the subprocess arrive on log_queue and are fed to
        log_handler by a QueueListener; results come back on result_queue.

        NOTE(review): kwargs={} is a mutable default, but it is copied via
        dict(kwargs) below, so the shared default object is never mutated.
        """
        super(SubprocessWorker, self).__init__(parent)
        self.id = self.get_next_worker_id()
        self.running = False
        self.timer = None
        self.timer_check_interval=timer_check_interval
        self.process = None
        # Default to a handler whose messageEmitted signal we can connect to.
        self.log_handler = log_handler if log_handler is not None else QtLogHandler()
        self.log_handler.messageEmitted.connect(self.handle_message)
        self.log_queue = multiprocessing.Queue()
        self.result_queue = multiprocessing.Queue()
        self.log_queue_listener = QueueListener(self.log_queue, self.log_handler)
        self.target = target
        # Defensive copies so later mutation by the caller has no effect.
        self.args = tuple(args)
        self.kwargs = dict(kwargs)

        # Run this worker on a dedicated Qt thread; start/stop follow the
        # thread's lifecycle via the signal connections below.
        self.thread = QtCore.QThread()
        self.moveToThread(self.thread)

        self.thread.started.connect(self.start)
        self.thread.finished.connect(self.stop)

        log.debug('Worker {} initialized on thread {}'.format(self.id, str(self.thread)))
Example #12
0
 def __init__(self):
     """Create an unbounded queue, a listener that reuses the 'PacPac'
     logger's handlers, and a queue handler for producers."""
     self.q = Queue(-1)
     # *handlers unpacks the handler list directly; the original's
     # tuple() conversion was redundant.
     self.ql = QueueListener(self.q, *getLogger('PacPac').handlers)
     self.qh = QueueHandler(self.q)
Example #13
0
 def start_mp_logging(self):
     """Begin collecting log records emitted by child processes."""
     logger.debug("Start listening child logs")
     self.logging_queue = Queue()
     # Forward everything arriving on the queue to this logger's handlers.
     self.logging_listener = QueueListener(self.logging_queue, *logger.handlers)
     self.logging_listener.start()
Example #14
0
 def start(self):
     """Start a QueueListener feeding the root logger's current handlers."""
     self.queue = multiprocessing.Queue(-1)
     # Solve this problem? http://stackoverflow.com/questions/25585518/python-logging-logutils-with-queuehandler-and-queuelistener
     root_handlers = logging.getLogger().handlers
     self.ql = QueueListener(self.queue, *root_handlers)
     self.ql.start()
Example #15
0
 def start(self):
     """Start the queue listener.

     Raises RuntimeError if already running.  (RuntimeError replaces the
     original bare Exception; callers catching Exception still work.)
     """
     if self.running:
         raise RuntimeError("already running")
     self.queue_listener = QueueListener(self.queue)
     self.queue_listener.start()
Example #16
0
    if os.path.isdir('./openwarpgui/nemoh'):
        subprocess.call(['python', 'setup.py', 'build_ext', '--inplace'],
                        cwd='openwarpgui/nemoh')
    else:
        subprocess.call(['python', 'setup.py', 'build_ext', '--inplace'],
                        cwd='nemoh')
    logger = logging.getLogger(__name__)

    if len(sys.argv) <= 1:
        utility.log_and_print(
            logger,
            'Error: No configurations file given to the CLI. Usage of script is: openwarp_cli configuration1 .. '
        )

    queue = multiprocessing.Queue(-1)
    ql = QueueListener(queue, *logging.getLogger().handlers)
    ql.start()

    for i in range(1, len(sys.argv)):
        path = sys.argv[i]
        user_config = None
        # Checking if it is a valid path
        if os.path.exists(path):
            utility.log_and_print(
                logger, 'Processing configuration file at ' + path + '\n')
            with open(path, 'rt') as f:
                user_config = json.load(f)
        else:  # Check if it is json string
            try:
                user_config = json.loads(path)
            except Exception as e: