Example #1
def _setup_logging():
    global _logger
    global _queue_listener

    logging_cfg = _get_logging_settings()
    log_queue = Queue(-1)

    _logger = logging.getLogger('sento-crawler')

    _logger.setLevel(logging_cfg.get('level'))

    logger_formatter = logging.Formatter(LOG_FORMAT)
    logger_formatter.converter = time.gmtime
    out_handler = None  # type: logging.Handler

    if logging_cfg.get('output') == VALID_OUTPUTS[0]:
        out_handler = logging.StreamHandler()
    else:
        logs_path = Path('./logs')
        logs_path.mkdir(exist_ok=True)

        out_handler = TimedRotatingFileHandler(
            filename='logs/sento_crawler.log', when='midnight', utc=True)

    out_handler.setLevel(logging.INFO)
    out_handler.setFormatter(logger_formatter)

    logger_handler = QueueHandler(log_queue)
    _queue_listener = QueueListener(log_queue, out_handler)

    _logger.addHandler(logger_handler)

    # The queue listener must be stopped when execution finishes
    # This line spawns a listener in another thread!
    _queue_listener.start()
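The comment above is the important caveat: the listener runs on its own thread and must be stopped at shutdown so queued records are flushed. A minimal teardown sketch, assuming the module-level _queue_listener from this example:

def _teardown_logging():
    global _queue_listener
    if _queue_listener is not None:
        # stop() flushes any records still in the queue, then joins
        # the listener thread.
        _queue_listener.stop()
        _queue_listener = None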
Example #2
    def __init__(self, url, level=NOTSET):
        self._log_queue = queue.Queue(-1)
        super().__init__(self._log_queue)

        teams_handler = TeamsHandler(url, level)
        teams_log_listener = QueueListener(self._log_queue, teams_handler)
        teams_log_listener.start()
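In this excerpt teams_log_listener is a local variable, so the listener thread can never be stopped cleanly. A hedged variant (assuming, as the super().__init__ call suggests, that this class subclasses QueueHandler) keeps a reference and stops it on close():

    def __init__(self, url, level=NOTSET):
        self._log_queue = queue.Queue(-1)
        super().__init__(self._log_queue)

        teams_handler = TeamsHandler(url, level)
        self._listener = QueueListener(self._log_queue, teams_handler)
        self._listener.start()

    def close(self):
        # Flush queued records and join the listener thread before
        # closing the handler itself.
        self._listener.stop()
        super().close()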
Example #3
 def __init__(self, chat_ids: List[str], token: str, proxies: Optional[Dict[str, str]] = None,
              disable_web_page_preview: bool = False, disable_notification: bool = False,
              reply_to_message_id: Optional[int] = None,
              reply_markup: Optional[Dict[str, Any]] = None) -> None:
     """
     Initialization.
     :param token: Telegram token.
     :optional proxies: Proxy for requests. Format proxies corresponds format proxies 
     in requests library.
     Parameters for message to telegram, see https://core.telegram.org/bots/api#sendmessage
     :optional disable_web_page_preview: Disables link previews for links in this message.
     :optional disable_notification: Sends the message silently. 
     Users will receive a notification with no sound.
     :optional reply_to_message_id: If the message is a reply, ID of the original message.
     :optional reply_markup: Additional interface options. 
     A JSON-serialized object for an inline keyboard, custom reply keyboard,
     instructions to remove reply keyboard or to force a reply from the user.
     """
     self.queue = Queue(-1)  # type: Queue
     super().__init__(self.queue)
     self.handler = TelegramMessageHandler(
         chat_ids,
         token,
         proxies=proxies,
         disable_web_page_preview=disable_web_page_preview,
         disable_notification=disable_notification,
         reply_to_message_id=reply_to_message_id,
         reply_markup=reply_markup
     )
     # Set default formatter
     self.handler.setFormatter(TelegramHtmlFormatter())
     self.listener = QueueListener(self.queue, self.handler)
     self.listener.start()
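Usage would look roughly like the sketch below; the class name TelegramLoggingHandler is a guess for illustration, since the excerpt only shows the __init__ body:

import logging

handler = TelegramLoggingHandler(chat_ids=['@mychannel'], token='<bot-token>')
logger = logging.getLogger('alerts')
logger.addHandler(handler)         # records are enqueued, not sent inline
logger.error('Deployment failed')  # delivered to Telegram off-thread
handler.listener.stop()            # flush and stop at shutdown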
Example #4
 def __init__(self, logfile_path, console=False):
     """
     Logger API on the client side: stores logs locally and sends them to the
     central logger MQ (a RabbitMQ object is created internally for this).
     Parameters - logfile_path - path where the log file is created
                - console - whether to also display log messages on screen (default: False)
     """
     self.RMQ = RabbitMQ()
     # Create the queue and logger
     self.log_queue = queue.Queue(-1)  # infinite size
     self.queue_handler = QueueHandler(self.log_queue)
     self.logger = logging.getLogger()
     self.logger.addHandler(self.queue_handler)
     # Formatter
     self.formatter = logging.Formatter(' %(message)s')
     # File handler - write to file
     self.file_handler_loc = logging.FileHandler(logfile_path)
     self.file_handler_loc.setFormatter(self.formatter)
     # Console handler - print on screen
     if console:
         self.console_handler = logging.StreamHandler()
         self.console_handler.setFormatter(self.formatter)
         self.listener = QueueListener(self.log_queue, self.console_handler,
                                       self.file_handler_loc)
     else:
         self.listener = QueueListener(self.log_queue,
                                       self.file_handler_loc)
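Note that in this excerpt the listener is created but never started, so no records would reach the file or console handlers yet. Presumably the class starts it elsewhere; a plausible sketch of the missing lifecycle methods:

 def start(self):
     # Begin draining the queue on a background thread.
     self.listener.start()

 def stop(self):
     # Flush remaining records and join the background thread.
     self.listener.stop()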
Example #5
def start_logger():
    queue = SimpleQueue()

    formatter = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        )

    logger = logging.getLogger("cranehook")
    logger.setLevel(settings.LOG_LEVEL)

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    stream_handler.setLevel(logging.DEBUG)

    discord_handler = DiscordHandler(settings.DISCORD_WEBHOOK_URL, "cranehook")
    discord_handler.setFormatter(formatter)
    discord_handler.setLevel(logging.INFO)
    queue_listener = QueueListener(
        queue, discord_handler, stream_handler, respect_handler_level=True
    )

    queue_handler = QueueHandler(queue)
    logger.addHandler(queue_handler)

    queue_listener.start()
Example #6
def logger_init(file_location="multi.log"):
    q = multiprocessing.Queue()
    # this is the handler for all log records
    stream_handler = logging.StreamHandler()
    formatter = logging.Formatter(
        "%(levelname)s: %(asctime)s - %(process)s - [%(filename)s:%(lineno)s] - %(message)s"
    )
    stream_handler.setFormatter(formatter)
    file_handler = logging.FileHandler(file_location, encoding="utf8")
    file_handler.setFormatter(formatter)

    # ql gets records from the queue and sends them to the handler
    ql = QueueListener(q,
                       stream_handler,
                       file_handler,
                       respect_handler_level=True)
    ql.start()

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # add the handler to the logger so records from this process are handled
    logger.addHandler(stream_handler)
    logger.addHandler(file_handler)

    return ql, q
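The returned queue is meant to be shared with worker processes, each of which attaches only a QueueHandler; the listener in the parent does the actual I/O. A minimal worker-side sketch under that assumption:

import logging
from logging.handlers import QueueHandler

def worker_init(q):
    # Runs once per worker process: route every record through the
    # shared queue back to the QueueListener in the parent process.
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    logger.addHandler(QueueHandler(q))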
Example #7
    def __init__(
            self,
            setup: Callable[..., WorkerSetupResult],
            function: Callable[[WorkerSetupResult], WorkerFunctionResult],
            cleanup: Callable[[WorkerSetupResult], None],
            setup_args: Optional[Tuple] = None,
            setup_kwargs: Optional[Dict] = None,
            log_handlers: Iterable[Handler] = (),
    ):
        self._running = True

        self._task_queue = mp.Queue(maxsize=500)  # TODO: figure out good value

        logging_queue = mp.Queue()
        self._log_listener = QueueListener(logging_queue, *log_handlers)
        self._log_listener.start()

        self._should_terminate_flag = mp.Value(c_bool, 0)

        self._process = mp.Process(
            name="Pye3D Background Process",
            daemon=True,
            target=BackgroundProcess._worker,
            kwargs=dict(
                setup=setup,
                function=function,
                cleanup=cleanup,
                task_queue=self._task_queue,
                should_terminate_flag=self._should_terminate_flag,
                logging_queue=logging_queue,
                setup_args=setup_args if setup_args else (),
                setup_kwargs=setup_kwargs if setup_kwargs else {},
            ),
        )
        self._process.start()
Example #8
def logger_init(dirpath=None):
    # Adapted from http://stackoverflow.com/a/34964369/164864
    logging_queue = multiprocessing.Queue()
    # this is the handler for all log records
    filepath = "{}-{}.log".format(
        'pandarus-worker',
        datetime.datetime.now().strftime("%d-%B-%Y-%I-%M%p"))
    if dirpath is not None:
        filepath = os.path.join(dirpath, filepath)
    handler = logging.FileHandler(
        filepath,
        encoding='utf-8',
    )
    handler.setFormatter(
        logging.Formatter("%(asctime)s %(levelname)s %(lineno)d %(message)s"))

    # queue_listener gets records from the queue and sends them to the handler
    queue_listener = QueueListener(logging_queue, handler)
    queue_listener.start()

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)

    return queue_listener, logging_queue
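Callers are expected to keep both return values: the listener so it can be stopped at exit, and the queue so it can be handed to worker processes. A hedged usage sketch:

queue_listener, logging_queue = logger_init(dirpath='/tmp')
try:
    logging.getLogger(__name__).info('main process started')
    # hand logging_queue to worker processes here
finally:
    queue_listener.stop()  # flush queued records before exiting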
Example #9
def config_logging():
    if settings.DINGTALK_WEBHOOK and settings.DINGTALK_SECRET:
        dingtalk_queue = Queue()
        DEFAULT_LOGGING["handlers"]["dingtalk"] = {
            "level": logging.INFO,
            "class": "logging.handlers.QueueHandler",
            "queue": dingtalk_queue,
            "formatter": "simple",
        }
        DEFAULT_LOGGING["loggers"]["notifier"]["handlers"] = [
            "console",
            "file",
            "websocket",
            "dingtalk",
        ]
        dingtalk_handler = DingTalkHandler(
            webhook_url=settings.DINGTALK_WEBHOOK, secret=settings.DINGTALK_SECRET
        )
        dingtalk_listener = QueueListener(dingtalk_queue, dingtalk_handler)
        dingtalk_listener.start()

    Path(LOG_LOCATION).parent.mkdir(parents=True, exist_ok=True)
    logging.config.dictConfig(DEFAULT_LOGGING)
    ws_handler = YuFuRobotStreamWebsocketHandler(ws_uri=settings.WS_ROBOT_STREAM_URI)
    ws_listener = WebsocketListener(ws_queue, ws_handler)
    ws_listener.start()
Example #10
class CentralizedLogHandler(QueueHandler):
    """
    A queue handler to centralize multiple worker logging.

    https://docs.python.org/3/library/logging.handlers.html#queuehandler
    """
    __slots__ = "queue", "_listener"

    def __init__(self,
                 handlers: List[logging.Handler],
                 queue: Any = None,
                 respect_handler_level: bool = True):
        """
        Initialize queued log handler.

        :param list of logging.Handler handlers: Logging handlers
        :param queue.Queue queue: transport queue
        :param bool respect_handler_level: respect handler levels
        """
        self.queue = queue or Queue()
        self._listener = QueueListener(
            self.queue, *handlers, respect_handler_level=respect_handler_level)
        self._listener.start()
        atexit.register(self._listener.stop)
        super().__init__(self.queue)
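Usage could then be a single handler on the root logger that fans out to the real handlers on the listener thread; a sketch:

import logging
import sys

root = logging.getLogger()
root.setLevel(logging.INFO)
root.addHandler(CentralizedLogHandler(handlers=[
    logging.StreamHandler(sys.stdout),
    logging.FileHandler('app.log'),
]))
root.info('handled off the calling thread')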
Example #11
    def default(cls) -> logging.Logger:
        """Defines non-blocking application logger.
        Inspiration: https://www.zopatista.com/python/2019/05/11/asyncio-logging/

        Returns:
            logging.Logger: Root logger
        """
        # get root logger
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)

        formatter = logging.Formatter(
            fmt="%(asctime)s | %(levelname)8s | %(message)60s | %(filename)s:%(lineno)d at %(name)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        )

        stdout_handler = logging.StreamHandler(stream=sys.stdout)
        stdout_handler.setLevel(logging.DEBUG)
        stdout_handler.setFormatter(formatter)

        queue = SimpleQueue()
        queue_handler = QueueHandler(queue)
        logger.addHandler(queue_handler)

        listener = QueueListener(queue, stdout_handler, respect_handler_level=True)

        listener.start()

        return logger
Example #12
class sfLogger:
    def __init__(self, logger_name):
        self.format = logging.Formatter("%(message)s")
        self.log_queue = queue.Queue()
        self.queue_handler = QueueHandler(self.log_queue)
        self.queue_handler.setFormatter(self.format)
        self.logger = logging.getLogger(logger_name)
        self.logger.addHandler(self.queue_handler)
        self.logger.setLevel(logging.DEBUG)
        # Note: the listener's target is the same QueueHandler that feeds
        # the queue, so records it consumes are re-enqueued; loggenerator()
        # below reads records off the queue directly instead.
        self.listener = QueueListener(self.log_queue, self.queue_handler)
        self.isStop = False

    def start(self):
        #print("logger.start()")
        self.listener.start()
        self.isStop = False

    def loggenerator(self):
        #print("logger.loggenerator()")
        while not self.isStop:
            yield self.log_queue.get().getMessage()

    def stop(self):
        #print("logger.stop()")
        self.listener.stop()
        self.isStop = True
        while not self.log_queue.empty():
            self.log_queue.get().getMessage()  # drain remaining records
Example #13
    def __init__(self, n_cpu, **kwargs):
        self.queue = JoinableQueue()
        self.log_queue = Queue()
        self.n_tasks = Value('i', 0)
        kwargs["n_tasks"] = self.n_tasks

        self.processes = [
            Process(target=self.run_trial, kwargs=kwargs)
            for _ in range(int(n_cpu))
        ]

        self.mh = MinimisationHandler.create(kwargs["mh_dict"])
        for season in self.mh.seasons.keys():
            inj = self.mh.get_injector(season)
            inj.calculate_n_exp()
        self.mh_dict = kwargs["mh_dict"]
        self.scales = []

        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter(
                "%(levelname)s: %(asctime)s - %(process)s - %(message)s"))
        # ql gets records from the queue and sends them to the handler

        ql = QueueListener(self.log_queue, handler)
        ql.start()

        for p in self.processes:
            p.start()
Example #14
class WorkflowTest(unittest.TestCase):
    def setUp(self):

        # communications channels
        parent_workflow_conn, child_workflow_conn = multiprocessing.Pipe()
        parent_mpl_conn, child_matplotlib_conn = multiprocessing.Pipe()
        running_event = multiprocessing.Event()

        # logging
        log_q = multiprocessing.Queue()

        def handle(record):
            logger = logging.getLogger(record.name)
            if logger.isEnabledFor(record.levelno):
                logger.handle(record)

        handler = CallbackHandler(handle)
        self.queue_listener = QueueListener(log_q, handler)
        self.queue_listener.start()

        remote_process = multiprocessing.Process(
            target=remote_main,
            name="remote process",
            args=[parent_workflow_conn, parent_mpl_conn, log_q, running_event])

        remote_process.daemon = True
        remote_process.start()
        running_event.wait()

        self.workflow = Workflow((child_workflow_conn, child_matplotlib_conn))
        self.remote_process = remote_process

    def tearDown(self):
        self.workflow.shutdown_remote_process(self.remote_process)
        self.queue_listener.stop()
Example #15
    def setUp(self):

        # communications channels
        parent_workflow_conn, child_workflow_conn = multiprocessing.Pipe()
        running_event = multiprocessing.Event()

        # logging
        log_q = multiprocessing.Queue()

        def handle(record):
            logger = logging.getLogger(record.name)
            if logger.isEnabledFor(record.levelno):
                logger.handle(record)

        handler = CallbackHandler(handle)
        self.queue_listener = QueueListener(log_q, handler)
        self.queue_listener.start()

        remote_process = multiprocessing.Process(
            target=remote_main,
            name="remote process",
            args=[parent_workflow_conn, log_q, running_event])

        remote_process.daemon = True
        remote_process.start()
        running_event.wait()

        self.workflow = LocalWorkflow(child_workflow_conn)
        self.remote_process = remote_process
Example #16
class QueueLogger:
    def __init__(self):
        self.structlog_q = None
        self.structlog_q_handler = None
        self.structlog_listener = None
        self.initialized = False

    def initialize_q(self):
        self.structlog_q = multiprocessing.Queue(-1)
        self.structlog_q_handler = QueueHandler(self.structlog_q)
        self.initialized = True

    def format_logger(self, logger):
        logger.addHandler(self.structlog_q_handler)

    def configure_listener(self, handlers):
        self.structlog_listener = QueueListener(self.structlog_q, *handlers, respect_handler_level=True)

    def setup_queue_logging(self, logger, handlers):
        self.initialize_q()
        self.format_logger(logger)
        self.configure_listener(handlers)

    def start(self):
        self.structlog_listener.start()

    def stop(self):
        if self.initialized:
            self.structlog_listener.stop()
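Putting the pieces together, usage might look like this sketch:

import logging
import sys

qlogger = QueueLogger()
logger = logging.getLogger('worker')
qlogger.setup_queue_logging(logger, [logging.StreamHandler(sys.stdout)])
qlogger.start()
logger.warning('routed through the multiprocessing queue')
qlogger.stop()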
Example #17
    def _load(self, settings):
        disabled = settings.get("disabled", list())
        handlers = list()
        if "stream" not in disabled:
            stream = settings.get("stream", dict())
            stream_level = stream.get("level", None)
            stream_formatter = stream.get("formatter", None)
            stream_date_format = stream.get("date_format", None)
            stream_args = stream_level, stream_formatter, stream_date_format
            stream_handler = Stream(*stream_args)
            handlers.append(stream_handler)

        if "file" not in disabled:
            rotated = settings.get("file", dict())
            rotated_filename = rotated.get("filename", f"{self.name}.log")
            rotated_when = rotated.get("when", "midnight")
            rotated_interval = rotated.get("interval", 1)
            rotated_backup_count = rotated.get("backup_count", 5)
            rotated_level = rotated.get("level", None)
            rotated_formatter = rotated.get("formatter", None)
            rotated_date_format = rotated.get("date_format", None)
            rotated_args = (rotated_filename, rotated_when, rotated_interval,
                            rotated_backup_count, rotated_level,
                            rotated_formatter, rotated_date_format)
            rotated_handler = Rotated(*rotated_args)
            handlers.append(rotated_handler)

        self.queue = Queue()
        self.queue_handler = QueueHandler(self.queue)

        args = tuple(handlers)
        kwargs = dict()
        kwargs["respect_handler_level"] = True
        self.listener = QueueListener(self.queue, *args, **kwargs)
        self.core.addHandler(self.queue_handler)
Example #18
def configure_logging(name, level=logging.INFO):
    file_handler = RotatingFileHandler(name,
                                       mode="a+",
                                       maxBytes=48000,
                                       backupCount=1)
    file_handler.setFormatter(
        FileFormatter(
            "%(asctime)s %(name)s[%(lineno)d] - %(levelname)s: %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        ))
    stream_handler = StreamHandler(stream=sys.stdout)
    stream_handler.setFormatter(
        ColouredFormatter(
            "%(asctime)s %(name)s[%(lineno)d] - %(levelname)s: %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
        ))

    log_queue = Queue()
    queue_handler = LocalQueueHandler(log_queue)

    root_logger = logging.getLogger()
    root_logger.setLevel(level)
    root_logger.addHandler(queue_handler)

    listener = QueueListener(log_queue,
                             file_handler,
                             stream_handler,
                             respect_handler_level=True)
    listener.start()
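LocalQueueHandler is not defined in this excerpt. A common variant of this pattern in asyncio-oriented code overrides emit() to skip QueueHandler.prepare(), since the record never leaves the process; a plausible sketch under that assumption:

class LocalQueueHandler(QueueHandler):
    def emit(self, record: logging.LogRecord) -> None:
        # Same-process queue: no need to prepare() the record
        # (no pickling, no premature formatting), so enqueue as-is.
        try:
            self.enqueue(record)
        except Exception:
            self.handleError(record)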
Example #19
    def initLogger(self):
        if not self._capture_output:
            return

        # Regular log output goes to the console
        if self._stream is None:
            self._stream = EmittingStream(textWritten=self.normalOutputWritten)
        self.log_handler = logging.StreamHandler(self._stream)
        FORMAT = logging.Formatter(
            '%(asctime)-15s [%(levelname)s] - %(message)s [%(name)s::%(funcName)s]'
        )
        self.log_handler.setFormatter(FORMAT)
        add_class_logger_handler(
            [
                MyMainWindow,
                CollectSpotThread,  #CollectToMySQLThread, CollectToMemThread,
                UsePytdxImportToH5Thread,
                UseTdxImportToH5Thread,
                ImportTdxToH5Task,
                SchedImportThread
            ],
            logging.INFO)
        for name in logging.Logger.manager.loggerDict.keys():
            logger = logging.getLogger(name)
            logger.addHandler(self.log_handler)
            logger.setLevel(logging.DEBUG)

        # Multiprocessing logging queue
        self.mp_log_q = multiprocessing.Queue()
        self.mp_log_q_listener = QueueListener(self.mp_log_q, self.log_handler)
        self.mp_log_q_listener.start()
Example #20
def logger_init(log_fname):
    q = Queue()

    try:
        handler = logging.FileHandler(log_fname)
    except PermissionError as e:
        print("logger_init: Error = ", str(e))
        handler = logging.StreamHandler()
        print("logger_init: StreamHandler selected.")
    except Exception as e:
        print("logger_init: Unexpected Error = ", str(e))
        handler = logging.StreamHandler()

    handler.setFormatter(
        logging.Formatter(
            "%(levelname)s: %(asctime)s - %(process)s - %(message)s"))

    ql = QueueListener(q, handler)
    ql.start()

    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)

    return ql, q
Example #21
def initialize_logging(
        command_name: str,
        log_queue: "Queue[logging.LogRecord]") -> Callable[[], None]:
    """Initialize logging handlers and configuration.

    Args:
        command_name: Name of the command that is being logged.
        log_queue: Logging queue to collect log messages from sub-processes.

    Returns:
        Callback to stop the log queue listener when shutting down the platform.

    """
    _configure_logging(command_name)
    _log_unhandled_exceptions()

    log_listener = QueueListener(log_queue,
                                 *logging.getLogger().handlers,
                                 respect_handler_level=True)
    log_listener.start()

    def cleanup_callback() -> None:
        log_listener.stop()

    logger.debug(
        f"Initialized logging for main process (pid={os.getpid()}, parent={os.getppid()}, platform={platform()})"
    )

    return cleanup_callback
Example #22
 def __init__(self, job, level):
     
     self.job = job
     self.level = level
     
     # Create queue through which log records can be sent from various
     # processes and threads to the logging thread.
     self.queue = Queue()
     
     formatter = Formatter('%(asctime)s %(levelname)-8s %(message)s')
     
     # Create handler that writes log messages to the job log file.
     os_utils.create_parent_directory(job.log_file_path)
     file_handler = FileHandler(job.log_file_path, 'w')
     file_handler.setFormatter(formatter)
     
     # Create handler that writes log messages to stderr.
     stderr_handler = StreamHandler()
     stderr_handler.setFormatter(formatter)
     
     self._record_counts_handler = _RecordCountsHandler()
     
     # Create logging listener that will run on its own thread and log
     # messages sent to it via the queue.
     self._listener = QueueListener(
         self.queue, file_handler, stderr_handler,
         self._record_counts_handler)
Example #23
def configure_log_listener(console: bool = True,
                           log_path: str = "main.log") -> QueueListener:
    """
    Configure log queue listener to log into file and console.
    Args:
        console (bool): whether to log on console
        log_path (str): path of log file
    Returns:
        log_qlistener (logging.handlers.QueueListener): configured log queue listener
    """
    global log_qlistener
    try:
        atexit.unregister(log_qlistener.stop)
        log_qlistener.stop()
    except (AttributeError, NameError):
        pass

    handlers: List[logging.Handler] = []

    # rotating file handler
    if log_path:
        file_handler = _get_file_handler(log_path)
        handlers.append(file_handler)

    # console handler
    if console:
        stdout_handler = _get_stdout_handler()
        handlers.append(stdout_handler)

    log_qlistener = QueueListener(log_queue,
                                  *handlers,
                                  respect_handler_level=True)
    log_qlistener.start()
    atexit.register(log_qlistener.stop)
    return log_qlistener
Example #24
 def setup_threaded_logging(self):
     self.logging_queue = MPQueue(-1)
     shandler = logging.StreamHandler()
     sformatter = logging.Formatter('[%(name)s] %(levelname)s: %(message)s')
     shandler.setFormatter(sformatter)
     ql = QueueListener(self.logging_queue, shandler)
     ql.start()
     return ql
Example #25
class LokiQueueHandler(QueueHandler):
    """This handler automatically creates listener and `LokiHandler` to handle logs queue."""
    def __init__(self, queue: Queue, **kwargs):
        """Create new logger handler with the specified queue and kwargs for the `LokiHandler`."""
        super().__init__(queue)
        self.handler = LokiHandler(**kwargs)  # noqa: WPS110
        self.listener = QueueListener(self.queue, self.handler)
        self.listener.start()
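If this is the python-logging-loki package, usage would be along these lines; the exact LokiHandler keyword arguments shown (url, tags) are assumptions here:

import logging
from multiprocessing import Queue

handler = LokiQueueHandler(
    Queue(-1),
    url='https://loki.example.org/loki/api/v1/push',  # assumed endpoint
    tags={'application': 'my-app'},
)
logging.getLogger('loki').addHandler(handler)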
Example #26
    def __init__(self, level=logging.NOTSET):
        self._handler = RequestHandler()

        self._queue_handler = logging.handlers.QueueHandler(queue)
        self._queue_handler.setLevel(level)

        self._listener = QueueListener(queue, self._handler)
        self._listener.start()
        super().__init__(level)
Example #27
def setup_main_logging(config) -> mp.Queue:
    log_queue = mp.Queue()
    log_handler = NeptuneLogHandler(config)
    log_handler.setLevel(logging.INFO)
    listener = QueueListener(log_queue,
                             log_handler,
                             respect_handler_level=True)
    listener.start()
    return log_queue
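Because the handler's level is INFO and respect_handler_level=True, DEBUG records sent through the queue are dropped by the listener rather than forwarded. A worker would then attach the returned queue, roughly as below (QueueHandler from logging.handlers is assumed imported):

log_queue = setup_main_logging(config)  # config as expected by NeptuneLogHandler

# In each worker process:
root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(QueueHandler(log_queue))
root.debug('dropped by the listener (below the handler level)')
root.info('forwarded to the Neptune handler')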
Example #28
 def __init__(self,
              queue: Queue,
              url: str,
              tags: Optional[dict] = None,
              auth: BasicAuth = None):
     super().__init__(queue)
     self.handler = LokiHandler(url, tags, auth)
     self.listener = QueueListener(self.queue, self.handler)
     self.listener.start()
Example #29
class LokiQueueHandler(QueueHandler):
    """
    This handler automatically creates listener and `LokiHandler` to handle logs queue.
    """

    def __init__(self, queue: Queue, url: str, tags: Optional[dict] = None, auth: BasicAuth = None):
        super().__init__(queue)
        self.handler = LokiHandler(url, tags, auth)
        self.listener = QueueListener(self.queue, self.handler)
        self.listener.start()
Example #30
 def __init__(self, logger_name):
     self.format = logging.Formatter("%(message)s")
     self.log_queue = queue.Queue()
     self.queue_handler = QueueHandler(self.log_queue)
     self.queue_handler.setFormatter(self.format)
     self.logger = logging.getLogger(logger_name)
     self.logger.addHandler(self.queue_handler)
     self.logger.setLevel(logging.DEBUG)
     self.listener = QueueListener(self.log_queue, self.queue_handler)
     self.isStop = False
Example #31
def logger_init():
    q = multiprocessing.Queue()
    # this is the handler for all log records
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(levelname)s: %(asctime)s - %(process)s - %(message)s"))

    # ql gets records from the queue and sends them to the handler
    ql = QueueListener(q, handler)
    ql.start()

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # add the handler to the logger so records from this process are handled
    logger.addHandler(handler)

    return ql, q
Example #32
 def __init__(self, queue=None, logging_url="", channel="", username="",
              icon_emoji=""):
     """
     logging_url, channel, username and icon_emoji can all be overridden
     by the extra dictionary parameter of a logging record.
     For example:
         logging.info('Test message', extra={'channel': '@someone',
                                             'username': '******',
                                             'icon_emoji': ':penguin:'})
     """
     QueueListener.__init__(self, queue)
     Handler.__init__(self)
     self.logging_url = logging_url
     self.payload = {
         "channel": channel,
         "username": username,
         "icon_emoji": icon_emoji
         }
Example #33
def get_logging_queue():
    # This will probably have to be refactored to get access to the manager as well.
    global __queue, __manager
    if __queue is None:
        m = multiprocessing.Manager()
        __manager = m
        q = m.Queue(-1)
        #https://docs.python.org/3/howto/logging-cookbook.html
        listener = QueueListener(q, *logging.getLogger().handlers)
        listener.start()
        def exithandler():
            q.join()
            listener.stop()
            # Seems to help silence bugs at shutdown...
            import time; time.sleep(0.2)
            m.shutdown()

        atexit.register(exithandler)
        __queue = q
        return q
    return __queue
Example #34
# Blocking handlers: offload their work to a QueueListener thread
import queue
import logging
from logging.handlers import QueueHandler, QueueListener

que = queue.Queue(-1)
queue_handler = QueueHandler(que)
handler = logging.StreamHandler()
listener = QueueListener(que, handler)
root = logging.getLogger()
root.addHandler(queue_handler)
formatter = logging.Formatter("%(threadName)s: %(message)s")
handler.setFormatter(formatter)
listener.start()
root.warning("Look Out")
listener.stop()
Example #35
class JobLoggingManager:
    
    """
    Manages logging for a Vesper job.
    
    A `JobLoggingManager` manages logging for the processes of a Vesper job.
    Log records can be submitted by any process of a job using any logger
    (typically the root logger) configured with the `configure_logger`
    static method. A logger so configured writes each log record to a
    multiprocessing queue that is read by a thread running in the main
    job process, which in turn writes log messages to the job's log file.
    """
    
    
    @staticmethod
    def configure_logger(logger, logging_config):
        
        """
        Configures the specified logger to write log records to this job's
        logging queue.
        
        For the `logging_config` argument, the main job process can pass
        the `logging_config` attribute of its `JobLoggingManager`. This
        information is also passed to the `execute` method of the job's
        command as the `logging_config` attribute of the command's
        execution context. The information is picklable, so it can be
        delivered easily to any additional process started by the main
        job process as an argument to the process's target function.        
        """
        
        level, queue = logging_config
        
        logger.setLevel(level)
        
        handler = QueueHandler(queue)
        logger.addHandler(handler)

        
    def __init__(self, job, level):
        
        self.job = job
        self.level = level
        
        # Create queue through which log records can be sent from various
        # processes and threads to the logging thread.
        self.queue = Queue()
        
        formatter = Formatter('%(asctime)s %(levelname)-8s %(message)s')
        
        # Create handler that writes log messages to the job log file.
        os_utils.create_parent_directory(job.log_file_path)
        file_handler = FileHandler(job.log_file_path, 'w')
        file_handler.setFormatter(formatter)
        
        # Create handler that writes log messages to stderr.
        stderr_handler = StreamHandler()
        stderr_handler.setFormatter(formatter)
        
        self._record_counts_handler = _RecordCountsHandler()
        
        # Create logging listener that will run on its own thread and log
        # messages sent to it via the queue.
        self._listener = QueueListener(
            self.queue, file_handler, stderr_handler,
            self._record_counts_handler)
        
        
    @property
    def logging_config(self):
        return (self.level, self.queue)
    
    
    @property
    def record_counts(self):
        return dict(self._record_counts_handler.record_counts)
    
    
    def start_up_logging(self):
        self._listener.start()
        
        
    def shut_down_logging(self):
        
        # Tell logging listener to terminate, and wait for it to do so.
        self._listener.stop()
        
        logging.shutdown()
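A typical lifecycle for the manager might look like the sketch below, assuming a job object with a log_file_path attribute, as the constructor expects:

manager = JobLoggingManager(job, logging.INFO)
manager.start_up_logging()

# In the main job process, or in any process handed logging_config:
JobLoggingManager.configure_logger(logging.getLogger(), manager.logging_config)
logging.getLogger().info('job started')

manager.shut_down_logging()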
Example #36
    def run(self, p_processors_nb_threads, p_writer_nb_threads=None):
        # All log messages come and go by this queue
        log_queue = Queue()
        logger = logging.getLogger('swallow')

        if len(logger.handlers) > 1:
            logger.warning("Several handlers detected on the swallow logger, but only a single handler can be used in multiprocessing mode. Only the first one will be used.")
        elif len(logger.handlers) == 0:
            logger.warning("No handler defined for the swallow logger. Logging to console at INFO level.")
            # Handler console
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(logging.INFO)
            logger.addHandler(stream_handler)

        # each log_listener gets records from the queue and sends them to a specific handler
        handler = logger.handlers[0]
        formatter = handler.formatter
        listener = QueueListener(log_queue, handler)
        listener.start()

        if p_writer_nb_threads is None:
            p_writer_nb_threads = p_processors_nb_threads

        logger.info('Running swallow process. Processors on %i threads / Writers on %i threads', p_processors_nb_threads, p_writer_nb_threads)

        start_time = datetime.datetime.now()

        # Set extra properties to readers
        for reader in self.readers:
            reader['reader'].counters = self.counters
            reader['reader'].log_queue = log_queue
            reader['reader'].log_level = logger.level
            reader['reader'].formatter = formatter

        # Set extra properties to writer
        if self.writer is not None:
            self.writer.counters = self.counters
            self.writer.log_queue = log_queue
            self.writer.log_level = logger.level
            self.writer.formatter = formatter

        read_worker = [Process(target=reader['reader'].scan_and_queue, args=(self.in_queue,), kwargs=(reader['args'])) for reader in self.readers]
        process_worker = [Process(target=get_and_parse, args=(self.in_queue, self.out_queue, self.process, self.counters, log_queue, logger.level, formatter), kwargs=(self.process_args)) for i in range(p_processors_nb_threads)]

        # Writers are optional
        if self.writer is not None:
            write_worker = [Process(target=self.writer.dequeue_and_store, args=(self.out_queue,), kwargs=(self.writer_store_args)) for i in range(p_writer_nb_threads)]
        else:
            write_worker = []

        # Running workers
        for work in read_worker:
            work.start()
        for work in process_worker:
            work.start()
        for work in write_worker:
            work.start()

        # Waiting for workers to end:
        # work.join() blocks the program until the worker ends
        logger.info('Waiting for reader to finish')
        for work in read_worker:
            # Waiting for all reader to finish their jobs
            work.join()

        # At this point, reading is finished. We add a poison pill for each consumer of the read queue:
        for i in range(len(process_worker)):
            self.in_queue.put(None)

        logger.info('Waiting for processors to finish')
        for work in process_worker:
            # Waiting for all processors to finish their jobs
            work.join()

        # At this point, processing is finished. We add a poison pill for each consumer of the write queue:
        for i in range(len(write_worker)):
            self.out_queue.put(None)

        logger.info('Waiting for writers to finish')
        for work in write_worker:
            # Waiting for all writers to finish their jobs
            work.join()

        elapsed_time = datetime.datetime.now() - start_time
        logger.info('Elapsed time : %ss' % elapsed_time.total_seconds())

        avg_time = 0
        nb_items = self.counters['nb_items_scanned'].value
        if nb_items:
            avg_time = 1000*self.counters['scan_time'].value / nb_items
        logger.info('{0} items scanned ({1}ms)'.format(nb_items, avg_time))

        avg_time = 0
        avg_time_idle = 0
        nb_items = self.counters['nb_items_processed'].value
        if nb_items:
            avg_time = 1000*self.counters['real_process_time'].value / nb_items
            avg_time_idle = 1000*self.counters['idle_process_time'].value / nb_items
        logger.info('{0} items processed (process : {1}ms / idle : {2}ms)'.format(nb_items, avg_time, avg_time_idle))

        avg_time = 0
        nb_items = self.counters['nb_items_stored'].value
        if nb_items:
            avg_time = 1000*self.counters['whole_storage_time'].value / nb_items
        logger.info('{0} items stored ({1}ms)'.format(nb_items, avg_time))

        nb_items = self.counters['nb_items_error'].value
        logger.info('{0} items error'.format(nb_items))

        # Stop listening for log messages
        listener.stop()