def __setstate__(self, state):
    """Restore the process state after unpickling.

    Rebuilds the members that cannot cross process boundaries: the queue
    logging handler/logger and the database engine/connection.

    :param state: the unpickled ``__dict__`` of the instance.
    """
    self.__dict__.update(state)
    # Create and attach the queue handler exactly once. The original code
    # built a second identical QueueHandler and added it again, so every
    # record was emitted twice on the logging queue.
    self.handler = logging_handlers.QueueHandler(self.logging_queue)
    self.logger = logging.getLogger(self.name)
    self.logger.addHandler(self.handler)
    self.logger.setLevel(self.configuration.log_settings.log_level)
    self.logger.propagate = False
    self.engine = dbutils.connect(self.configuration, self.logger)
    # Re-establish the DB session lazily; both arguments are recreated.
    self.connect_to_db(engine=None, session=None)
def setup_logger(args):
    """
    Set up the main and queue loggers for the compare function.

    :param args: namespace carrying ``log`` (log file name or None) and
        ``verbose`` (bool); on return it also carries ``log_queue`` and
        ``queue_handler``.
    :return: (args, handler, logger, log_queue_listener, queue_logger)
    """
    args.log_queue = mp.Queue(-1)
    args.queue_handler = log_handlers.QueueHandler(args.log_queue)
    if args.log is not None:
        _log_folder = os.path.dirname(args.log)
        if _log_folder and not os.path.exists(_log_folder):
            os.makedirs(_log_folder)
        # Open the FileHandler directly in truncating mode. The original
        # code touched the file with open()/close() and then reopened it
        # through FileHandler, which is redundant and race-prone.
        handler = logging.FileHandler(args.log, mode="wt")
        logger = logging.getLogger("main_compare")
        # NOTE(review): ``formatter`` is resolved at module level — confirm
        # it is defined in the enclosing module.
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.propagate = False
    else:
        logger = create_default_logger("main_compare")
        handler = logger.handlers[0]
    if args.verbose is False:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.DEBUG)
    logger.propagate = False
    log_queue_listener = log_handlers.QueueListener(args.log_queue, logger)
    log_queue_listener.start()
    queue_logger = logging.getLogger("main_queue")
    # Iterate over a copy and use a dedicated loop name: the original loop
    # mutated the handler list while iterating it AND rebound ``handler``,
    # clobbering the handler returned below.
    for old_handler in list(queue_logger.handlers):
        queue_logger.removeHandler(old_handler)
    if args.verbose is False:
        queue_logger.setLevel(logging.INFO)
    else:
        queue_logger.setLevel(logging.DEBUG)
    main_queue_handler = log_handlers.QueueHandler(args.log_queue)
    queue_logger.propagate = False
    queue_logger.addHandler(main_queue_handler)
    return args, handler, logger, log_queue_listener, queue_logger
def newLogger(name, configuration=None, level=10):
    """
    Create and return a new logger connected to the main logging queue.

    :param name: Name (preferably of the calling module) that will show in the log records
    :param configuration: a dict configuration for the logger, or None to use the basic config
    :param level: log message level; default is 10 (DEBUG)
    :return: logger object
    """
    if __listener is None:
        setupLogListener()
    if configuration is not None:
        config.dictConfig(configuration)
    new_logger = logging.getLogger(name)
    try:
        new_logger.setLevel(level)
    except ValueError:
        # Invalid level value: leave the logger at its default (WARNING).
        pass
    # Attach the queue handler only when none is present yet; otherwise a
    # single logger would emit dozens of duplicate messages per call.
    if not new_logger.hasHandlers():
        new_logger.addHandler(handlers.QueueHandler(__logging_queue))
    return new_logger
def __setup_logger(self):
    """
    Private method to set up the logger using indications in the args namespace.

    Uses the shared ``log_queue`` when present; otherwise falls back to a
    NullHandler so log calls are silently discarded.
    """
    if hasattr(self.args, "log_queue"):
        # noinspection PyUnresolvedReferences
        self.queue_handler = log_handlers.QueueHandler(self.args.log_queue)
    else:
        # Instantiate the handler. The original assigned the NullHandler
        # *class* (missing parentheses), which is not a valid handler
        # object and would break addHandler below.
        self.queue_handler = logging.NullHandler()
    if self._counter is None:
        self.logger = logging.getLogger("stat_logger")
    else:
        self.logger = logging.getLogger("stat_logger-{}".format(self._counter))
    self.logger.addHandler(self.queue_handler)
    # noinspection PyUnresolvedReferences
    if self.args.verbose:
        self.logger.setLevel(logging.DEBUG)
    else:
        self.logger.setLevel(logging.INFO)
    self.logger.propagate = False
    return
def replace_with_queue_handler(self):
    """setup a central queue handler and start a listener thread"""
    every_logger = [root_logger]
    every_logger.extend(logging.getLogger(name) for name in root_logger.manager.loggerDict)
    for current_logger in every_logger:
        if not current_logger.handlers:
            continue
        # Keep the existing handlers aside and empty the logger's list,
        # then route records through a queue instead.
        previous_handlers, current_logger.handlers = current_logger.handlers, []
        log_queue = self.qclass()
        current_logger.addHandler(handlers.QueueHandler(log_queue))
        # The listener replays queued records into the original handlers.
        listener = handlers.QueueListener(log_queue, *previous_handlers, respect_handler_level=True)
        self.q_listeners.append(listener)
        # Make sure the listener is stopped cleanly at interpreter exit.
        atexit.register(listener.stop)
        listener.start()
    root_logger.debug('Logging queue listener started!')
def __init__(self, queries, targets, filequeue: multiprocessing.Queue, returnqueue,
             default_header, identifier, logging_queue, level="WARN",
             max_target_seqs=10, maxobjects=20000, discard_definition=False):
    """Worker-process initializer for the XML pickler.

    Stores the work/result queues and wires a queue-based logger so records
    reach the parent process.

    :param queries: query sequences container (project type).
    :param targets: target sequences container (project type).
    :param filequeue: queue of files to process.
    :param returnqueue: queue where results are put back.
    :param default_header: default header used for parsed records.
    :param identifier: numeric identifier used in the process name.
    :param logging_queue: multiprocessing queue shared with the log listener.
    :param level: logging level for both handler and logger (default "WARN").
    :param max_target_seqs: maximum number of targets kept (default 10).
    :param maxobjects: maximum objects to buffer (default 20000).
    :param discard_definition: whether to discard sequence definitions.
    """
    super().__init__()
    self.queries = queries
    self.targets = targets
    self.discard_definition = discard_definition
    self.level = level
    self.logging_queue = logging_queue
    # Handler forwards records onto the shared queue; same level as logger.
    self.handler = logging_handlers.QueueHandler(logging_queue)
    self.handler.setLevel(self.level)
    self.__identifier = identifier
    # ``name`` doubles as the multiprocessing.Process name (via _name).
    self.name = self._name = "_XmlPickler-{0}".format(self.identifier)
    self.logger = logging.getLogger(self.name)
    self.logger.addHandler(self.handler)
    self.logger.setLevel(self.level)
    self.filequeue = filequeue
    self.returnqueue = returnqueue
    self.default_header = default_header
    self.maxobjects = maxobjects
    self.__max_target_seqs = max_target_seqs
    self.logger.debug("Started %s", self.name)
def __setstate__(self, state):
    """Restore the process state after unpickling.

    Rebuilds the unpicklable members: queue logging handler/logger, DB
    engine, the ``analyse_locus`` partial bound to them, and the temporary
    dump store.

    :param state: the unpickled ``__dict__`` of the instance.
    """
    self.__dict__.update(state)
    # Create and attach the queue handler exactly once. The original code
    # created a second identical QueueHandler at the end and added it too,
    # duplicating every log record.
    self.handler = logging_handlers.QueueHandler(self.logging_queue)
    self.logger = logging.getLogger(self.name)
    self.logger.addHandler(self.handler)
    self.logger.setLevel(self.json_conf["log_settings"]["log_level"])
    self.logger.propagate = False
    self.engine = dbutils.connect(self.json_conf, self.logger)
    self.analyse_locus = functools.partial(analyse_locus,
                                           json_conf=self.json_conf,
                                           engine=self.engine,
                                           logging_queue=self.logging_queue)
    self.dump_db, self.dump_conn, self.dump_cursor = self._create_temporary_store(
        self._tempdir, self.identifier)
def get_queued_logger(queue):
    """Return the 'spe_logger' logger, at DEBUG level, forwarding every
    record into *queue* (a multiprocess-safe queue)."""
    spe_logger = logging.getLogger('spe_logger')
    spe_logger.addHandler(handlers.QueueHandler(queue))
    spe_logger.setLevel(logging.DEBUG)
    return spe_logger
def PUBLogger(queue, level=logging.DEBUG):
    '''
    Returns the root logger configured to publish records onto *queue*.

    Key params:
    queue -- A queue to push records to. This in turn gets processed by a listener
             and redirected to a handler.
    level -- Set the logging level. Check formatters list down below to see all 5 levels.
    '''
    formatters = {
        logging.DEBUG: logging.Formatter("[%(levelname)s] %(message)s"),
        logging.INFO: logging.Formatter("[%(levelname)s] %(message)s"),
        logging.WARN: logging.Formatter("[%(levelname)s] %(message)s"),
        logging.ERROR: logging.Formatter("[%(levelname)s] %(message)s"),
        logging.CRITICAL: logging.Formatter("[%(levelname)s] %(message)s")
    }
    logger = logging.getLogger()
    logger.setLevel(level)
    if not logger.handlers:
        handler = handlers.QueueHandler(queue)
        # Install the formatter via setFormatter. The original assigned a
        # ``formatters`` attribute, which the logging framework never reads,
        # so records went out unformatted. All five per-level formatters are
        # identical, so a single formatter is equivalent.
        handler.setFormatter(formatters[logging.DEBUG])
        logger.addHandler(handler)
    return logger
def worker_configurer(queue):
    """Configure the 'Listener' logger in a worker process to forward all
    records (DEBUG and up) into *queue*, then emit a startup message.

    :param queue: multiprocess-safe queue consumed by the log listener.
    """
    h = handlers.QueueHandler(queue)  # Just the one handler needed
    logger = logging.getLogger("Listener")
    logger.addHandler(h)
    # send all messages, for demo; no other level or filter logic applied.
    logger.setLevel(logging.DEBUG)
    # Plain string: the original used an f-string with no placeholders.
    logger.info("Worker initiated")
def __setstate__(self, state):
    """Rebuild the queue handler and logger after unpickling; they cannot
    be transferred across process boundaries."""
    self.__dict__.update(state)
    queue_handler = logging_handlers.QueueHandler(self.logging_queue)
    queue_handler.setLevel(self.level)
    self.handler = queue_handler
    restored_logger = logging.getLogger(self.name)
    restored_logger.addHandler(queue_handler)
    restored_logger.setLevel(self.level)
    self.logger = restored_logger
def setup_logger(args, manager):
    """
    Set up the main and queue loggers for the compare function.

    :param args: namespace with ``log`` (log file name or None) and
        ``verbose``; on return also carries ``log_queue``/``queue_handler``.
    :param manager: multiprocessing manager used to create the log queue.
    :return: (args, handler, logger, log_queue_listener, queue_logger)
    """
    logger = create_default_logger("main_compare")
    args.log_queue = manager.Queue()
    args.queue_handler = log_handlers.QueueHandler(args.log_queue)
    log_queue_listener = log_handlers.QueueListener(args.log_queue, logger)
    log_queue_listener.propagate = False
    log_queue_listener.start()
    if args.log is not None:
        if os.path.exists(args.log):
            os.remove(args.log)
        handler = logging.FileHandler(args.log)
        handler.setFormatter(logger.handlers[0].formatter)
        # Remove stream handler
        logger.removeHandler(logger.handlers[0])
        logger.addHandler(handler)
    else:
        handler = logger.handlers[0]
    if args.verbose is False:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.DEBUG)
    logger.propagate = False
    queue_logger = logging.getLogger("main_queue")
    # Iterate over a copy and use a dedicated loop name: the original loop
    # mutated the handler list while iterating it AND rebound ``handler``,
    # clobbering the handler returned below.
    for old_handler in list(queue_logger.handlers):
        queue_logger.removeHandler(old_handler)
    if args.verbose is False:
        queue_logger.setLevel(logging.INFO)
    else:
        queue_logger.setLevel(logging.DEBUG)
    main_queue_handler = log_handlers.QueueHandler(args.log_queue)
    queue_logger.propagate = False
    queue_logger.addHandler(main_queue_handler)
    return args, handler, logger, log_queue_listener, queue_logger
def call_to_device(): logger = logging.getLogger() # Capture warning logs as elements in a queue. logger.addHandler(handlers.QueueHandler(queue)) warned_types = set() if dedup_between_calls else None to_device(["string_data", "string_data"], "cpu", warned_types) to_device(["string_data", "string_data"], "cpu", warned_types)
def __init__(self, json_conf, locus_queue, logging_queue, status_queue,
             identifier, tempdir="mikado_pick_tmp"):
    """Worker-process initializer for the loci processor.

    Decodes the msgpack-serialized configuration, wires queue-based
    logging, optionally loads a pickled regressor model, and connects to
    the database.

    :param json_conf: msgpack-serialized configuration dictionary.
    :param locus_queue: queue of loci to analyse.
    :param logging_queue: shared queue consumed by the log listener.
    :param status_queue: queue for status updates back to the parent.
    :param identifier: numeric identifier used in the process name.
    :param tempdir: directory for temporary files.
    """
    # current_counter, gene_counter, current_chrom = shared_values
    super(LociProcesser, self).__init__()
    json_conf = msgpack.loads(json_conf, raw=False)
    self.logging_queue = logging_queue
    self.status_queue = status_queue
    self.__identifier = identifier  # Property directly unsettable
    self.name = "LociProcesser-{0}".format(self.identifier)
    self.json_conf = json_conf
    self.engine = None
    # Route this worker's log records to the parent via the shared queue.
    self.handler = logging_handlers.QueueHandler(self.logging_queue)
    self.logger = logging.getLogger(self.name)
    self.logger.addHandler(self.handler)
    self.logger.setLevel(self.json_conf["log_settings"]["log_level"])
    self.logger.propagate = False
    self._tempdir = tempdir
    self.locus_queue = locus_queue
    self.regressor = None
    # self.dump_db, self.dump_conn, self.dump_cursor = self._create_temporary_store(self._tempdir, self.identifier)
    # A pickled scoring file holds a pre-trained random-forest model
    # rather than a plain scoring configuration.
    if self.json_conf["pick"]["scoring_file"].endswith((".pickle", ".model")):
        with open(self.json_conf["pick"]["scoring_file"], "rb") as forest:
            self.regressor = pickle.load(forest)
        from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
        if not isinstance(self.regressor["scoring"],
                          (RandomForestRegressor, RandomForestClassifier)):
            # Invalid model: log, mark a failure exit code and stop.
            exc = TypeError("Invalid regressor provided, type: %s", type(self.regressor))
            self.logger.critical(exc)
            self.exitcode = 9
            self.join()
    self.logger.debug("Starting Process %s", self.name)
    self.logger.debug("Starting the pool for {0}".format(self.name))
    try:
        self.engine = dbutils.connect(self.json_conf, self.logger)
    except KeyboardInterrupt:
        raise
    except EOFError:
        raise
    except Exception as exc:
        # Any other DB failure: log and leave the worker unconfigured.
        self.logger.exception(exc)
        return
    # Partial binding so run() can analyse loci without re-passing state.
    self.analyse_locus = functools.partial(
        analyse_locus,
        json_conf=self.json_conf,
        engine=self.engine,
        logging_queue=self.logging_queue)
def setup_queue_logging(log_queue: Queue, level: int) -> None:
    """Route root logging through *log_queue* at *level* via basicConfig,
    then emit a confirmation debug record."""
    logging.basicConfig(
        level=level,
        handlers=[handlers.QueueHandler(log_queue)],
    )
    logging.getLogger().debug('Logging initialized')
def _setup_task_process(mp_log_q):
    """Initialize a freshly spawned task process: parse config, set up
    logging, and redirect all root log records to the parent via *mp_log_q*.

    :param mp_log_q: multiprocessing queue consumed by the parent's listener.
    """
    # Setting up logging and cfg, needed since this is a new process
    cfg.CONF(sys.argv[1:], project='coriolis', version="1.0.0")
    utils.setup_logging()
    # Log events need to be handled in the parent process
    # NOTE(review): ``.logger`` suggests an adapter wrapping the real
    # logger (e.g. oslo.log) — confirm.
    log_root = logging.getLogger(None).logger
    # Iterate over a copy: the original removed handlers from the list it
    # was iterating, which skips every other handler.
    for handler in list(log_root.handlers):
        log_root.removeHandler(handler)
    log_root.addHandler(handlers.QueueHandler(mp_log_q))
def ctx() -> Generator[None, None, None]:
    """Generator context: buffer every root-logger record emitted inside
    the ``yield`` body, then replay the buffered records afterwards."""
    pending: "Queue[logging.LogRecord]" = Queue()
    root = logging.getLogger()
    capture = handlers.QueueHandler(pending)
    root.addHandler(capture)
    yield
    root.removeHandler(capture)
    # Replay everything captured while the context was active.
    while not pending.empty():
        record: logging.LogRecord = pending.get()
        root.handle(record)
def get_logger(self, logger_name):
    """Return a LoggerAdapter for *logger_name* whose records go only to
    the shared queue (any pre-existing handlers are discarded)."""
    q_handler = handlers.QueueHandler(self._queue)
    q_handler.setFormatter(_formatter)
    base = logging.getLogger(logger_name)
    # Start from a clean slate so repeated calls don't stack handlers.
    base.handlers.clear()
    base.addHandler(q_handler)
    base.setLevel(logging.INFO)
    base.propagate = False  # don't propagate to the root logger.
    return LoggerAdapter(base, _extra_dict)
def runHandler(self):
    """Main loop of the log queue handler.

    Attaches a QueueHandler to the "SpeakReader" logger, then consumes
    records from the receiver queue and fans formatted messages out to
    every listener queue until stopped (``_STARTED`` cleared or a ``None``
    sentinel record arrives).
    """
    if self._STARTED:
        logger.warn('Log Queue Handler already started')
        return
    logger.info('Log Queue Handler starting')
    self._STARTED = True
    mainLogger = logging.getLogger("SpeakReader")
    self.queueHandler = handlers.QueueHandler(self._receiverQueue)
    # NOTE(review): ``logger.log_format``/``logger.log_level`` are custom
    # attributes on the module-level logger object — confirm their source.
    self.queueHandler.setFormatter(logger.log_format)
    self.queueHandler.setLevel(logger.log_level)
    mainLogger.addHandler(self.queueHandler)
    # Remember the base log file name from the rotating file handler, if any.
    for handler in mainLogger.handlers[:]:
        if isinstance(handler, handlers.RotatingFileHandler):
            self.fileName = handler.baseFilename
            break
    while self._STARTED:
        try:
            # Block up to 2 s so the loop can notice a stop request.
            logRecord = self._receiverQueue.get(timeout=2)
            if logRecord is None:
                # ``None`` is the shutdown sentinel.
                break
            # Python 3.6.8 doesn't seem to return a formatted message while 3.7.3 does.
            logMessage = logRecord.getMessage()
            formatted_logMessage = self.queueHandler.format(logRecord)
            # Format the record with an emptied message to recover just the
            # header, so we can tell whether logMessage is already formatted.
            logRecord.msg = ""
            formatted_header = self.queueHandler.format(logRecord)
            if formatted_header not in logMessage:
                logMessage = formatted_logMessage
            data = {
                "event": "logrecord",
                "final": True,
                "record": logMessage,
            }
        except queue.Empty:
            if self._STARTED:
                # Keep listeners alive with a heartbeat when idle.
                data = {"event": "ping"}
            else:
                break
        # Broadcast to all listener queues; drop listeners whose queue is full.
        for queueElement in list(self._listenerQueues.values()):
            try:
                queueElement.put_nowait(data)
            except queue.Full:
                self.removeListener(sessionID=queueElement.sessionID)
    self._STARTED = False
    self.closeAllListeners()
    logger.info('Log Queue Handler terminated')
def test_to_device_warnings(dedup_between_calls) -> None:
    """Check that to_device warns once (dedup) or twice (no dedup) when
    asked to move un-movable string data."""
    queue = multiprocessing.Queue()
    logger = logging.getLogger()
    # Capture warning logs as elements in a queue.
    logger.addHandler(handlers.QueueHandler(queue))
    warned_types = set() if dedup_between_calls else None
    to_device(["string_data", "string_data"], "cpu", warned_types)
    to_device(["string_data", "string_data"], "cpu", warned_types)
    # Parenthesize the conditional: the original parsed as
    # ``assert (qsize() == 1) if dedup else 2``, which always passed in the
    # non-dedup case because the bare ``2`` is truthy.
    assert queue.qsize() == (1 if dedup_between_calls else 2)
    while queue.qsize():
        msg = queue.get().message
        assert "not able to move data" in msg
def create_logger(queue) -> StageLogger:
    """Build the "pdf" logger with a custom ``stage`` method that logs at a
    level 5 above INFO; optionally attach a QueueHandler for *queue*."""
    pdf_logger = logging.getLogger("pdf")
    pdf_logger.setLevel(level=logging.INFO)
    stage_level = logging.INFO + 5

    def stage(self: StageLogger, msg, *args, **kwargs):
        # Emit only if the custom level is enabled for this logger.
        if self.isEnabledFor(stage_level):
            self._log(stage_level, msg, args, **kwargs)

    # Bind ``stage`` to this particular logger instance as a method.
    pdf_logger.stage = stage.__get__(pdf_logger, None)
    if queue:
        pdf_logger.addHandler(handlers.QueueHandler(queue))
    # noinspection PyTypeChecker
    return pdf_logger
def test_to_device_warnings(dedup_between_calls) -> None:
    """Check that to_device warns once (dedup) or twice (no dedup) when
    asked to move un-movable string data; restores the root logger after."""
    # Capture warning logs as elements in a queue.
    logger = logging.getLogger()
    q = queue.Queue()
    handler = handlers.QueueHandler(q)
    logger.addHandler(handler)
    try:
        warned_types = set() if dedup_between_calls else None
        to_device(["string_data", "string_data"], "cpu", warned_types)
        to_device(["string_data", "string_data"], "cpu", warned_types)
        # Parenthesize the conditional: the original parsed as
        # ``assert (qsize() == 1) if dedup else 2``, which always passed in
        # the non-dedup case because the bare ``2`` is truthy.
        assert q.qsize() == (1 if dedup_between_calls else 2)
        while q.qsize():
            msg = q.get().message
            assert "not able to move data" in msg
    finally:
        # Restore logging as it was before.
        logger.removeHandler(handler)
def define_logger(self):
    """Create the 'accasim' logger backed by a queue, plus a listener that
    forwards queued records to a formatted stream handler.

    :return: ``(logger, listener)`` — NOTE(review): the listener is NOT
        started here; the caller appears responsible for ``listener.start()``.
    """
    self._define_trace_logger()
    FORMAT = '%(asctime)-15s %(module)s-%(levelname)s: %(message)s'
    # Unbounded queue between the logger and the stream handler.
    queue = Queue(-1)
    queue_handler = handlers.QueueHandler(queue)
    handler = logging.StreamHandler()
    # NOTE(review): setLevel accepts a level *name* string here — confirm
    # self._log_level is e.g. 'DEBUG'/'INFO'.
    handler.setLevel(self._log_level)
    listener = handlers.QueueListener(queue, handler)
    logger_name = 'accasim'
    logger = logging.getLogger(logger_name)
    logger.addHandler(queue_handler)
    formatter = logging.Formatter(FORMAT)
    handler.setFormatter(formatter)
    logger.setLevel(getattr(logging, self._log_level))
    # Expose the logger name through the shared constants registry.
    self.constants.load_constant('LOGGER_NAME', logger_name)
    return logger, listener
def setup_logging():
    """Configure root logging: a queue handler on the root logger feeds a
    QueueListener that writes to both a console handler and a file handler
    (``~/.sapfy.log``).

    :return: the started QueueListener (caller should stop it on shutdown).
    """
    root_logger = l.getLogger()
    # NOTE(review): ``options.v`` is assumed to be a valid logging level
    # (int or name) from the CLI options module — confirm.
    root_logger.setLevel(options.v)
    file_formatter = l.Formatter(
        "%(asctime)s [%(threadName)-10.10s] [%(levelname)-5.5s]: %(message)s")
    file_handler = l.FileHandler(environ.get('HOME', '.') + '/.sapfy.log')
    file_handler.setFormatter(file_formatter)
    log_formatter = l.Formatter("%(asctime)s [%(levelname)-5.5s]: %(message)s",
                                datefmt='%H:%M:%S')
    console_handler = l.StreamHandler()
    console_handler.setFormatter(log_formatter)
    # Unbounded queue decouples emitters from the slow console/file handlers.
    log_queue = queue.Queue(-1)
    queue_handler = lh.QueueHandler(log_queue)
    queue_listener = lh.QueueListener(log_queue, console_handler, file_handler)
    root_logger.addHandler(queue_handler)
    queue_listener.start()
    return queue_listener
def __setstate__(self, state):
    """Restore process state after unpickling: rebuild the queue logger,
    output handles, optional DB engine, and the analyse_locus partial.

    :param state: the unpickled ``__dict__`` of the instance.
    """
    self.__dict__.update(state)
    # Re-create the unpicklable logging machinery.
    self.handler = logging_handlers.QueueHandler(self.logging_queue)
    self.logger = logging.getLogger(self.name)
    self.logger.addHandler(self.handler)
    self.logger.setLevel(self.json_conf["log_settings"]["log_level"])
    self.logger.propagate = False
    self._create_handles(self.__output_files)
    # With preloading disabled each worker opens its own DB connection;
    # otherwise data was preloaded and no engine is needed.
    if self.json_conf["pick"]["run_options"]["preload"] is False:
        self.engine = dbutils.connect(self.json_conf, self.logger)
    else:
        self.engine = None
    # Partial binding so callers can analyse loci without re-passing state.
    self.analyse_locus = functools.partial(analyse_locus,
                                           printer_queue=None,
                                           json_conf=self.json_conf,
                                           data_dict=self.__data_dict,
                                           engine=self.engine,
                                           logging_queue=self.logging_queue)
def scroll_text(window):
    """ ScrolledText that shows logs.

    Builds a ScrolledText widget inside *window*, attaches a QueueHandler
    to the module logger, and polls the queue every 10 ms to append
    color-tagged records to the widget.
    """
    def poll():
        # Drain every queued record into the widget, then re-schedule.
        while True:
            try:
                msg = log_queue.get(block=False)
                level = msg.levelname
                msg = formatter.format(msg) + '\n'
                scroll.insert('end', msg, level)
                scroll.yview('end')
            except queue.Empty:
                break
        # to avoid orphan poll()
        if log.hasHandlers():
            scroll.after(10, poll)
        else:
            return
    # clean old handlers — iterate a copy, because removeHandler mutates
    # the list being iterated (the original skipped every other handler).
    for i in log.handlers[:]:
        log.removeHandler(i)
    log_queue = queue.Queue()
    formatter = logging.Formatter(fmt=FMT, datefmt=DATEFMT)
    # do not add formatter to queuehandler, or msg will be formatted twice
    queue_handler = handlers.QueueHandler(log_queue)
    # give poll() time to quit: pass the callable and its argument so the
    # handler is added after 100 ms. The original *called* addHandler
    # immediately and scheduled its None return value, defeating the delay.
    root.after(100, log.addHandler, queue_handler)
    scroll = scrolledtext.ScrolledText(window)
    scroll.tag_config('INFO', foreground='black')
    scroll.tag_config('WARNING', foreground='orange')
    scroll.tag_config('ERROR', foreground='red')
    scroll.tag_config('CRITICAL', foreground='red')
    scroll.tag_config('EXCEPTION', foreground='red')
    scroll.pack(fill='both')
    scroll.after(0, poll)
def __init__(
        self,
        configuration,
        locus_queue,
        logging_queue,
        status_queue,
        identifier,
):
    """Worker-process initializer for the loci processor.

    Decodes and validates the msgpack-serialized configuration,
    pre-compiles the scoring requirement expressions, and wires
    queue-based logging.

    :param configuration: msgpack-serialized configuration object.
    :param locus_queue: queue of loci to analyse.
    :param logging_queue: shared queue consumed by the log listener.
    :param status_queue: queue for status updates back to the parent.
    :param identifier: numeric identifier used in the process name.
    """
    super(LociProcesser, self).__init__()
    configuration = load_and_validate_config(
        msgpack.loads(configuration, raw=False))
    self.logging_queue = logging_queue
    self.status_queue = status_queue
    self.__identifier = identifier  # Property directly unsettable
    self.name = "LociProcesser-{0}".format(self.identifier)
    self.configuration = configuration
    # Force compilation of every requirements expression up front so the
    # cost is paid once per worker rather than per locus.
    for section in (self.configuration.scoring.requirements,
                    self.configuration.scoring.as_requirements,
                    self.configuration.scoring.cds_requirements,
                    self.configuration.scoring.not_fragmentary):
        # Compile the expression
        _ = section.compiled
    self.engine = None
    self.session = None
    # Route this worker's log records to the parent via the shared queue.
    self.handler = logging_handlers.QueueHandler(self.logging_queue)
    self.logger = logging.getLogger(self.name)
    self.logger.addHandler(self.handler)
    self.logger.setLevel(self.configuration.log_settings.log_level)
    self.logger.propagate = False
    self.locus_queue = locus_queue
    self.logger.debug("Starting Process %s", self.name)
    self.logger.debug("Starting the pool for {0}".format(self.name))
def run(self):
    """Process entry point: route every root-logger record into the
    shared log queue (replacing any existing handlers)."""
    import quantnn.logging
    queue_handler = handlers.QueueHandler(self.log_queue)
    logging.getLogger().handlers = [queue_handler]
def __init__(self, xml_name, logger=None, json_conf=None):
    """Initializing method.

    Sets up queue-based logging, reads serialisation settings from the
    configuration, and (when an XML is given) creates the DB engine,
    schema and session.

    Arguments:
    :param xml_name: The XML(s) to parse.
    :param logger: a pre-configured logger (required).
    :param json_conf: a configuration dictionary (required).
    :type json_conf: dict
    """
    if json_conf is None:
        raise ValueError("No configuration provided!")
    if logger is not None:
        self.logger = check_logger(logger)
    else:
        raise ValueError("No logger provided!")
    self.logger.debug("Started to serialise %s, log level: %s",
                      xml_name, self.logger.level)
    # Runtime arguments
    self.procs = json_conf["threads"]
    self.single_thread = json_conf["serialise"]["single_thread"]
    self.json_conf = json_conf
    # pylint: disable=unexpected-argument,E1123
    multiprocessing.set_start_method(
        self.json_conf["multiprocessing_method"], force=True)
    # pylint: enable=unexpected-argument,E1123
    # Queue-based logging so worker processes can log through this parent.
    self.logging_queue = multiprocessing.Queue(-1)
    self.logger_queue_handler = logging_handlers.QueueHandler(
        self.logging_queue)
    self.queue_logger = logging.getLogger("parser")
    self.queue_logger.addHandler(self.logger_queue_handler)
    self.queue_logger.setLevel(self.json_conf["log_settings"]["log_level"])
    self.queue_logger.propagate = False
    # The listener drains the queue into the externally provided logger.
    self.log_writer = logging_handlers.QueueListener(
        self.logging_queue, self.logger)
    self.log_writer.start()
    self.__max_target_seqs = json_conf["serialise"]["max_target_seqs"]
    self.maxobjects = json_conf["serialise"]["max_objects"]
    target_seqs = json_conf["serialise"]["files"]["blast_targets"]
    query_seqs = json_conf["serialise"]["files"]["transcripts"]
    self.header = None
    if xml_name is None:
        # Nothing to serialise; leave the object partially initialized.
        self.logger.warning("No BLAST XML provided. Exiting.")
        return
    self.engine = connect(json_conf)
    # session = sessionmaker(autocommit=True)
    DBBASE.metadata.create_all(self.engine)  # @UndefinedVariable
    session = Session(bind=self.engine, autocommit=False, autoflush=False,
                      expire_on_commit=False)
    self.session = session  # session()
    self.logger.debug("Created the session")
    # Load sequences if necessary
    self.__determine_sequences(query_seqs, target_seqs)
    self.xml = xml_name
    # Just a mock definition
    # self.get_query = functools.partial(self.__get_query_for_blast)
    # Attributes that must be dropped before pickling this object.
    self.not_pickable = [
        "manager", "printer_process", "context", "logger_queue_handler",
        "queue_logger", "log_writer", "log_process", "pool", "main_logger",
        "log_handler", "log_writer", "logger", "session", "get_query",
        "engine", "query_seqs", "target_seqs"
    ]
    self.queries, self.targets = dict(), dict()
    self.logger.debug("Finished __init__")
def root_configurer(queue):
    """Configure the root logger of this process to push ERROR-and-above
    records onto *queue*."""
    root = logging.getLogger()
    root.addHandler(handlers.QueueHandler(queue))
    root.setLevel(logging.ERROR)