Example #1
def setupLogListener():
    global __logging_queue
    global __listener

    # rename the log levels with COLORFUL versions of themselves
    # because COLORS.
    logging.addLevelName(50, "\033[1;31mCRITICAL\033[0m")
    logging.addLevelName(40, "\033[0;31mERROR   \033[0m")
    logging.addLevelName(30, "\033[0;33mWARNING \033[0m")
    logging.addLevelName(20, "\033[0;37mINFO    \033[0m")
    logging.addLevelName(10, "\033[0;34mDEBUG   \033[0m")

    q = Queue()

    console_handler = logging.StreamHandler()
    # includes ANSI color escape codes for easier visual grepping
    detailed_formatter = logging.Formatter(
        '{levelname}[{asctime}.{msecs:03.0f}] line \033[1m{lineno:4d}\033[0m in \033[1;30m{funcName:40}\033[0m: \033[0;37m{message}\033[0m',
        datefmt='%H:%M:%S',
        style='{')
    # detailed_formatter = logging.Formatter('%(asctime)s %(name)-15s %(levelname)-8s %(message)s')
    # plain_formatter = logging.Formatter('%(asctime)s %(message)s')

    console_handler.setFormatter(detailed_formatter)
    # console_handler.setFormatter(plain_formatter)

    q_listener = handlers.QueueListener(q, console_handler)

    __logging_queue = q
    __listener = q_listener

    q_listener.start()
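A minimal companion sketch (not part of the original snippet, assuming it runs in the same module after setupLogListener()): a logger publishes records into the queue drained by the listener above; the logger name is illustrative.

setupLogListener()

log = logging.getLogger("worker")
log.setLevel(logging.DEBUG)
# records are put on __logging_queue and emitted by the QueueListener's console handler
log.addHandler(handlers.QueueHandler(__logging_queue))
log.info("routed through the queue listener")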
Example #2
    def replace_with_queue_handler(self):
        """ setup a central queue handler and start a listener thread """
        all_loggers = [root_logger] + [
            logging.getLogger(name) for name in root_logger.manager.loggerDict
        ]

        for logger in all_loggers:
            if not logger.handlers:
                continue

            # back up the current handlers, then clear them
            all_handlers = logger.handlers
            logger.handlers = []

            # add queue handler
            queue = self.qclass()
            q_handler = handlers.QueueHandler(queue)
            logger.addHandler(q_handler)

            ql = handlers.QueueListener(queue,
                                        *all_handlers,
                                        respect_handler_level=True)
            self.q_listeners.append(ql)

            # start listening
            atexit.register(ql.stop)
            ql.start()

        root_logger.debug('Logging queue listener started!')
Example #3
def setup_logger(args):

    """
    Function to set up the logger for the compare function.
    :param args:
    :return:
    """

    args.log_queue = mp.Queue(-1)
    args.queue_handler = log_handlers.QueueHandler(args.log_queue)

    if args.log is not None:
        _log_folder = os.path.dirname(args.log)
        if _log_folder and not os.path.exists(_log_folder):
            os.makedirs(_log_folder)
        # truncate any pre-existing log file, then attach a FileHandler to it
        handle = open(args.log, mode="wt")
        handle.close()
        handler = logging.FileHandler(handle.name)
        logger = logging.getLogger("main_compare")
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.propagate = False
    else:
        logger = create_default_logger("main_compare")
        handler = logger.handlers[0]

    if args.verbose is False:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.DEBUG)

    logger.propagate = False
    # the listener dispatches queued records through the logger itself (Logger.handle);
    # QueueListener has no "propagate" attribute, so the assignment below is inert
    log_queue_listener = log_handlers.QueueListener(args.log_queue, logger)
    log_queue_listener.propagate = False
    log_queue_listener.start()

    queue_logger = logging.getLogger("main_queue")
    for handler in queue_logger.handlers:
        queue_logger.removeHandler(handler)
    if args.verbose is False:
        queue_logger.setLevel(logging.INFO)
    else:
        queue_logger.setLevel(logging.DEBUG)
    main_queue_handler = log_handlers.QueueHandler(args.log_queue)
    queue_logger.propagate = False
    queue_logger.addHandler(main_queue_handler)

    return args, handler, logger, log_queue_listener, queue_logger
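The function above creates args.queue_handler but never attaches it to a logger, so presumably the worker processes wire themselves up. A hedged sketch of that worker-side step, reusing the snippet's log_handlers alias (the function and logger names are illustrative):

def setup_worker_logger(args):
    # in a child process: push records back through the shared multiprocessing queue
    worker_logger = logging.getLogger("compare_worker")
    worker_logger.addHandler(log_handlers.QueueHandler(args.log_queue))
    worker_logger.setLevel(logging.DEBUG if args.verbose else logging.INFO)
    worker_logger.propagate = False
    return worker_logger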
Example #4
def setup_logger(args, manager):

    """
    Function to set up the logger for the compare function.
    :param args:
    :param manager:
    :return:
    """

    logger = create_default_logger("main_compare")
    args.log_queue = manager.Queue()
    args.queue_handler = log_handlers.QueueHandler(args.log_queue)
    log_queue_listener = log_handlers.QueueListener(args.log_queue, logger)
    log_queue_listener.propagate = False
    log_queue_listener.start()

    if args.log is not None:
        if os.path.exists(args.log):
            os.remove(args.log)
        handler = logging.FileHandler(args.log)
        handler.setFormatter(logger.handlers[0].formatter)
        # Remove stream handler
        logger.removeHandler(logger.handlers[0])
        logger.addHandler(handler)
    else:
        handler = logger.handlers[0]

    if args.verbose is False:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.DEBUG)

    logger.propagate = False

    queue_logger = logging.getLogger("main_queue")
    for handler in queue_logger.handlers:
        queue_logger.removeHandler(handler)
    if args.verbose is False:
        queue_logger.setLevel(logging.INFO)
    else:
        queue_logger.setLevel(logging.DEBUG)
    main_queue_handler = log_handlers.QueueHandler(args.log_queue)
    queue_logger.propagate = False
    queue_logger.addHandler(main_queue_handler)

    return args, handler, logger, log_queue_listener, queue_logger
Example #5
    def _init_consume_logger(self, helper_name, log_dir):
        file_logger = logging.getLogger(helper_name)

        for level in _levels_to_record:
            file_path = "{}/{}.log.{}".format(log_dir, helper_name,
                                              level["name"])
            handler = handlers.TimedRotatingFileHandler(file_path,
                                                        backupCount=180,
                                                        encoding="utf-8",
                                                        when="midnight")
            handler.suffix = "%Y-%m-%d"
            handler.setLevel(logging.ERROR)
            file_logger.addHandler(handler)

        log_queue_listener = handlers.QueueListener(self._queue,
                                                    *file_logger.handlers,
                                                    respect_handler_level=True)
        log_queue_listener.start()
Example #6
    def define_logger(self):
        self._define_trace_logger()
        FORMAT = '%(asctime)-15s %(module)s-%(levelname)s: %(message)s'

        queue = Queue(-1)
        queue_handler = handlers.QueueHandler(queue)
        handler = logging.StreamHandler()
        handler.setLevel(self._log_level)
        listener = handlers.QueueListener(queue, handler)

        logger_name = 'accasim'
        logger = logging.getLogger(logger_name)
        logger.addHandler(queue_handler)
        formatter = logging.Formatter(FORMAT)
        handler.setFormatter(formatter)
        logger.setLevel(getattr(logging, self._log_level))

        self.constants.load_constant('LOGGER_NAME', logger_name)
        return logger, listener
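define_logger() returns the listener without starting it, so the caller is presumably expected to manage its lifecycle. A hedged usage sketch ("simulator" is an illustrative instance name):

logger, listener = simulator.define_logger()
listener.start()
logger.info("messages now flow through the queue to the console handler")
# ... run the simulation ...
listener.stop()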
Example #7
def setup_trinity_logging(level):
    from .mp import ctx

    log_queue = ctx.Queue()

    logging.basicConfig(level=level)
    logger = logging.getLogger('trinity')

    handler = logging.StreamHandler(sys.stdout)

    formatter = logging.Formatter(
        '%(levelname)s %(name)s %(asctime)s - %(message)s')
    handler.setFormatter(formatter)

    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)

    listener = handlers.QueueListener(log_queue, logger)

    return logger, log_queue, listener
Example #8
def setup_logging():
    root_logger = l.getLogger()
    root_logger.setLevel(options.v)

    file_formatter = l.Formatter(
        "%(asctime)s [%(threadName)-10.10s] [%(levelname)-5.5s]: %(message)s")
    file_handler = l.FileHandler(environ.get('HOME', '.') + '/.sapfy.log')
    file_handler.setFormatter(file_formatter)

    log_formatter = l.Formatter("%(asctime)s [%(levelname)-5.5s]: %(message)s",
                                datefmt='%H:%M:%S')
    console_handler = l.StreamHandler()
    console_handler.setFormatter(log_formatter)

    log_queue = queue.Queue(-1)
    queue_handler = lh.QueueHandler(log_queue)
    queue_listener = lh.QueueListener(log_queue, console_handler, file_handler)
    root_logger.addHandler(queue_handler)
    queue_listener.start()
    return queue_listener
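The returned listener should be stopped before the program exits so queued records are flushed; a hedged sketch of how a caller might arrange that:

import atexit

listener = setup_logging()
atexit.register(listener.stop)  # flush any queued records on shutdown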
Example #9
    def __init__(self, queue, default_path='logging.yaml'):
        '''
        Init params:
        queue -- The queue to extract records from.
        default_path -- Default path where the logging configuration file resides.

        If the file is not present or another exception occurs, the failed attribute gets set to True.
        '''

        try:
            with open(default_path, 'rt') as f:
                config = yaml.safe_load(f.read())
                logging.config.dictConfig(config)
            self.failed = False
        except Exception:
            self.failed = True

        logger = logging.getLogger()
        hdlrs = logger.handlers
        self._listener = handlers.QueueListener(queue,
                                                *hdlrs,
                                                respect_handler_level=True)
Example #10
def setup_helios_logging(
        level: int) -> Tuple[Logger, Queue, handlers.QueueListener]:
    from mp import ctx

    log_queue = ctx.Queue()

    logging.basicConfig(level=level)
    logger = logging.getLogger('helios')

    handler = logging.StreamHandler(sys.stdout)

    # TODO: allow configuring `detailed` logging
    formatter = logging.Formatter(
        fmt='%(levelname)8s  %(asctime)s  %(module)10s  %(message)s',
        datefmt='%m-%d %H:%M:%S')
    handler.setFormatter(formatter)

    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)

    listener = handlers.QueueListener(log_queue, handler)

    return logger, log_queue, listener
Example #11
    def setup_logger(self):

        """This function sets up the logger for the class.
        It creates the instance attribute "log_writer", which is itself a
        logging.handlers.QueueListener instance listening on the logging_queue
        instance attribute (which is a normal mp.Manager.Queue instance)."""

        self.logging_queue = multiprocessing.Queue(-1)
        self.printer_queue = multiprocessing.Queue(-1)
        self.formatter = formatter
        self.main_logger = logging.getLogger("main_logger")
        if not os.path.exists(self.configuration.pick.files.output_dir):
            try:
                os.makedirs(self.configuration.pick.files.output_dir)
            except (OSError, PermissionError) as exc:
                self.logger.error("Failed to create the output directory!")
                self.logger.exception(exc)
                raise
        elif not os.path.isdir(self.configuration.pick.files.output_dir):
            self.logger.error(
                "The specified output directory %s exists and is not a file; aborting",
                self.configuration.pick.files.output_dir)
            raise OSError("The specified output directory %s exists and is not a file; aborting" %
                          self.configuration.pick.files.output_dir)

        self.logger = logging.getLogger("listener")
        self.logger.propagate = False
        if (self.configuration.pick.files.log is None or
                self.configuration.pick.files.log in ("stream", "")):
            self.log_handler = logging.StreamHandler()
        else:
            if os.path.basename(self.configuration.pick.files.log) == self.configuration.pick.files.log:
                fname = path_join(self.configuration.pick.files.output_dir,
                                  self.configuration.pick.files.log)
            else:
                fname = self.configuration.pick.files.log

            self.log_handler = logging.FileHandler(filename=fname, mode='w')
            assert os.path.exists(fname)

        # For the main logger I want to keep it at the "INFO" level
        self.log_level = self.configuration.log_settings.log_level
        self.log_handler.setFormatter(self.formatter)
        self.logger.setLevel(self.log_level)
        self.logger.addHandler(self.log_handler)

        if self.log_level == "DEBUG" and self.configuration.threads > 1:
            self.main_logger.setLevel(logging.DEBUG)
            self.main_logger.warning(
                    "Due to a Python design bug, we have to force Mikado to go in single-threaded mode when debugging.")
            self.procs = self.configuration.threads = 1
        else:
            self.main_logger.setLevel(logging.INFO)
        self.main_logger.addHandler(self.log_handler)

        self.main_logger.info(f"Mikado version: {version.__version__}")
        if self.commandline != '':
            self.main_logger.info("Command line: {0}".format(self.commandline))
        else:
            self.main_logger.info(
                "Analysis launched directly, without using the launch script.")

        self.main_logger.info("Begun analysis of {0}".format(self.input_file))
        # Create the shared DB if necessary
        self.log_writer = logging_handlers.QueueListener(self.logging_queue, self.logger)
        self.log_writer.start()

        self.logger_queue_handler = logging_handlers.QueueHandler(self.logging_queue)
        self.queue_logger = logging.getLogger("parser")
        self.queue_logger.addHandler(self.logger_queue_handler)

        self.queue_logger.setLevel(logging.getLevelName(self.configuration.log_settings.log_level))
        self.logger.warning("Current level for queue: %s", logging.getLevelName(self.queue_logger.level))

        self.queue_logger.propagate = False

        # Configure SQL logging
        sqllogger = logging.getLogger("sqlalchemy.engine")
        sqllogger.setLevel(self.configuration.log_settings.sql_level)
        sqllogger.addHandler(self.logger_queue_handler)

        return
Example #12
    def __init__(self, xml_name, logger=None, json_conf=None):
        """Initializing method. Arguments:

        :param xml_name: The XML(s) to parse.

        Arguments:

        :param json_conf: a configuration dictionary.
        :type json_conf: dict


        """

        if json_conf is None:
            raise ValueError("No configuration provided!")

        if logger is not None:
            self.logger = check_logger(logger)
        else:
            raise ValueError("No logger provided!")
        self.logger.debug("Started to serialise %s, log level: %s", xml_name,
                          self.logger.level)

        # Runtime arguments

        self.procs = json_conf["threads"]
        self.single_thread = json_conf["serialise"]["single_thread"]
        self.json_conf = json_conf
        # pylint: disable=unexpected-argument,E1123
        multiprocessing.set_start_method(
            self.json_conf["multiprocessing_method"], force=True)
        # pylint: enable=unexpected-argument,E1123
        self.logging_queue = multiprocessing.Queue(-1)
        self.logger_queue_handler = logging_handlers.QueueHandler(
            self.logging_queue)
        self.queue_logger = logging.getLogger("parser")
        self.queue_logger.addHandler(self.logger_queue_handler)
        self.queue_logger.setLevel(self.json_conf["log_settings"]["log_level"])
        self.queue_logger.propagate = False
        self.log_writer = logging_handlers.QueueListener(
            self.logging_queue, self.logger)
        self.log_writer.start()

        self.__max_target_seqs = json_conf["serialise"]["max_target_seqs"]
        self.maxobjects = json_conf["serialise"]["max_objects"]
        target_seqs = json_conf["serialise"]["files"]["blast_targets"]
        query_seqs = json_conf["serialise"]["files"]["transcripts"]

        self.header = None
        if xml_name is None:
            self.logger.warning("No BLAST XML provided. Exiting.")
            return

        self.engine = connect(json_conf)

        # session = sessionmaker(autocommit=True)
        DBBASE.metadata.create_all(self.engine)  # @UndefinedVariable
        session = Session(bind=self.engine,
                          autocommit=False,
                          autoflush=False,
                          expire_on_commit=False)
        self.session = session  # session()
        self.logger.debug("Created the session")
        # Load sequences if necessary
        self.__determine_sequences(query_seqs, target_seqs)
        self.xml = xml_name
        # Just a mock definition
        # self.get_query = functools.partial(self.__get_query_for_blast)
        self.not_pickable = [
            "manager", "printer_process", "context", "logger_queue_handler",
            "queue_logger", "log_writer", "log_process", "pool", "main_logger",
            "log_handler", "log_writer", "logger", "session", "get_query",
            "engine", "query_seqs", "target_seqs"
        ]

        self.queries, self.targets = dict(), dict()

        self.logger.debug("Finished __init__")
Example #13
def listener_configurer(queue_log_record: queue.Queue[LogRecord]) -> handlers.QueueListener:
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter("[%(levelname)s/%(processName)s] %(message)s"))
    return handlers.QueueListener(queue_log_record, console_handler)
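listener_configurer covers the consumer half of the usual multiprocessing logging pattern; a hedged sketch of the worker-side counterpart (function name illustrative):

def worker_configurer(queue_log_record: queue.Queue[LogRecord]) -> None:
    # in each worker process: forward every record into the shared queue
    root = logging.getLogger()
    root.addHandler(handlers.QueueHandler(queue_log_record))
    root.setLevel(logging.DEBUG)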
Example #14
    def __init__(self, xml_name,
                 logger=None,
                 configuration=None):
        """Initializing method. Arguments:

        :param xml_name: The XML(s) to parse.

        Arguments:

        :param configuration: a configuration dictionary.
        :type configuration: (MikadoConfiguration|DaijinConfiguration)


        """

        if configuration is None:
            raise ValueError("No configuration provided!")

        if logger is not None:
            self.logger = check_logger(logger)
        else:
            raise ValueError("No logger provided!")
        self.logger.debug("Started to serialise %s, log level: %s",
                         xml_name, self.logger.level)

        # Runtime arguments

        self.procs = configuration.threads
        self.single_thread = configuration.serialise.single_thread
        self.configuration = configuration
        # pylint: disable=unexpected-argument,E1123
        multiprocessing.set_start_method(self.configuration.multiprocessing_method, force=True)
        # pylint: enable=unexpected-argument,E1123
        self.logger.info("Number of dedicated workers: %d", self.procs)
        self.logging_queue = multiprocessing.Queue(-1)
        self.logger_queue_handler = logging_handlers.QueueHandler(self.logging_queue)
        self.queue_logger = logging.getLogger("parser")
        self.queue_logger.addHandler(self.logger_queue_handler)
        self.queue_logger.setLevel(self.configuration.log_settings.log_level)
        self.queue_logger.propagate = False
        self.log_writer = logging_handlers.QueueListener(self.logging_queue, self.logger)
        self.log_writer.start()

        self._max_target_seqs = configuration.serialise.max_target_seqs
        self.maxobjects = configuration.serialise.max_objects
        target_seqs = configuration.serialise.files.blast_targets
        query_seqs = configuration.serialise.files.transcripts

        self.header = None
        if xml_name is None:
            self.logger.warning("No BLAST XML provided. Exiting.")
            return

        self.engine = connect(configuration)

        self._blast_loading_debug = self.configuration.serialise.files.blast_loading_debug
        if self._blast_loading_debug:
            self.logger.warning("Activating the XML debug mode")
            self.single_thread = True
            self.procs = 1

        DBBASE.metadata.create_all(self.engine)  # @UndefinedVariable
        session = Session(bind=self.engine)
        self.session = session
        self.hit_i_string = str(Hit.__table__.insert(bind=self.engine).compile())
        self.hsp_i_string = str(Hsp.__table__.insert(bind=self.engine).compile())
        # Remove indices
        self.logger.debug("Created the session")
        # Load sequences if necessary
        self.__determine_sequences(query_seqs, target_seqs)
        self.xml = xml_name
        # Just a mock definition
        self.not_pickable = ["manager", "printer_process",
                             "context", "logger_queue_handler", "queue_logger",
                             "log_writer",
                             "log_process", "pool", "main_logger",
                             "log_handler", "log_writer", "logger", "session",
                             "get_query", "engine", "query_seqs", "target_seqs"]

        self.queries, self.targets = dict(), dict()

        self.logger.debug("Finished __init__")
Example #15
    def setup_logger(self):
        """This function sets up the logger for the class.
        It creates the instance attribute "log_writer", which is itself a
        logging.handlers.QueueListener instance listening on the logging_queue
        instance attribute (which is a normal mp.Manager.Queue instance)."""

        self.formatter = formatter
        self.main_logger = logging.getLogger("main_logger")
        if not os.path.exists(self.json_conf["pick"]["files"]["output_dir"]):
            try:
                os.makedirs(self.json_conf["pick"]["files"]["output_dir"])
            except (OSError, PermissionError) as exc:
                self.logger.error("Failed to create the output directory!")
                self.logger.exception(exc)
                raise
        elif not os.path.isdir(self.json_conf["pick"]["files"]["output_dir"]):
            self.logger.error(
                "The specified output directory %s exists and is not a file; aborting",
                self.json_conf["pick"]["files"]["output_dir"])
            raise OSError(
                "The specified output directory %s exists and is not a file; aborting"
                % self.json_conf["pick"]["files"]["output_dir"])

        self.logger = logging.getLogger("listener")
        self.logger.propagate = False
        if (self.json_conf["pick"]["files"]["log"] is None
                or self.json_conf["pick"]["files"]["log"] == "stream"):
            self.log_handler = logging.StreamHandler()
        else:
            self.log_handler = logging.FileHandler(
                path_join(self.json_conf["pick"]["files"]["output_dir"],
                          self.json_conf["pick"]["files"]["log"]), 'w')
        # For the main logger I want to keep it at the "INFO" level
        self.log_level = self.json_conf["log_settings"]["log_level"]

        self.log_handler.setFormatter(self.formatter)
        self.logger.setLevel(self.log_level)
        self.logger.addHandler(self.log_handler)

        if self.log_level == "DEBUG":
            self.main_logger.setLevel(logging.DEBUG)
        else:
            self.main_logger.setLevel(logging.INFO)
        self.main_logger.addHandler(self.log_handler)

        self.main_logger.info("Begun analysis of {0}".format(self.input_file))
        if self.commandline != '':
            self.main_logger.info("Command line: {0}".format(self.commandline))
        else:
            self.main_logger.info(
                "Analysis launched directly, without using the launch script.")

        # Create the shared DB if necessary
        self.setup_shm_db()

        if self.json_conf["pick"]["chimera_split"]["blast_check"] is True and \
                self.json_conf["log_settings"]["log_level"] == "DEBUG":
            engine = dbutils.connect(self.json_conf, self.main_logger)
            smaker = sessionmaker()
            smaker.configure(bind=engine)
            session = smaker()

            evalue = self.json_conf["pick"]["chimera_split"]["blast_params"][
                "evalue"]
            queries_with_hits = session.query(
                Hit.query_id).filter(Hit.evalue <= evalue).distinct().count()
            total_queries = session.query(Query).count()
            self.main_logger.debug(
                "Queries with at least one hit at evalue<=%f: %d out of %d (%f%%)",
                evalue, queries_with_hits, total_queries,
                0 if total_queries == 0 else round(
                    100 * queries_with_hits / total_queries, 2))
            session.close()

        self.log_writer = logging_handlers.QueueListener(
            self.logging_queue, self.logger)
        self.log_writer.start()

        return
Example #16
    def __serialize_multiple_threads(self):
        """"""

        send_queue = mp.JoinableQueue(-1)
        return_queue = mp.JoinableQueue(-1)
        self.logging_queue = mp.Queue(-1)
        self.logger_queue_handler = logging_handlers.QueueHandler(
            self.logging_queue)
        self.queue_logger = logging.getLogger("parser")
        self.queue_logger.addHandler(self.logger_queue_handler)
        self.queue_logger.setLevel(self.log_level)
        self.queue_logger.propagate = False
        self.log_writer = logging_handlers.QueueListener(
            self.logging_queue, self.logger)
        self.log_writer.start()

        parsers = [
            bed12.Bed12ParseWrapper(rec_queue=send_queue,
                                    log_queue=self.logging_queue,
                                    level=self.log_level,
                                    return_queue=return_queue,
                                    fasta_index=self.fasta_index.filename,
                                    is_gff=(not self.is_bed12),
                                    transcriptomic=True,
                                    max_regression=self._max_regression,
                                    table=self._table)
            for _ in range(self.procs)
        ]

        [_.start() for _ in parsers]

        for line in open(self._handle):
            send_queue.put(line.encode())
        send_queue.put("EXIT")
        not_found = set()
        done = 0
        objects = []
        procs_done = 0
        while True:
            object = return_queue.get()
            if object in ("FINISHED", b"FINISHED"):
                procs_done += 1
                if procs_done == self.procs:
                    break
                else:
                    continue
            try:
                object = msgpack.loads(object, raw=False)
            except TypeError:
                raise TypeError(object)

            if object["id"] in self.cache:
                current_query = self.cache[object["id"]]
            elif not self.initial_cache:
                current_query = Query(object["id"], object["end"])
                not_found.add(object["id"])
                self.session.add(current_query)
                self.session.commit()
                self.cache[current_query.query_name] = current_query.query_id
                current_query = current_query.query_id
            else:
                self.logger.critical(
                    "The provided ORFs do not match the transcripts provided and already present in the database.\
This could be due to having called the ORFs on a FASTA file different from `mikado_prepared.fasta`, the output of \
mikado prepare. If this is the case, please use mikado_prepared.fasta to call the ORFs and then restart \
`mikado serialise` using them as input.")
                raise InvalidSerialization

            object["query_id"] = current_query
            objects.append(object)
            if len(objects) >= self.maxobjects:
                done += len(objects)
                self.session.begin(subtransactions=True)
                # self.session.bulk_save_objects(objects)
                self.engine.execute(Orf.__table__.insert(), objects)
                self.session.commit()
                self.logger.debug("Loaded %d ORFs into the database", done)
                objects = []

        [proc.join() for proc in parsers]
        done += len(objects)
        # self.session.begin(subtransactions=True)
        # self.session.bulk_save_objects(objects, update_changed_only=False)
        if objects:
            self.engine.execute(Orf.__table__.insert(), objects)
        return_queue.close()
        send_queue.close()
        self.session.commit()
        self.session.close()
        self.logger.info("Finished loading %d ORFs into the database", done)

        orfs = pd.read_sql_table("orf", self.engine, index_col="query_id")
        if orfs.shape[0] != done:
            raise ValueError(
                "I should have serialised {} ORFs, but {} are present!".format(
                    done, orfs.shape[0]))
Example #17
def debug(log_dir='./'):
    """
    Enables some debugging utilities for logging and pdb.

    Includes:

    * Automatically dropping into a post-mortem pdb debugger session
      whenever an exception is raised.
    * Enables fast DEBUG logging to a logging file via QueueHandler.
    * Copies all stdout output to the logging file. (Experimental)

    **References**

    1. Automatically start the debugger on an exception (Python recipe), Thomas Heller, 2001,
        [Link](http://code.activestate.com/recipes/65287-automatically-start-the-debugger-on-an-exception/)
    2. Dealing with handlers that block, Python Documentation, 2019.
        [Link](https://docs.python.org/3/howto/logging-cookbook.html#dealing-with-handlers-that-block)

    **Arguments**

    * **log_dir** (str, *optional*, Default: './') - Location to store the log files.

    **Example**

    ~~~python
    ch.debug.debug()
    raise Exception('My exception')
    -> raise('My exception')
    (Pdb)
    ~~~

    """
    global IS_DEBUGGING
    if not IS_DEBUGGING:
        # Enable debugging logging.
        now = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
        log_file = os.path.join(log_dir, 'cherry_debug_' + now + '.log')
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)

        # Experimental: forward stdout/print to log_file too
        log_file = open(log_file, mode='a', buffering=1, encoding='utf-8')
        stdout_write = sys.stdout.write
        stderr_write = sys.stderr.write

        def custom_stdout_write(*args, **kwargs):
            stdout_write(*args, **kwargs)
            log_file.write(*args, **kwargs)

        def custom_stderr_write(*args, **kwargs):
            stderr_write(*args, **kwargs)
            log_file.write(*args, **kwargs)

        def custom_newline_stdout(*args, **kwargs):
            custom_stdout_write(*args, **kwargs)
            custom_stdout_write('\n')

        global print
        print = custom_newline_stdout
        sys.stdout.write = custom_stdout_write
        sys.stderr.write = custom_stderr_write

        # Log to file using queue handler and listener
        logger.setLevel(logging.DEBUG)
        debug_queue = queue.Queue(-1)
        queue_handler = handlers.QueueHandler(debug_queue)
        logger.addHandler(queue_handler)
        debug_fmt = logging.Formatter(
            fmt='%(asctime)s - %(name)s - %(levelname)s \n%(message)s',
            datefmt='%Y-%m-%d %H:%M:%S')
        debug_handler = logging.StreamHandler(log_file)
        debug_handler.setFormatter(debug_fmt)
        debug_handler.setLevel(logging.DEBUG)
        queue_listener = handlers.QueueListener(debug_queue, debug_handler)
        queue_listener.start()
        logger.debug('Debugging started.')

        # Enable automatic post-mortem on Exception.
        def info(type, value, tb):
            if hasattr(sys, 'ps1') or not sys.stderr.isatty():
                sys.__excepthook__(type, value, tb)
            else:
                traceback.print_exception(type, value, tb)
                pdb.pm()

        sys.excepthook = info

        # Turn debug flag on.
        IS_DEBUGGING = True
Example #18
# date 2020-03-12 19:53:17
# author calllivecn <*****@*****.**>

import os
import time
import logging
import multiprocessing as mp
from logging import handlers

que = mp.Queue(-1)

queue_handler = handlers.QueueHandler(que)

handler = logging.StreamHandler()

listener = handlers.QueueListener(que, handler)

logger = logging.getLogger()

fmt = logging.Formatter("%(asctime)s.%(msecs)d %(thread)s %(message)s",
                        datefmt="%Y-%m-%d-%H:%M%S")

handler.setFormatter(fmt)

handler.setLevel(logging.DEBUG)

logger.addHandler(queue_handler)

logger.setLevel(logging.DEBUG)
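Note that this module builds the listener but never starts it, so as written nothing would reach the StreamHandler. A hedged sketch of the missing lifecycle calls:

listener.start()
logger.debug("queued, then printed by the listener thread")
listener.stop()  # drain the queue and stop the background thread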