Example #1
def filter_by_level(
    logger: logging.Logger, method_name: str, event_dict: EventDict
) -> EventDict:
    """
    Check whether logging is configured to accept messages from this log level.

    Should be the first processor if stdlib's filtering by level is used so
    possibly expensive processors like exception formatters are avoided in the
    first place.

    >>> import logging
    >>> from structlog.stdlib import filter_by_level
    >>> logging.basicConfig(level=logging.WARN)
    >>> logger = logging.getLogger()
    >>> filter_by_level(logger, 'warn', {})
    {}
    >>> filter_by_level(logger, 'debug', {})
    Traceback (most recent call last):
    ...
    DropEvent
    """
    if logger.isEnabledFor(_NAME_TO_LEVEL[method_name]):
        return event_dict
    else:
        raise DropEvent
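As the docstring says, this processor should run first so that stdlib level filtering short-circuits the rest of the chain. A minimal configuration sketch using real structlog APIs (the exact processor chain is illustrative, not this project's setup):

import logging
import structlog

logging.basicConfig(level=logging.INFO)

structlog.configure(
    processors=[
        structlog.stdlib.filter_by_level,      # first: cheaply drop below-threshold events
        structlog.processors.format_exc_info,  # potentially expensive; skipped for dropped events
        structlog.processors.JSONRenderer(),
    ],
    logger_factory=structlog.stdlib.LoggerFactory(),
)

log = structlog.get_logger()
log.debug("dropped before the expensive processors run")
log.warning("rendered as JSON")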
Example #2
def do_log(logger: logging.Logger, log_level: int) -> bool:
    """Whether to log the `log_level` to the given `logger` or not.
    
    Parameters
    ----------
    logger : logging.Logger
        The logger object
    log_level : int
        The log level to check

    Returns
    -------
    bool
        Whether to do the log or not
    """
    global __do_log_cache

    if not isinstance(logger, logging.Logger):
        return False

    if logger.name not in __do_log_cache:
        __do_log_cache[logger.name] = {}

    if log_level not in __do_log_cache[logger.name]:
        from .config import ENABLED_PROGRAM_LOG_LEVELS
        __do_log_cache[logger.name][log_level] = (
            logger.isEnabledFor(log_level)
            and log_level in ENABLED_PROGRAM_LOG_LEVELS)
    return __do_log_cache[logger.name][log_level]
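A usage sketch; `__do_log_cache` is the module-level dict this function assumes (shown here for completeness), and `expensive_repr` is a hypothetical costly call worth guarding:

import logging

__do_log_cache = {}  # {logger_name: {level: bool}}, consulted by do_log above

def expensive_repr() -> str:
    return "..."  # hypothetical costly formatting worth guarding

logger = logging.getLogger("myapp")
if do_log(logger, logging.DEBUG):
    logger.debug("state: %s", expensive_repr())

Note that entries are cached forever, so runtime changes to logger levels or to ENABLED_PROGRAM_LOG_LEVELS are not picked up for a (logger, level) pair that has already been checked.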
Example #3
 def isEnabledFor(level):  # note: `self` is captured from the enclosing scope
     try:
         # per-endpoint override: enabled when the level meets the endpoint's threshold
         return level >= SpecificLevelLog[request.endpoint]
     except Exception:
         # no override for this endpoint (or outside a request context): defer to the app logger
         return Logger.isEnabledFor(self.app.logger, level)
Example #4
def PygtailLogger(logger: logging.Logger,
                  filename: str,
                  prefix: str = "2| ") -> Iterator[Callable[[], None]]:
    """
    Helper for streaming task stderr into logger using pygtail. Context manager yielding a function
    which reads the latest lines from the file and writes them into logger at verbose level. This
    function also runs automatically on context exit.

    Truncates lines at 4KB in case writer goes haywire.
    """
    pygtail = None
    if logger.isEnabledFor(VERBOSE_LEVEL):
        pygtail = Pygtail(filename, full_lines=True)

    def poll() -> None:
        nonlocal pygtail
        if pygtail:
            try:
                for line in pygtail:
                    logger.verbose(
                        (prefix + line.rstrip())[:4096])  # pyre-ignore
            except Exception:
                # cf. https://github.com/bgreenlee/pygtail/issues/48
                logger.verbose(  # pyre-ignore
                    "incomplete log stream due to the following exception; see %s",
                    filename,
                    exc_info=sys.exc_info(),
                )
                pygtail = None

    try:
        yield poll
    finally:
        poll()
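Since the function yields a callable, it is presumably decorated with `contextlib.contextmanager` in the original module. A hedged usage sketch (`task_is_running` is hypothetical and stubbed here):

import logging
import time

logger = logging.getLogger("task")

def task_is_running() -> bool:
    return False  # hypothetical completion check, stubbed for the sketch

with PygtailLogger(logger, "stderr.txt") as poll:
    while task_is_running():
        poll()       # forward newly appended stderr lines
        time.sleep(1)
# any remaining lines are flushed by the final poll() on context exit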
Example #5
File: lib.py Project: OS-Q/S48
    def _save_config(config: configparser.ConfigParser, path: pathlib.Path,
                     logger: logging.Logger) -> int:
        """
        Writes the ConfigParser 'config' to the file 'path' and logs using the Logger 'logger'.

        We declare this helper function so it can be safely invoked by both internal methods and outside code. The
        latter case is suitable for weakref finalizer objects: one of their main requirements is to not keep
        references to the destroyable object in any of the finalizer's arguments, so an ordinary bound class method
        does not fit well.

        Returns:
            0 on success, -1 otherwise
        """
        try:
            with path.joinpath(stm32pio.core.settings.config_file_name).open(
                    mode='w') as config_file:
                config.write(config_file)
            logger.debug(
                f"{stm32pio.core.settings.config_file_name} config file has been saved"
            )
            return 0
        except Exception as e:
            logger.warning(f"cannot save the config: {e}",
                           exc_info=logger.isEnabledFor(logging.DEBUG))
            return -1
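The docstring's point about finalizers can be made concrete: `weakref.finalize(obj, func, *args)` must not receive references to `obj` through `func` or `args`, otherwise `obj` is kept alive; a free helper satisfies that where a bound method would not. A sketch under that assumption (the `Project` class and `save_config` stand-in are hypothetical):

import weakref

def save_config(config, path, logger):
    ...  # stand-in for the _save_config helper above (hypothetical wiring)

class Project:
    def __init__(self, config, path, logger):
        self.config, self.path, self.logger = config, path, logger
        # the finalizer holds only the helper and its arguments, never `self`,
        # so the Project instance can still be garbage-collected
        self._finalizer = weakref.finalize(self, save_config, config, path, logger)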
Example #6
 def __init__(self, data: memoryview, logger: logging.Logger):
     self._file = io.BytesIO(data)
     self._size = len(data)
     self._enabled = logger.isEnabledFor(logging.INFO)
     if self._enabled:
         self._progress = progress.Bar(expected_size=self._size)
     else:
         logger.debug("Progress indication is not enabled")
Example #7
 def callHandlers(self, record):
     if self._db is not None:
         record.msg = record.getMessage()
         record.args = None
         if record.exc_info:
             record.msg = (record.msg and record.msg + '\n') + ''.join(
                 format_exception(*record.exc_info)).strip()
             record.exc_info = None
         self._queue(record)
     if Logger.isEnabledFor(self, record.levelno):
         record.name = self.name or 'NEO'
         self.parent.callHandlers(record)
Example #9
def do_notify(
    host_config: HostConfig,
    logger: Logger,
    event: Event,
    username: Optional[str] = None,
    is_cancelling: bool = False,
) -> None:
    if not _core_has_notifications_enabled(logger):
        logger.info(
            "Notifications are currently disabled. Skipped notification for event %d"
            % event["id"])
        return

    context = _create_notification_context(host_config, event, username,
                                           is_cancelling, logger)

    if logger.isEnabledFor(VERBOSE):
        logger.log(
            VERBOSE,
            "Sending notification via Check_MK with the following context:")
        for varname, value in sorted(context.items()):
            logger.log(VERBOSE, "  %-25s: %s", varname, value)

    if context["HOSTDOWNTIME"] != "0":
        logger.info("Host %s is currently in scheduled downtime. "
                    "Skipping notification of event %s." %
                    (context["HOSTNAME"], event["id"]))
        return

    # Send notification context via stdin.
    context_string = "".join("{}={}\n".format(
        varname, value.replace("\n", "\\n")
    ) for (varname, value) in context.items() if isinstance(
        value, str
    )  # context is TypedDict which is dict[str, object], in fact it is dict[str, str]
                             )

    completed_process = subprocess.run(
        ["cmk", "--notify", "stdin"],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        close_fds=True,
        encoding="utf-8",
        input=context_string,
        check=False,
    )
    if completed_process.returncode:
        logger.error("Error notifying via Check_MK: %s" %
                     completed_process.stdout.strip())
    else:
        logger.info(
            "Successfully forwarded notification for event %d to Check_MK" %
            event["id"])
Example #10
def raise_from_process(log: logging.Logger,
                       completed_process: CompletedProcess,
                       process_title: Callable[[], str]):
    if completed_process.returncode != 0:
        raise ProcessReturnedErrorCode(
            f'Process returned non-zero code:\n'
            f'{process_title()}\n'
            f'{render_process_msg(completed_process)}')
    else:
        if log.isEnabledFor(logging.DEBUG):
            log.debug(f'Successfully executed sub-process:\n'
                      f'{process_title()}\n'
                      f'{render_process_msg(completed_process)}')
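A usage sketch; taking `process_title` as a callable defers building the title string until a message is actually emitted (the command and title here are illustrative):

import logging
import subprocess

log = logging.getLogger("runner")
completed = subprocess.run(["ls", "-l"], stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE, text=True)
raise_from_process(log, completed, lambda: "list working directory")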
Example #11
def do_notify(
    host_config: HostConfig,
    logger: Logger,
    event: Event,
    username: Optional[str] = None,
    is_cancelling: bool = False,
) -> None:
    if not _core_has_notifications_enabled(logger):
        logger.info(
            "Notifications are currently disabled. Skipped notification for event %d"
            % event["id"])
        return

    context = _create_notification_context(host_config, event, username,
                                           is_cancelling, logger)

    if logger.isEnabledFor(VERBOSE):
        logger.log(
            VERBOSE,
            "Sending notification via Check_MK with the following context:")
        for varname, value in sorted(context.items()):
            logger.log(VERBOSE, "  %-25s: %s", varname, value)

    if context["HOSTDOWNTIME"] != "0":
        logger.info("Host %s is currently in scheduled downtime. "
                    "Skipping notification of event %s." %
                    (context["HOSTNAME"], event["id"]))
        return

    # Send notification context via stdin.
    context_string = "".join([
        "%s=%s\n" % (varname, value.replace("\n", "\\n"))
        for (varname, value) in context.items()
    ])

    p = subprocess.Popen(  # pylint:disable=consider-using-with
        ["cmk", "--notify", "stdin"],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        close_fds=True,
        encoding="utf-8",
    )
    stdout, _stderr = p.communicate(input=context_string)
    status = p.returncode
    if status:
        logger.error("Error notifying via Check_MK: %s" % stdout.strip())
    else:
        logger.info(
            "Successfully forwarded notification for event %d to Check_MK" %
            event["id"])
Example #12
    def _run_invocation(self, logger: logging.Logger, cleanup: ExitStack,
                        image: str) -> List[str]:
        """
        Formulate `singularity run` command-line invocation
        """

        ans = self.cli_exe
        if logger.isEnabledFor(logging.DEBUG):
            ans.append("--verbose")
        ans += [
            "run",
            "--pwd",
            os.path.join(self.container_dir, "work"),
        ]
        if self.runtime_values.get("privileged", False) is True:
            logger.warning(
                "runtime.privileged enabled (security & portability warning)")
            ans += ["--add-caps", "all"]
        ans += self.cfg.get_list("singularity", "run_options")

        mounts = self.prepare_mounts()
        # Also create a scratch directory and mount to /tmp and /var/tmp
        # For context why this is needed:
        #   https://github.com/hpcng/singularity/issues/5718
        tempdir = cleanup.enter_context(
            tempfile.TemporaryDirectory(prefix="miniwdl_singularity_"))
        os.mkdir(os.path.join(tempdir, "tmp"))
        os.mkdir(os.path.join(tempdir, "var_tmp"))
        mounts.append(("/tmp", os.path.join(tempdir, "tmp"), True))
        mounts.append(("/var/tmp", os.path.join(tempdir, "var_tmp"), True))

        logger.info(
            _(
                "singularity invocation",
                args=" ".join(shlex.quote(s) for s in (ans + [image])),
                binds=len(mounts),
                tmpdir=tempdir,
            ))
        for (container_path, host_path, writable) in mounts:
            if ":" in (container_path + host_path):
                raise InputError(
                    "Singularity input filenames cannot contain ':'")
            ans.append("--bind")
            bind_arg = f"{host_path}:{container_path}"
            if not writable:
                bind_arg += ":ro"
            ans.append(bind_arg)
        ans.append(image)
        return ans
Example #13
    def execute(self, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T:
        """
        Overrides the parent method to add log messages.

        :param logger: the logger to use during parsing (optional: None is supported)
        :param options:
        :return:
        """
        in_root_call = False
        if logger is not None:
            # log only for the root object, not for the children that will be created by the code below
            if not hasattr(_BaseParsingPlan.thrd_locals, 'flag_exec') \
                    or _BaseParsingPlan.thrd_locals.flag_exec == 0:
                # print('Executing Parsing Plan for ' + str(self))
                logger.debug('Executing Parsing Plan for [{location}]'
                             ''.format(location=self.obj_on_fs_to_parse.get_pretty_location(append_file_ext=False)))
                _BaseParsingPlan.thrd_locals.flag_exec = 1
                in_root_call = True

        # Common log message
        logger.debug('(P) ' + get_parsing_plan_log_str(self.obj_on_fs_to_parse, self.obj_type,
                                                       log_only_last=not in_root_call, parser=self.parser))

        try:
            res = super(_BaseParsingPlan, self).execute(logger, options)
            if logger.isEnabledFor(DEBUG):
                logger.info('(P) {loc} -> {type} SUCCESS !'
                            ''.format(loc=self.obj_on_fs_to_parse.get_pretty_location(
                                blank_parent_part=not GLOBAL_CONFIG.full_paths_in_logs,
                                compact_file_ext=True),
                                type=get_pretty_type_str(self.obj_type)))
            else:
                logger.info('SUCCESS parsed [{loc}] as a [{type}]. Parser used was [{parser}]'
                            ''.format(loc=self.obj_on_fs_to_parse.get_pretty_location(compact_file_ext=True),
                                      type=get_pretty_type_str(self.obj_type),
                                      parser=str(self.parser)))
            if in_root_call:
                # print('Completed parsing successfully')
                logger.debug('Completed parsing successfully')
            return res

        finally:
            # remove threadlocal flag if needed
            if in_root_call:
                _BaseParsingPlan.thrd_locals.flag_exec = 0
Example #14
def PygtailLogger(
    logger: logging.Logger,
    filename: str,
    callback: Optional[Callable[[str], None]] = None,
    level: int = VERBOSE_LEVEL,
) -> Iterator[Callable[[], None]]:
    """
    Helper for streaming task stderr into logger using pygtail. Context manager yielding a function
    which reads the latest lines from the file and writes them into logger at verbose level. This
    function also runs automatically on context exit.

    Stops if it sees a line greater than 4KB, in case writer goes haywire.
    """
    from pygtail import Pygtail  # delayed heavy import

    pygtail = None
    if logger.isEnabledFor(level):
        pygtail = Pygtail(filename, full_lines=True)
    logger2 = logger.getChild("stderr")

    def default_callback(line: str) -> None:
        assert len(line) <= 4096, "line > 4KB"
        logger2.log(level, line.rstrip())

    callback = callback or default_callback

    def poll() -> None:
        nonlocal pygtail
        if pygtail:
            try:
                for line in pygtail:
                    callback(line)
            except Exception as exn:
                # cf. https://github.com/bgreenlee/pygtail/issues/48
                logger.warning(
                    StructuredLogMessage("log stream is incomplete",
                                         filename=filename,
                                         error=str(exn)))
                pygtail = None

    try:
        yield poll
    finally:
        poll()
Example #15
def log_current_exception(logger: logging.Logger, show_traceback: Optional[bool] = None, config: Optional[Config] = None) -> None:
    """
    Print format is:

        ExceptionName: message
        [optional] traceback

    We do not explicitly retrieve an exception info via sys.exc_info() as it immediately stores a reference to the
    current Python frame and/or variables causing some possible weird errors (objects are not GC'ed) and memory leaks.
    See https://cosmicpercolator.com/2016/01/13/exception-leaks-in-python-2-and-3/ for more information.

    Args:
        logger: the logging.Logger (or compatible) instance to use
        show_traceback: whether to print the traceback or not. Ignored if the config is given (the traceback is written there anyway)
        config: stm32pio Config instance to save. The traceback will be written to its corresponding INI file

    Returns:
        None
    """

    if show_traceback is None:
        show_traceback = logger.isEnabledFor(show_traceback_threshold_level)

    exc_full_str = traceback.format_exc()
    exc_str = exc_full_str.splitlines()[-1]
    if exc_str.startswith('Exception') and not show_traceback:
        exc_str = exc_str[len('Exception: '):]  # meaningless information
    exc_tb = ''.join(exc_full_str.splitlines(keepends=True)[:-1])

    if config is not None:
        logger.error(exc_str)
        retcode = config.save({'project': {'last_error': f"{exc_str}\n{exc_tb}"}})
        if retcode == 0:
            logger.info(f"Traceback has been saved to the {config.path.name}. It will be cleared on the next successful"
                        "run")
        else:
            logger.warning(f"Traceback has not been saved to the {config.path.name}")
    else:
        if show_traceback:
            logger.error(f"{exc_str}\n{exc_tb}")
        else:
            logger.error(exc_str)
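Because the function reads the active exception via `traceback.format_exc()`, it must be called from inside an exception handler. A usage sketch (`run_project_action` is hypothetical and stubbed here):

import logging

logger = logging.getLogger(__name__)

def run_project_action() -> None:
    raise RuntimeError("boom")  # hypothetical failing operation

try:
    run_project_action()
except Exception:
    # inside the handler, traceback.format_exc() sees the current exception
    log_current_exception(logger)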
Example #16
File: util.py Project: OS-Q/S48
def log_current_exception(logger: logging.Logger,
                          show_traceback_threshold_level: int = logging.DEBUG):
    """
    Print format is:

        ExceptionName: message
        [optional] traceback

    We do not explicitly retrieve an exception info via sys.exc_info() as it immediately stores a reference to the
    current Python frame and/or variables causing some possible weird errors (objects are not GC'ed) and memory leaks.
    See https://cosmicpercolator.com/2016/01/13/exception-leaks-in-python-2-and-3/ for more information
    """
    show_traceback = logger.isEnabledFor(show_traceback_threshold_level)
    exc_full_str = traceback.format_exc()
    exc_str = exc_full_str.splitlines()[-1]
    if exc_str.startswith('Exception') and not show_traceback:
        exc_str = exc_str[len('Exception: '):]  # strip the meaningless generic prefix
    exc_tb = ''.join(exc_full_str.splitlines(keepends=True)[:-1])
    logger.error(f'{exc_str}\n{exc_tb}' if show_traceback else exc_str)
Example #17
def is_logging_disabled(logger: logging.Logger) -> bool:
    return not logger.isEnabledFor(logging.INFO)
Example #18
    def poll_service(self,
                     logger: logging.Logger,
                     svc: docker.models.services.Service,
                     verbose: bool = False) -> Optional[int]:
        state = "(UNKNOWN)"
        status = {}

        svc.reload()
        assert svc.attrs["Spec"]["Labels"]["miniwdl_run_id"] == self.run_id
        tasks = svc.tasks()
        if tasks:
            assert len(tasks) == 1, "docker service should have at most 1 task"
            status = tasks[0]["Status"]
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(_("docker task status", **status))
            state = status["State"]
        else:
            assert (len(self._observed_states or []) <=
                    1), "docker task shouldn't disappear from service"

        # references on docker task states:
        # https://docs.docker.com/engine/swarm/how-swarm-mode-works/swarm-task-states/
        # https://github.com/docker/swarmkit/blob/master/design/task_model.md
        # https://github.com/moby/moby/blob/8fbf2598f58fb212230e6ddbcfbde628b0458250/api/types/swarm/task.go#L12

        # log each new state
        assert isinstance(state, str) and isinstance(self._observed_states,
                                                     set)
        if state not in self._observed_states:
            loginfo = {"service": svc.short_id}
            if tasks:
                loginfo["task"] = tasks[0]["ID"][:10]
                if "NodeID" in tasks[0]:
                    loginfo["node"] = tasks[0]["NodeID"][:10]
            if status.get("DesiredState") not in (None, state):
                loginfo["desired"] = status["DesiredState"]
            logmsg = status.get("Err", status.get("Message", None))
            if logmsg and logmsg != state:
                loginfo["message"] = logmsg
            method = logger.info
            if state == "running":
                method = logger.notice  # pyre-fixme
            elif state in [
                    "failed", "shutdown", "rejected", "orphaned", "remove"
            ]:
                method = logger.error
            method(_(f"docker task {state}", **loginfo))
            self._observed_states.add(state)

        # determine whether docker task has exited
        exit_code = None
        if "ExitCode" in status.get("ContainerStatus", {}):
            exit_code = status["ContainerStatus"]["ExitCode"]
            assert isinstance(exit_code, int)

        if state in ("complete", "failed"):
            msg = _("docker task exit", state=state, exit_code=exit_code)
            if state == "failed":
                logger.error(msg)
            else:
                logger.notice(msg)  # pyre-fixme
            assert isinstance(
                exit_code, int) and (exit_code == 0) == (state == "complete")
            return exit_code
        elif {state, status.get("DesiredState")}.intersection({
                "rejected", "shutdown", "orphaned", "remove"
        }) or exit_code not in (None, 0):
            # "rejected" state usually arises from nonexistent docker image.
            # if the worker assigned a task goes down, any of the following can manifest:
            #   - exit_code=-1 with state running (or other non-terminal)
            #   - state shutdown, orphaned, remove
            #   - desired_state shutdown
            # also see GitHub issue #374
            raise (
                Error.RuntimeError if state == "rejected" else Interrupted
            )(
                f"docker task {state}"
                + ((", desired state " + status["DesiredState"])
                   if status.get("DesiredState") not in (None, state) else "")
                + (f", exit code = {exit_code}" if exit_code not in (None, 0) else "")
                + (f": {status['Err']}" if "Err" in status else "")
            )

        return None
Example #19
        def activate_next_working_parser(self, already_caught_execution_errors: Dict[AnyParser, Exception] = None,
                                         logger: Logger = None):
            """
            Utility method to activate the next working parser. It iteratively asks each parser of the list to create
            a parsing plan, and stops at the first one that answers

            :param already_caught_execution_errors:
            :param logger:
            :return:
            """

            if (self.active_parser_idx+1) < len(self.parser_list):
                # ask each parser to create a parsing plan right here. Stop at the first working one
                for i in range(self.active_parser_idx+1, len(self.parser_list)):
                    typ, p = self.parser_list[i]
                    # if i > 0:
                    #     # print('----- Rebuilding local parsing plan with next candidate parser:')
                    #     if logger is not None:
                    #         logger.info("Rebuilding local parsing plan with {p} -> {t}"
                    #                     "".format(p=p, t=get_pretty_type_str(typ or self.obj_type)))
                    try:
                        # -- try to rebuild a parsing plan with the next parser, and remember it if it succeeds
                        self.active_parsing_plan = CascadingParser.ActiveParsingPlan(p.create_parsing_plan(
                            typ or self.obj_type, self.obj_on_fs_to_parse, self.logger, _main_call=False), self.parser)
                        self.active_parser_idx = i
                        # if i > 0 and logger is not None:
                        #     logger.info('DONE Rebuilding local parsing plan for [{location}]. Resuming parsing...'
                        #                 ''.format(location=self.obj_on_fs_to_parse.get_pretty_location(
                        #         compact_file_ext=True)))
                        return

                    except Exception as err:

                        # log simplification for nested errors
                        if isinstance(err, ParsingException) and hasattr(err, 'caught'):
                            e_for_log = err.caught
                        else:
                            e_for_log = err

                        # trying to display the error in front of the indented object
                        if not GLOBAL_CONFIG.full_paths_in_logs:
                            idx = self.obj_on_fs_to_parse.get_pretty_location(blank_parent_part=True).index('|--')
                            prefix = ' ' * (idx + 4)
                        else:
                            prefix = ''

                        # -- log the error
                        if should_hide_traceback(e_for_log):
                            logger.warning("{pre} ! CAUGHT: {t} - {e}".format(pre=prefix,
                                                                           t=type(e_for_log).__name__, e=e_for_log))
                        else:
                            msg = StringIO()
                            print_error_to_io_stream(e_for_log, msg, print_big_traceback=logger.isEnabledFor(DEBUG))
                            logger.warning("----- WARNING: Caught error while creating parsing plan with parser {p}"
                                           "".format(p=p))
                            logger.warning(msg.getvalue())
                            # print('----- WARNING: Caught error while creating parsing plan with parser ' + str(p) + '.')
                            # print(msg.getvalue())
                            # (Note: we don't use warning because it does not show up in the correct order in the console)

                        # -- remember the error in order to create a CascadeError at the end in case of failure of all
                        self.parsing_plan_creation_errors[(typ or self.obj_type, p)] = err

            # no more parsers to try...
            raise CascadeError(self.parser, self, self.parsing_plan_creation_errors,
                               already_caught_execution_errors) from None
Example #20
def _maybe_configure_extended_logging(logger: Logger) -> None:
    if logger.isEnabledFor(LogLevel.TRACE.level):
        _configure_requests_debug_logging()
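The helper it calls is not shown; a common way to implement such requests debug logging, offered here as an assumption rather than this project's code, is to raise `http.client`'s debug level and the `urllib3` logger:

import http.client
import logging

def _configure_requests_debug_logging() -> None:
    # illustrative sketch: dump request/response lines and headers
    http.client.HTTPConnection.debuglevel = 1
    logging.getLogger("urllib3").setLevel(logging.DEBUG)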
Example #21
def intermittent_log(
    logger: logging.Logger,
    line: str,
    frequency: float = 60,
    level: int = logging.INFO,
    negative_level: Optional[int] = None,
    caller_extra_id: Any = None,
    fn_override: Optional[str] = None,
    line_override: Optional[int] = None,
    func_override: Optional[str] = None,
    _caller: Optional[str] = None,
    _last_logged: DefaultDict[Tuple[str, int], float] = defaultdict(float),
    _times_suppressed: DefaultDict[Tuple[str, int], int] = defaultdict(int),
) -> None:
    try:
        caller = None
        if _caller:
            frame_id = _caller, caller_extra_id
        else:
            try:
                caller = inspect.stack()[1]
                frame_id = caller.filename, caller.lineno, caller_extra_id
            except Exception:
                frame_id = "???"

        output = negative_level
        if time.time() - _last_logged[frame_id] > frequency:
            _last_logged[frame_id] = time.time()
            if _times_suppressed[frame_id]:
                line += f" [log suppressed {_times_suppressed[frame_id]} times since last]"
            _times_suppressed[frame_id] = 0
            output = level
        else:
            _times_suppressed[frame_id] += 1
        if output and logger.isEnabledFor(output):
            if caller:
                co = caller.frame.f_code
                fn, lno, func, sinfo = (
                    co.co_filename,
                    caller.frame.f_lineno,
                    co.co_name,
                    None,
                )
                record = logger.makeRecord(
                    logger.name,
                    output,
                    fn_override or str(fn),
                    line_override or lno,
                    line,
                    {},
                    None,
                    func_override or func,
                    None,
                    sinfo,
                )
            else:
                record = logger.makeRecord(
                    logger.name,
                    output,
                    fn_override or "???",
                    line_override or 0,
                    line,
                    {},
                    None,
                    func_override,
                )
            logger.handle(record)
    except Exception:
        # noinspection PyProtectedMember
        logger.log(level, line, ())
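A usage sketch: called from a hot loop, the per-call-site `_last_logged` cache limits output to once per `frequency` seconds, and the suppression counter is reported when the line finally gets through:

import logging
import time

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("poller")

for _ in range(1000):  # illustrative hot loop
    intermittent_log(logger, "still polling...", frequency=60)
    # pass negative_level=logging.DEBUG to emit suppressed iterations at
    # DEBUG instead of dropping them entirely
    time.sleep(0.1)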
Example #22
    def __get_parsing_plan_for_multifile_children(self, obj_on_fs: PersistedObject, desired_type: Type[Any],
                                                  children_on_fs: Dict[str, PersistedObject], logger: Logger) \
            -> Dict[str, Any]:
        """
        Simply inspects the required type to find the names and types of its constructor arguments.
        Then relies on the inner ParserFinder to parse each of them.

        :param obj_on_fs:
        :param desired_type:
        :param children_on_fs:
        :param logger:
        :return:
        """

        # -- (a) collect pep-484 information in the class constructor to be able to understand what is required
        constructor_args_types_and_opt = get_constructor_attributes_types(
            desired_type)

        # -- (b) plan to parse each attribute required by the constructor
        children_plan = dict()  # results will be put in this object

        # --use sorting in order to lead to reproducible results in case of multiple errors
        for attribute_name, att_desc in sorted(
                constructor_args_types_and_opt.items()):
            attribute_is_mandatory = att_desc[1]
            attribute_type = att_desc[0]

            # get the child
            if attribute_name in children_on_fs.keys():
                child_on_fs = children_on_fs[attribute_name]

                # find a parser
                t, parser_found = self.parser_finder.build_parser_for_fileobject_and_desiredtype(
                    child_on_fs, attribute_type, logger=logger)
                # create a parsing plan
                children_plan[
                    attribute_name] = parser_found.create_parsing_plan(
                        t, child_on_fs, logger=logger, _main_call=False)
            else:
                if attribute_is_mandatory:
                    raise MissingMandatoryAttributeFiles.create(
                        obj_on_fs, desired_type, attribute_name)
                else:
                    # we don't care: optional attribute
                    # don't use warning since it does not show up nicely
                    msg = 'NOT FOUND - This optional constructor attribute for type ' \
                          + get_pretty_type_str(desired_type) + ' was not found on file system, but this may be normal'\
                          ' - this message is displayed \'just in case\'.'
                    if logger.isEnabledFor(DEBUG):
                        logger.warning(
                            '(B) ' + obj_on_fs.get_pretty_child_location(
                                attribute_name, blank_parent_part=True) +
                            ': ' + msg)
                    else:
                        logger.warning(
                            'WARNING parsing [{loc}] as a [{typ}]: optional constructor attribute [{att}] '
                            'not found on file system. This may be normal - this message is displayed \'just'
                            ' in case\'.'.format(
                                loc=obj_on_fs.get_pretty_location(
                                    blank_parent_part=False,
                                    append_file_ext=False),
                                typ=get_pretty_type_str(desired_type),
                                att=attribute_name))

        return children_plan
Example #23
        def execute(self, logger: Logger, options: Dict[str, Dict[str, Any]]):
            """
            Delegates execution to currently active parser. In case of an exception, recompute the parsing plan and
            do it again on the next one.

            :param logger:
            :param options:
            :return:
            """
            if self.active_parsing_plan is not None:
                execution_errors = OrderedDict()
                while self.active_parsing_plan is not None:
                    try:
                        # -- try to execute current plan
                        return self.active_parsing_plan.execute(logger, options)

                    except Exception as err:
                        # -- log the error
                        if not logger.isEnabledFor(DEBUG):
                            logger.warning('ERROR while parsing [{location}] into a [{type}] using [{parser}]. '
                                           'Set log level to DEBUG for details'.format(
                                               location=self.obj_on_fs_to_parse.get_pretty_location(compact_file_ext=True),
                                               type=get_pretty_type_str(self.active_parsing_plan.obj_type),
                                               parser=str(self.active_parsing_plan.parser)))
                        else:
                            # log simplification for nested errors
                            if isinstance(err, ParsingException) and hasattr(err, 'caught'):
                                e_for_log = err.caught
                            else:
                                e_for_log = err

                            # trying to display the error in front of the indented object
                            if not GLOBAL_CONFIG.full_paths_in_logs:
                                idx = self.obj_on_fs_to_parse.get_pretty_location(blank_parent_part=True).index(
                                    '|--')
                                prefix = ' ' * (idx + 4)
                            else:
                                prefix = ''

                            # -- log the error
                            if should_hide_traceback(e_for_log):
                                logger.warning("{pre} ! CAUGHT: {t} - {e}".format(pre=prefix,
                                                                               t=type(e_for_log).__name__,
                                                                               e=short_exception_message(e_for_log)))
                            else:
                                msg = StringIO()
                                big_tb = logger.isEnabledFor(DEBUG)
                                print_error_to_io_stream(e_for_log, msg, print_big_traceback=big_tb)
                                logger.warning('  !! Caught error during execution !!')
                                logger.warning(msg.getvalue())
                        # print('----- WARNING: Caught error during execution : ')
                        # print(msg.getvalue())
                        # (Note: we don't use warning because it does not show up in the correct order in the console)

                        # -- remember the error in order to create a CascadeError at the end in case of failure of all
                        execution_errors[(self.active_parsing_plan.obj_type, self.active_parsing_plan.parser)] = err

                        # -- try to switch to the next parser of the cascade, if any
                        self.activate_next_working_parser(execution_errors, logger)

                raise CascadeError(self.parser, self, self.parsing_plan_creation_errors, execution_errors) from None

            else:
                raise Exception('Cannot execute this parsing plan : empty parser list !')
Example #24
	def isEnabledFor(self, level):
		threadName = threading.current_thread().name
		if self.root.agentLevels.get(threadName, 0) > level:
			return False
		return Logger.isEnabledFor(self, level)
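A sketch of how the per-thread map this override consults might be populated; `agentLevels` on the root logger is an attribute the project evidently sets elsewhere, so the wiring below is an assumption:

import logging
import threading

logging.getLogger().agentLevels = {}  # assumed attribute read by isEnabledFor above

def agent_worker():
    # suppress records below WARNING for this thread only
    name = threading.current_thread().name
    logging.getLogger().agentLevels[name] = logging.WARNING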
Example #25
def log_dataframe(frame: pd.DataFrame,
                  logger: logging.Logger,
                  level: int = logging.DEBUG):
    if logger.isEnabledFor(level):
        # route through the logger so handlers and formatting apply
        logger.log(level, '\n%s', frame)
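A usage sketch:

import logging
import pandas as pd

logging.basicConfig(level=logging.DEBUG)
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
log_dataframe(df, logging.getLogger(__name__))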