Example #1
def set_multiprocessing_logging_level_by_opts(opts):
    """
    This will set the multiprocessing logging level to the lowest
    logging level of all the types of logging that are configured.
    """
    global __MP_LOGGING_LEVEL

    log_levels = [
        LOG_LEVELS.get(opts.get("log_level", "").lower(), logging.ERROR),
        LOG_LEVELS.get(opts.get("log_level_logfile", "").lower(), logging.ERROR),
    ]
    for level in six.itervalues(opts.get("log_granular_levels", {})):
        log_levels.append(LOG_LEVELS.get(level.lower(), logging.ERROR))

    __MP_LOGGING_LEVEL = min(log_levels)
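
A minimal, self-contained sketch of the same level resolution, assuming a LOG_LEVELS mapping similar to Salt's (the mapping and the opts dict below are illustrative stand-ins, not Salt's actual values):

import logging

# Hypothetical stand-in for Salt's LOG_LEVELS mapping (illustration only)
LOG_LEVELS = {
    "all": logging.NOTSET,
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

# Made-up opts for illustration
opts = {
    "log_level": "warning",
    "log_level_logfile": "debug",
    "log_granular_levels": {"salt.transport": "info"},
}

log_levels = [
    LOG_LEVELS.get(opts.get("log_level", "").lower(), logging.ERROR),
    LOG_LEVELS.get(opts.get("log_level_logfile", "").lower(), logging.ERROR),
]
for level in opts.get("log_granular_levels", {}).values():
    log_levels.append(LOG_LEVELS.get(level.lower(), logging.ERROR))

print(min(log_levels))  # 10, i.e. logging.DEBUG: the most verbose configured level wins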
Example #2
def set_logger_level(logger_name, log_level="error"):
    """
    Tweak a specific logger's logging level
    """
    logging.getLogger(logger_name).setLevel(
        LOG_LEVELS.get(log_level.lower(), logging.ERROR)
    )
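
A quick usage sketch, again with a stand-in LOG_LEVELS mapping; note that an unknown level name silently falls back to logging.ERROR:

import logging

LOG_LEVELS = {"debug": logging.DEBUG, "info": logging.INFO, "error": logging.ERROR}  # stand-in

def set_logger_level(logger_name, log_level="error"):
    logging.getLogger(logger_name).setLevel(
        LOG_LEVELS.get(log_level.lower(), logging.ERROR)
    )

set_logger_level("urllib3", "info")
set_logger_level("paramiko", "bogus")       # unknown name -> falls back to ERROR
print(logging.getLogger("urllib3").level)   # 20
print(logging.getLogger("paramiko").level)  # 40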
Example #3
def set_logger_level(logger_name, log_level='error'):
    '''
    Tweak a specific logger's logging level
    '''
    logging.getLogger(logger_name).setLevel(
        LOG_LEVELS.get(log_level.lower(), logging.ERROR)
    )
Example #4
def verify_log(opts):
    """
    If an insecure logging configuration is found, show a warning
    """
    level = LOG_LEVELS.get(str(opts.get("log_level")).lower(), logging.NOTSET)

    if level < logging.INFO:
        log.warning(
            "Insecure logging configuration detected! Sensitive data may be logged."
        )
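
The warning fires whenever the resolved level is more verbose than INFO, which also covers unrecognised level names, since those resolve to NOTSET. A small check of outcomes, using a stand-in mapping:

import logging

LOG_LEVELS = {"debug": logging.DEBUG, "info": logging.INFO, "error": logging.ERROR}  # stand-in

for configured in ("debug", "info", "error", None):
    level = LOG_LEVELS.get(str(configured).lower(), logging.NOTSET)
    print(configured, "-> insecure" if level < logging.INFO else "-> ok")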
Example #5
def setup_temp_logger(log_level='error'):
    '''
    Setup the temporary console logger
    '''
    if is_temp_logging_configured():
        logging.getLogger(__name__).warning(
            'Temporary logging is already configured'
        )
        return

    if log_level is None:
        log_level = 'warning'

    level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)

    handler = None
    for handler in logging.root.handlers:
        if handler in (LOGGING_NULL_HANDLER, LOGGING_STORE_HANDLER):
            continue

        if not hasattr(handler, 'stream'):
            # Not a stream handler, continue
            continue

        if handler.stream is sys.stderr:
            # There's already a logging handler outputting to sys.stderr
            break
    else:
        handler = LOGGING_TEMP_HANDLER
    handler.setLevel(level)

    # Set the default temporary console formatter config
    formatter = logging.Formatter(
        '[%(levelname)-8s] %(message)s', datefmt='%H:%M:%S'
    )
    handler.setFormatter(formatter)
    logging.root.addHandler(handler)

    # Sync the null logging handler messages with the temporary handler
    if LOGGING_NULL_HANDLER is not None:
        LOGGING_NULL_HANDLER.sync_with_handlers([handler])
    else:
        logging.getLogger(__name__).debug(
            'LOGGING_NULL_HANDLER is already None, can\'t sync messages '
            'with it'
        )

    # Remove the temporary null logging handler
    __remove_null_logging_handler()

    global __TEMP_LOGGING_CONFIGURED
    __TEMP_LOGGING_CONFIGURED = True
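
The for/else loop is the interesting part: it reuses a root handler that already writes to sys.stderr and only falls back to the temporary handler when no break occurred. A stripped-down illustration of that pattern (the fallback here is a plain StreamHandler, not Salt's LOGGING_TEMP_HANDLER):

import logging
import sys

fallback = logging.StreamHandler(sys.stderr)  # placeholder for LOGGING_TEMP_HANDLER

handler = None
for handler in logging.root.handlers:
    if not hasattr(handler, "stream"):
        continue                      # not a stream handler
    if handler.stream is sys.stderr:
        break                         # something already writes to stderr, reuse it
else:
    handler = fallback                # loop finished without break

handler.setLevel(logging.WARNING)
handler.setFormatter(logging.Formatter("[%(levelname)-8s] %(message)s"))
if handler not in logging.root.handlers:
    logging.root.addHandler(handler)
logging.warning("temporary logging ready")  # emitted through exactly one stderr handler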
Example #6
def setup_console_logger(log_level='error', log_format=None, date_format=None):
    '''
    Setup the console logger
    '''
    if is_console_configured():
        logging.getLogger(__name__).warning(
            'Console logging already configured')
        return

    # Remove the temporary logging handler
    __remove_temp_logging_handler()

    if log_level is None:
        log_level = 'warning'

    level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)

    setLogRecordFactory(SaltColorLogRecord)

    handler = None
    for handler in logging.root.handlers:
        if handler is LOGGING_STORE_HANDLER:
            continue

        if not hasattr(handler, 'stream'):
            # Not a stream handler, continue
            continue

        if handler.stream is sys.stderr:
            # There's already a logging handler outputting to sys.stderr
            break
    else:
        handler = StreamHandler(sys.stderr)
    handler.setLevel(level)

    # Set the default console formatter config
    if not log_format:
        log_format = '[%(levelname)-8s] %(message)s'
    if not date_format:
        date_format = '%H:%M:%S'

    formatter = logging.Formatter(log_format, datefmt=date_format)

    handler.setFormatter(formatter)
    logging.root.addHandler(handler)

    global __CONSOLE_CONFIGURED
    global __LOGGING_CONSOLE_HANDLER
    __CONSOLE_CONFIGURED = True
    __LOGGING_CONSOLE_HANDLER = handler
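
Beyond the handler-reuse loop (the same pattern as in the previous example), the console setup swaps the LogRecord factory so that records carry color attributes. A hedged sketch of that hook using the standard logging.setLogRecordFactory, with a trivial factory instead of Salt's SaltColorLogRecord:

import logging

_original_factory = logging.getLogRecordFactory()

def _tagged_factory(*args, **kwargs):
    # Illustration only: Salt's SaltColorLogRecord adds colorized attributes;
    # here we just attach a bracketed level name.
    record = _original_factory(*args, **kwargs)
    record.bracketlevel = "[{}]".format(record.levelname)
    return record

logging.setLogRecordFactory(_tagged_factory)
logging.basicConfig(format="%(bracketlevel)s %(message)s", level=logging.INFO)
logging.getLogger(__name__).info("console logging ready")  # -> [INFO] console logging ready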
Example #7
    def __init__(
        self,
        args=None,
        executable=None,
        shell=False,
        cwd=None,
        env=None,
        preexec_fn=None,
        # Terminal Size
        rows=None,
        cols=None,
        # Logging options
        log_stdin=None,
        log_stdin_level="debug",
        log_stdout=None,
        log_stdout_level="debug",
        log_stderr=None,
        log_stderr_level="debug",
        # sys.stdXYZ streaming options
        stream_stdout=None,
        stream_stderr=None,
        # Used for tests
        force_receive_encoding=__salt_system_encoding__,
    ):
        if not args and not executable:
            raise TerminalException(
                'You need to pass at least one of "args" or "executable"')
        self.args = args
        self.executable = executable
        self.shell = shell
        self.cwd = cwd
        self.env = env
        self.preexec_fn = preexec_fn
        self.receive_encoding = force_receive_encoding

        if rows is None and cols is None:
            rows, cols = self.__detect_parent_terminal_size()
        elif rows is not None and cols is None:
            _, cols = self.__detect_parent_terminal_size()
        elif rows is None and cols is not None:
            rows, _ = self.__detect_parent_terminal_size()
        self.rows = rows
        self.cols = cols
        self.pid = None
        self.stdin = None
        self.stdout = None
        self.stderr = None

        self.child_fd = None
        self.child_fde = None

        self.partial_data_stdout = b""
        self.partial_data_stderr = b""

        self.closed = True
        self.flag_eof_stdout = False
        self.flag_eof_stderr = False
        self.terminated = True
        self.exitstatus = None
        self.signalstatus = None
        # status returned by os.waitpid
        self.status = None

        if stream_stdout is True:
            self.stream_stdout = sys.stdout
        elif stream_stdout is False:
            self.stream_stdout = None
        elif stream_stdout is not None:
            if (not hasattr(stream_stdout, "write")
                    or not hasattr(stream_stdout, "flush")
                    or not hasattr(stream_stdout, "close")):
                raise TerminalException(
                    "'stream_stdout' needs to have at least 3 methods, "
                    "'write()', 'flush()' and 'close()'.")
            self.stream_stdout = stream_stdout
        else:
            raise TerminalException(
                "Don't know how to handle '{}' as the VT's "
                "'stream_stdout' parameter.".format(stream_stdout))

        if stream_stderr is True:
            self.stream_stderr = sys.stderr
        elif stream_stderr is False:
            self.stream_stderr = None
        elif stream_stderr is not None:
            if (not hasattr(stream_stderr, "write")
                    or not hasattr(stream_stderr, "flush")
                    or not hasattr(stream_stderr, "close")):
                raise TerminalException(
                    "'stream_stderr' needs to have at least 3 methods, "
                    "'write()', 'flush()' and 'close()'.")
            self.stream_stderr = stream_stderr
        else:
            raise TerminalException(
                "Don't know how to handle '{}' as the VT's "
                "'stream_stderr' parameter.".format(stream_stderr))

        try:
            self._spawn()
        except Exception as err:  # pylint: disable=W0703
            # A lot can go wrong, so that's why we're catching the most general
            # exception type
            log.warning("Failed to spawn the VT: %s",
                        err,
                        exc_info_on_loglevel=logging.DEBUG)
            raise TerminalException(
                "Failed to spawn the VT. Error: {}".format(err))

        log.debug(
            "Child Forked! PID: %s  STDOUT_FD: %s  STDERR_FD: %s",
            self.pid,
            self.child_fd,
            self.child_fde,
        )
        terminal_command = " ".join(self.args)
        if ('decode("base64")' in terminal_command
                or "base64.b64decode(" in terminal_command):
            log.debug(
                "VT: Salt-SSH SHIM Terminal Command executed. Logged to TRACE")
            log.trace("Terminal Command: %s", terminal_command)
        else:
            log.debug("Terminal Command: %s", terminal_command)

        # Setup logging after spawned in order to have a pid value
        self.stdin_logger_level = LOG_LEVELS.get(log_stdin_level,
                                                 log_stdin_level)
        if log_stdin is True:
            self.stdin_logger = logging.getLogger("{}.{}.PID-{}.STDIN".format(
                __name__, self.__class__.__name__, self.pid))
        elif log_stdin is not None:
            if not isinstance(log_stdin, logging.Logger):
                raise RuntimeError(
                    "'log_stdin' needs to subclass `logging.Logger`")
            self.stdin_logger = log_stdin
        else:
            self.stdin_logger = None

        self.stdout_logger_level = LOG_LEVELS.get(log_stdout_level,
                                                  log_stdout_level)
        if log_stdout is True:
            self.stdout_logger = logging.getLogger(
                "{}.{}.PID-{}.STDOUT".format(__name__, self.__class__.__name__,
                                             self.pid))
        elif log_stdout is not None:
            if not isinstance(log_stdout, logging.Logger):
                raise RuntimeError(
                    "'log_stdout' needs to subclass `logging.Logger`")
            self.stdout_logger = log_stdout
        else:
            self.stdout_logger = None

        self.stderr_logger_level = LOG_LEVELS.get(log_stderr_level,
                                                  log_stderr_level)
        if log_stderr is True:
            self.stderr_logger = logging.getLogger(
                "{}.{}.PID-{}.STDERR".format(__name__, self.__class__.__name__,
                                             self.pid))
        elif log_stderr is not None:
            if not isinstance(log_stderr, logging.Logger):
                raise RuntimeError(
                    "'log_stderr' needs to subclass `logging.Logger`")
            self.stderr_logger = log_stderr
        else:
            self.stderr_logger = None
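
The stream_stdout/stream_stderr checks are plain duck typing: any object exposing write(), flush() and close() passes. A tiny standalone check mirroring that validation, with io.StringIO standing in for a custom stream (the Terminal class itself is not touched here):

import io

stream = io.StringIO()                      # hypothetical custom stream
required = ("write", "flush", "close")
missing = [name for name in required if not hasattr(stream, name)]
if missing:
    raise TypeError("stream is missing: {}".format(", ".join(missing)))
print("stream accepted as stream_stdout")   # StringIO has all three methods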
Example #8
def setup_logfile_logger(log_path,
                         log_level='error',
                         log_format=None,
                         date_format=None,
                         max_bytes=0,
                         backup_count=0):
    '''
    Setup the logfile logger

    Since version 0.10.6 we support logging to syslog; some examples:

        tcp://localhost:514/LOG_USER
        tcp://localhost/LOG_DAEMON
        udp://localhost:5145/LOG_KERN
        udp://localhost
        file:///dev/log
        file:///dev/log/LOG_SYSLOG
        file:///dev/log/LOG_DAEMON

    The above examples are self-explanatory; the general form is:
        <file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility>

    If you're thinking of doing remote logging you might also be thinking that
    you could point salt's logging at the remote syslog. **Please don't!**
    An issue has been reported where, over TCP, the logged lines get
    concatenated. See #3061.

    The preferred way to do remote logging is to set up a local syslog, point
    salt's logging at the local syslog (a unix socket is much faster) and then
    have the local syslog forward the log messages to the remote syslog.
    '''

    if is_logfile_configured():
        logging.getLogger(__name__).warning(
            'Logfile logging already configured')
        return

    if log_path is None:
        logging.getLogger(__name__).warning(
            'log_path setting is set to `None`. Nothing else to do')
        return

    # Remove the temporary logging handler
    __remove_temp_logging_handler()

    if log_level is None:
        log_level = 'warning'

    level = LOG_LEVELS.get(log_level.lower(), logging.ERROR)

    parsed_log_path = urlparse(log_path)

    root_logger = logging.getLogger()

    if parsed_log_path.scheme in ('tcp', 'udp', 'file'):
        syslog_opts = {
            'facility': SysLogHandler.LOG_USER,
            'socktype': socket.SOCK_DGRAM
        }

        if parsed_log_path.scheme == 'file' and parsed_log_path.path:
            facility_name = parsed_log_path.path.split(os.sep)[-1].upper()
            if not facility_name.startswith('LOG_'):
                # The user is not specifying a syslog facility
                facility_name = 'LOG_USER'  # Syslog default
                syslog_opts['address'] = parsed_log_path.path
            else:
                # The user has set a syslog facility, let's update the path to
                # the logging socket
                syslog_opts['address'] = os.sep.join(
                    parsed_log_path.path.split(os.sep)[:-1])
        elif parsed_log_path.path:
            # In case of udp or tcp with a facility specified
            facility_name = parsed_log_path.path.lstrip(os.sep).upper()
            if not facility_name.startswith('LOG_'):
                # Logging facilities start with LOG_; if this is not the case,
                # fail right now!
                raise RuntimeError(
                    'The syslog facility \'{0}\' is not known'.format(
                        facility_name))
        else:
            # This is the case of udp or tcp without a facility specified
            facility_name = 'LOG_USER'  # Syslog default

        facility = getattr(SysLogHandler, facility_name, None)
        if facility is None:
            # This python syslog version does not know about the user provided
            # facility name
            raise RuntimeError(
                'The syslog facility \'{0}\' is not known'.format(
                    facility_name))
        syslog_opts['facility'] = facility

        if parsed_log_path.scheme == 'tcp':
            # tcp syslog support was only added on python versions >= 2.7
            if sys.version_info < (2, 7):
                raise RuntimeError(
                    'Python versions lower than 2.7 do not support logging '
                    'to syslog using tcp sockets')
            syslog_opts['socktype'] = socket.SOCK_STREAM

        if parsed_log_path.scheme in ('tcp', 'udp'):
            syslog_opts['address'] = (parsed_log_path.hostname,
                                      parsed_log_path.port
                                      or logging.handlers.SYSLOG_UDP_PORT)

        if sys.version_info < (2, 7) or parsed_log_path.scheme == 'file':
            # There's no socktype support on python versions lower than 2.7
            syslog_opts.pop('socktype', None)

        try:
            # Et voilà! Finally our syslog handler instance
            handler = SysLogHandler(**syslog_opts)
        except socket.error as err:
            logging.getLogger(__name__).error(
                'Failed to setup the Syslog logging handler: %s', err)
            shutdown_multiprocessing_logging_listener()
            sys.exit(2)
    else:
        # Make sure the logging directory exists and attempt to create it if necessary
        log_dir = os.path.dirname(log_path)
        if not os.path.exists(log_dir):
            logging.getLogger(__name__).info(
                'Log directory not found, trying to create it: %s', log_dir)
            try:
                os.makedirs(log_dir, mode=0o700)
            except OSError as ose:
                logging.getLogger(__name__).warning(
                    'Failed to create directory for log file: %s (%s)',
                    log_dir, ose)
                return
        try:
            # Logfile logging is UTF-8 on purpose.
            # Since salt uses YAML and YAML uses either UTF-8 or UTF-16, if a
            # user is not using plain ASCII, their system should be ready to
            # handle UTF-8.
            if max_bytes > 0:
                handler = RotatingFileHandler(log_path,
                                              mode='a',
                                              maxBytes=max_bytes,
                                              backupCount=backup_count,
                                              encoding='utf-8',
                                              delay=0)
            else:
                handler = WatchedFileHandler(log_path,
                                             mode='a',
                                             encoding='utf-8',
                                             delay=0)
        except (IOError, OSError):
            logging.getLogger(__name__).warning(
                'Failed to open log file, do you have permission to write to %s?',
                log_path)
            # Do not proceed with any more configuration since it will fail, we
            # have the console logging already setup and the user should see
            # the error.
            return

    handler.setLevel(level)

    # Set the default console formatter config
    if not log_format:
        log_format = '%(asctime)s [%(name)-15s][%(levelname)-8s] %(message)s'
    if not date_format:
        date_format = '%Y-%m-%d %H:%M:%S'

    formatter = logging.Formatter(log_format, datefmt=date_format)

    handler.setFormatter(formatter)
    root_logger.addHandler(handler)

    global __LOGFILE_CONFIGURED
    global __LOGGING_LOGFILE_HANDLER
    __LOGFILE_CONFIGURED = True
    __LOGGING_LOGFILE_HANDLER = handler
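
To see how the syslog URLs from the docstring translate into SysLogHandler arguments, here is the address/facility resolution condensed from the branches above (POSIX paths assumed; no handler is actually opened):

import logging.handlers
import os
from urllib.parse import urlparse

for url in ("udp://localhost:5145/LOG_KERN", "file:///dev/log/LOG_DAEMON"):
    parsed = urlparse(url)
    if parsed.scheme == "file":
        facility_name = parsed.path.split(os.sep)[-1].upper()
        address = os.sep.join(parsed.path.split(os.sep)[:-1])
    else:
        facility_name = parsed.path.lstrip(os.sep).upper()
        address = (parsed.hostname, parsed.port or logging.handlers.SYSLOG_UDP_PORT)
    facility = getattr(logging.handlers.SysLogHandler, facility_name)
    print(url, "->", address, facility)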
Example #9
    def call(self):
        """
        Call the module
        """
        ret = {}
        fun = self.opts["fun"]
        ret["jid"] = salt.utils.jid.gen_jid(self.opts)
        proc_fn = os.path.join(
            salt.minion.get_proc_dir(self.opts["cachedir"]), ret["jid"]
        )
        if fun not in self.minion.functions:
            docs = self.minion.functions["sys.doc"]("{}*".format(fun))
            if docs:
                docs[fun] = self.minion.functions.missing_fun_string(fun)
                ret["out"] = "nested"
                ret["return"] = docs
                return ret
            sys.stderr.write(self.minion.functions.missing_fun_string(fun))
            mod_name = fun.split(".")[0]
            if mod_name in self.minion.function_errors:
                sys.stderr.write(
                    " Possible reasons: {}\n".format(
                        self.minion.function_errors[mod_name]
                    )
                )
            else:
                sys.stderr.write("\n")
            sys.exit(-1)
        metadata = self.opts.get("metadata")
        if metadata is not None:
            metadata = salt.utils.args.yamlify_arg(metadata)
        try:
            sdata = {
                "fun": fun,
                "pid": os.getpid(),
                "jid": ret["jid"],
                "tgt": "salt-call",
            }
            if metadata is not None:
                sdata["metadata"] = metadata
            args, kwargs = salt.minion.load_args_and_kwargs(
                self.minion.functions[fun],
                salt.utils.args.parse_input(
                    self.opts["arg"], no_parse=self.opts.get("no_parse", [])
                ),
                data=sdata,
            )
            try:
                with salt.utils.files.fopen(proc_fn, "w+b") as fp_:
                    fp_.write(salt.payload.dumps(sdata))
            except NameError:
                # Don't require msgpack with local
                pass
            except OSError:
                sys.stderr.write(
                    "Cannot write to process directory. "
                    "Do you have permissions to "
                    "write to {} ?\n".format(proc_fn)
                )
            func = self.minion.functions[fun]
            data = {"arg": args, "fun": fun}
            data.update(kwargs)
            executors = getattr(
                self.minion, "module_executors", []
            ) or salt.utils.args.yamlify_arg(
                self.opts.get("module_executors", "[direct_call]")
            )
            if self.opts.get("executor_opts", None):
                data["executor_opts"] = salt.utils.args.yamlify_arg(
                    self.opts["executor_opts"]
                )
            if isinstance(executors, str):
                executors = [executors]
            try:
                for name in executors:
                    fname = "{}.execute".format(name)
                    if fname not in self.minion.executors:
                        raise SaltInvocationError(
                            "Executor '{}' is not available".format(name)
                        )
                    ret["return"] = self.minion.executors[fname](
                        self.opts, data, func, args, kwargs
                    )
                    if ret["return"] is not None:
                        break
            except TypeError as exc:
                sys.stderr.write(
                    "\nPassed invalid arguments: {}.\n\nUsage:\n".format(exc)
                )
                salt.utils.stringutils.print_cli(func.__doc__)
                active_level = LOG_LEVELS.get(
                    self.opts["log_level"].lower(), logging.ERROR
                )
                if active_level <= logging.DEBUG:
                    trace = traceback.format_exc()
                    sys.stderr.write(trace)
                sys.exit(salt.defaults.exitcodes.EX_GENERIC)
            try:
                retcode = self.minion.executors.pack["__context__"].get("retcode", 0)
            except AttributeError:
                retcode = salt.defaults.exitcodes.EX_GENERIC

            if retcode == 0:
                # No nonzero retcode in __context__ dunder. Check if return
                # is a dictionary with a "result" or "success" key.
                try:
                    func_result = all(
                        ret["return"].get(x, True) for x in ("result", "success")
                    )
                except Exception:  # pylint: disable=broad-except
                    # return data is not a dict
                    func_result = True
                if not func_result:
                    retcode = salt.defaults.exitcodes.EX_GENERIC

            ret["retcode"] = retcode
        except CommandExecutionError as exc:
            msg = "Error running '{0}': {1}\n"
            active_level = LOG_LEVELS.get(self.opts["log_level"].lower(), logging.ERROR)
            if active_level <= logging.DEBUG:
                sys.stderr.write(traceback.format_exc())
            sys.stderr.write(msg.format(fun, exc))
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        except CommandNotFoundError as exc:
            msg = "Command required for '{0}' not found: {1}\n"
            sys.stderr.write(msg.format(fun, exc))
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        try:
            os.remove(proc_fn)
        except OSError:
            pass
        if hasattr(self.minion.functions[fun], "__outputter__"):
            oput = self.minion.functions[fun].__outputter__
            if isinstance(oput, str):
                ret["out"] = oput
        is_local = (
            self.opts["local"]
            or self.opts.get("file_client", False) == "local"
            or self.opts.get("master_type") == "disable"
        )
        returners = self.opts.get("return", "").split(",")
        if (not is_local) or returners:
            ret["id"] = self.opts["id"]
            ret["fun"] = fun
            ret["fun_args"] = self.opts["arg"]
            if metadata is not None:
                ret["metadata"] = metadata

        for returner in returners:
            if not returner:  # if we got an empty returner somehow, skip
                continue
            try:
                ret["success"] = True
                self.minion.returners["{}.returner".format(returner)](ret)
            except Exception:  # pylint: disable=broad-except
                pass

        # return the job infos back up to the respective minion's master
        if not is_local:
            try:
                mret = ret.copy()
                mret["jid"] = "req"
                self.return_pub(mret)
            except Exception:  # pylint: disable=broad-except
                pass
        elif self.opts["cache_jobs"]:
            # Local job cache has been enabled
            salt.utils.minion.cache_jobs(self.opts, ret["jid"], ret)

        return ret
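
In the error paths above, the LOG_LEVELS.get() lookup on opts["log_level"] decides whether the traceback reaches stderr at all: only DEBUG (or more verbose) configurations get it. A compact illustration of that gate with a stand-in mapping:

import logging
import sys
import traceback

LOG_LEVELS = {"debug": logging.DEBUG, "info": logging.INFO, "error": logging.ERROR}  # stand-in

def report(exc, configured_level):
    active_level = LOG_LEVELS.get(configured_level.lower(), logging.ERROR)
    if active_level <= logging.DEBUG:
        sys.stderr.write(traceback.format_exc())  # full traceback only when debugging
    sys.stderr.write("Error running 'test.fun': {}\n".format(exc))

try:
    raise ValueError("boom")
except ValueError as exc:
    report(exc, "info")    # traceback suppressed
    report(exc, "debug")   # traceback included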