def inject_logging(quiet):
    """Set up the application's logging handlers.

    A DEBUG-level NullHandler is pushed first so unhandled records are
    swallowed, a monitoring file handler persists INFO-and-above to
    thekraken.log, and — unless *quiet* — a DEBUG console handler
    echoes everything to stdout.
    """
    sink = NullHandler(level='DEBUG')
    sink.push_application()
    # Discard any message lesser than INFO
    file_sink = MonitoringFileHandler(
        os.path.join(LOG_DIR, 'thekraken.log'), level='INFO')
    file_sink.push_application()
    if quiet:
        return
    console_sink = StreamHandler(sys.stdout, level='DEBUG', bubble=True)
    console_sink.push_application()
def setup_logger(config):
    """Install the application-wide log handler chosen from *config*.

    If the Slack section defines ``log_output``, records are written to
    that file (its directory is created on demand); otherwise records
    go to standard output.

    Args:
        config: a ConfigParser-like object with has_option()/get().
    """
    if config.has_option(SLACK_SECTION_NAME, "log_output"):
        output_path = config.get(SLACK_SECTION_NAME, "log_output")
        dir_path, file_name = os.path.split(output_path)
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        # BUG FIX: the original created the directory but never installed
        # a handler, so a configured log_output silently logged nothing.
        # Mirror the sibling setup_logger variant in this codebase.
        from logbook import RotatingFileHandler
        file_handler = RotatingFileHandler(output_path, backup_count=5)
        file_handler.push_application()
    else:
        stream_handler = StreamHandler(sys.stdout)
        stream_handler.push_application()
def __init__(self, stream, level):
    """Virtually private constructor.

    Builds the singleton, attaches a Logger named 'uni-logger', and
    pushes a JSON-formatting stream handler application-wide.

    Args:
        stream: writable stream the handler logs to.
        level: logbook level name for the handler.

    Raises:
        Exception: if the singleton has already been instantiated.
    """
    # Identity test against None (PEP 8) instead of `!= None`;
    # __instance is a singleton sentinel, so identity is the right check.
    if UniLogger.__instance is not None:
        raise Exception("Logger has already been instantiated")
    UniLogger.__instance = self
    UniLogger.logger = Logger('uni-logger')
    handler = StreamHandler(stream)
    handler.level_name = level
    handler.formatter = self.json_formatter
    handler.push_application()
def setup_logger(config):
    """Route log records to the configured file, or to stdout.

    When the Slack section provides ``log_output``, a rotating file
    handler (5 backups) writes there, creating the parent directory on
    demand; otherwise records go to standard output.
    """
    if not config.has_option(SLACK_SECTION_NAME, "log_output"):
        StreamHandler(sys.stdout).push_application()
        return
    output_path = config.get(SLACK_SECTION_NAME, "log_output")
    dir_path, file_name = os.path.split(output_path)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    RotatingFileHandler(output_path, backup_count=5).push_application()
def setup():
    """Configure file and console logging.

    Every record goes to LOG_FILE_PATH via a timed rotating file
    handler; only CRITICAL records are echoed to stdout.
    """
    # makedirs(exist_ok=True) avoids the check-then-create race of the
    # original `if not exists: mkdir` and also creates any missing
    # parent directories (os.mkdir would raise FileNotFoundError).
    os.makedirs(LOG_FILE_DIR, exist_ok=True)
    file_handler = TimedRotatingFileHandler(
        filename=LOG_FILE_PATH,
        backup_count=config.get_logging_backup_count())
    stream_handler = StreamHandler(sys.stdout, level='CRITICAL')
    stream_handler.format_string = '{record.level_name}: {record.channel}: {record.message}'
    file_handler.push_application()
    stream_handler.push_application()
def basicConfig(level='INFO', redirectLogging=False, colorized=False):
    """Push a process-wide logbook handler.

    Args:
        level: minimum record level for the handler.
        redirectLogging: also funnel stdlib logging and warnings
            through logbook when True.
        colorized: use a forced-color ColorizedHandler instead of a
            plain stderr StreamHandler.
    """
    if colorized:
        handler = ColorizedHandler(level=level, bubble=True)
        handler.force_color()
    else:
        handler = StreamHandler(sys.stderr, level=level, bubble=True)
    handler.format_string = mainFormatString
    handler.push_application()
    if redirectLogging:
        redirect_logging()
        redirect_warnings()
def __init():
    """Initialise the module-level log handler and core logger.

    Reads ``app.log.driver`` (stderr/stdout/file) and ``app.log.level``
    from the config, pushes the matching logbook handler, and registers
    the 'Core' logger with the DI container.

    Raises:
        ValueError: if ``app.log.driver`` names an unknown driver.
    """
    driver = config.get('app.log.driver', 'stderr')
    level = config.get('app.log.level', 'DEBUG').upper()
    global __handler
    global __loggers
    if driver == 'stderr':
        __handler = StreamHandler(sys.stderr, level=level)
    elif driver == 'stdout':
        __handler = StreamHandler(sys.stdout, level=level)
    elif driver == 'file':
        __handler = FileHandler(filename=__get_log_file(), level=level)
    else:
        # ValueError is more precise than a bare Exception for a bad
        # config value, and stays catchable by `except Exception`.
        raise ValueError('Invalid driver for log')
    __handler.push_application()
    __loggers['core'] = Logger('Core')
    container.register('logger', __loggers['core'])
def get_logger(format_string=None):
    """Return a singleton instance of a LogBook Logger.

    Args:
        format_string: how log messages should be formatted; when
            omitted, a timestamped default with pid, level and call
            site is used.

    Returns:
        A logbook Logger.
    """
    default_format = (
        u'[{record.time:%Y-%m-%d %H:%M:%S.%f} pid({record.process})] '
        u'{record.level_name}: {record.module}::{record.func_name}:{record.lineno} {record.message}'
    )
    handler = StreamHandler(
        sys.stdout,
        format_string=format_string if format_string is not None else default_format,
    )
    handler.push_application()
    return LoggerSingle(__name__)
# coding=utf-8 import sys from logbook import Logger, StreamHandler, compat from py_dice import api_routes if __name__ == "__main__": log = Logger(__name__) handler = StreamHandler( sys.stdout, level="INFO", format_string= "{record.channel}: [{record.level_name}] {record.message}", ) compat.redirect_logging() handler.push_application() api_routes.start_api()
def get_logger(name, debug=True):
    """Create a logger named after *name*'s basename.

    In debug mode a stdout stream handler is pushed application-wide;
    otherwise records are silently dropped through a NullHandler.
    Record timestamps use local time.
    """
    logbook.set_datetime_format('local')
    if debug:
        handler = StreamHandler(sys.stdout)
    else:
        handler = NullHandler()
    handler.push_application()
    return Logger(os.path.basename(name))
class Process(metaclass=ABCMeta):
    """
    Abstract class for a process.

    This class both contains code that will be called in the main
    process, and code to be called when the child process has been
    created.
    """

    # Process name; concrete subclasses are expected to set this.
    name: str
    # Names of the services this process should start in start().
    services: Tuple[str, ...] = ()

    def __init__(self):
        self.started = False
        self.services = {}  # service name -> started service instance
        self.pid = os.getpid()
        self.logger = ProcessLogger(self)

        # Configure the process logger: full DEBUG trail to a per-process
        # file, INFO-and-above echoed to stdout.
        self.file_handler = FileHandler(
            f"logs/{self.name}.log", encoding="utf-8", level="DEBUG",
            delay=True)
        self.file_handler.format_string = (
            "{record.time:%Y-%m-%d %H:%M:%S.%f%z} [{record.level_name}] "
            "{record.message}"
        )
        self.stream_handler = StreamHandler(
            sys.stdout, encoding="utf-8", level="INFO", bubble=True)
        self.stream_handler.format_string = (
            "[{record.level_name}] {record.channel}: {record.message}"
        )
        self.file_handler.push_application()
        self.stream_handler.push_application()

    def __repr__(self):
        desc = f"<Process {self.name}, "
        if self.started:
            desc += f"running on PID={self.pid}"
        else:
            desc += "not running"
        desc += ">"
        return desc

    async def start(self):
        """Called when the process start.

        Imports and starts every declared service, runs setup(), then
        blocks until should_stop is set.
        """
        self.should_stop = asyncio.Event()
        # NOTE(review): the message below is missing its closing ")".
        self.logger.debug(f"Starting process (PID={self.pid}...")
        # Each service lives in a `service.<name>` module exposing a
        # `Service` class.
        for name in type(self).services:
            module_name = f"service.{name}"
            module = import_module(module_name)
            cls = getattr(module, "Service")
            service = cls(process=self)
            self.services[name] = service
            await service.start()
        self.logger.debug("... process started.")
        self.started = True
        await self.setup()
        await self.should_stop.wait()

    async def stop(self):
        """Called when the process stop.

        Stops and removes every running service, then runs cleanup().
        """
        self.logger.debug("Stopping process...")
        # Iterate a snapshot since entries are deleted during the loop.
        for name, service in tuple(self.services.items()):
            await service.stop()
            del self.services[name]
        self.started = False
        await self.cleanup()
        self.logger.debug("... process stopped.")

    @abstractmethod
    async def setup(self):
        """Called when services have all been started."""
        pass

    @abstractmethod
    async def cleanup(self):
        """Called when the process is about to be stopped."""
        pass

    @staticmethod
    def is_running(pid: int) -> bool:
        """
        Is the given process running?

        Args:
            pid (int): the process identifier (PID).

        Returns:
            running (bool): whether the process is running or not.
        """
        if platform.system() == "Windows":
            kernel32 = ctypes.windll.kernel32
            handle = kernel32.OpenProcess(1, 0, pid)
            if handle == 0:
                return False
            # If the process exited recently, a PID may still
            # exist for the handle. So, check if we can get the exit code.
            exit_code = ctypes.wintypes.DWORD()
            # NOTE(review): GetExitCodeProcess returns non-zero on
            # success, so `== 0` flags the *failed* call as running —
            # this looks inverted (should probably be `!= 0`); confirm
            # on a live Windows host before changing.
            is_running = (
                kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code)) == 0)
            kernel32.CloseHandle(handle)
            # See if we couldn't get the exit code or the exit code indicates
            # that the process is still running.
            return is_running and exit_code.value == _STILL_ACTIVE

        # On Linux/Mac, just try to kill the process with signal 0.
        try:
            os.kill(pid, 0)
        except OSError:
            return False
        return True

    def run_command(self, command) -> int:
        """
        Run the specified command, returning its status.

        Args:
            command (str): the command to run.
        """
        # POSIX Popen needs an argv list; shlex-style split does that.
        if platform.system() != 'Windows':
            command = split(command)
        self.logger.debug(
            f"Calling the {command!r} command"
        )
        return run(command).returncode

    def start_process(self, process_name):
        """
        Start a task in a separate process.

        This simply is a helper to create processes. This is most useful
        for the launcher and portal process. The created process will
        execute in a separate process and synchronization will have to
        be done through the CRUX/host service.

        Args:
            process_name (str): the name of the process to start.

        The name should be the script or executable name without
        extension. If the Python script is frozen (`sys.frozen` set to
        True), then the command is called as is. In other word, if the
        process name is "portal":

        1. If not frozen, executes 'python portal.py'.
        2. If frozen, executes 'portal'.
        """
        # Under Windows, specify a different creation flag
        # (0x08000000 — presumably CREATE_NO_WINDOW; confirm).
        creationflags = 0x08000000 if platform.system() == "Windows" else 0
        command = f"python {process_name}.py"
        frozen = getattr(sys, "frozen", False)
        if frozen:
            command = process_name
            command += ".exe" if platform.system() == 'Windows' else ""
        stdout = stderr = PIPE
        if platform.system() == 'Windows':
            if frozen:
                stdout = stderr = None
        elif platform.system() == "Linux":
            if frozen:
                command = "./" + command
            # Popen on Linux gets an argv list, not a shell string.
            command = command.split(" ")
        self.logger.debug(
            f"Starting the {process_name!r} process: {command!r}"
        )
        process = Popen(command, stdout=stdout, stderr=stderr,
                creationflags=creationflags)
        return process
import os import re import sys import pymysql import datetime from pymongo import MongoClient from logbook import Logger, StreamHandler, TimedRotatingFileHandler stream_log = StreamHandler(sys.stdout) stream_log.push_application() log_file = TimedRotatingFileHandler(os.path.join(os.getcwd(), "index.log"), backup_count=3) logger = Logger('Logbook') logger.handlers = [] # logger.handlers.append(log_file) logger.handlers.append(stream_log) env = os.environ.get stock_format = [r'^[SI][ZHX]\d{6}$', r'^\d{6}\.[A-Z]{4}$'] def little8code(x): """ 转换为前缀模式 :param x: :return:
from functools import partial from arrow import Arrow from cryptography.hazmat.backends.openssl import backend as openssl from cryptography.hazmat.primitives import hashes from logbook import Logger, StreamHandler from marshmallow import Schema, fields, validates_schema, ValidationError from parsec import exceptions # TODO: useful ? LOG_FORMAT = '[{record.time:%Y-%m-%d %H:%M:%S.%f%z}] ({record.thread_name})' \ ' {record.level_name}: {record.channel}: {record.message}' logger = Logger('Parsec') logger_stream = StreamHandler(sys.stdout, format_string=LOG_FORMAT) logger_stream.push_application() def to_jsonb64(raw: bytes): return base64.encodebytes(raw).decode().replace('\\n', '') def from_jsonb64(msg: str): return base64.decodebytes(msg.encode()) # TODO: monkeypatch is ugly but I'm in a hurry... def _jsonb64_serialize(obj): try: return to_jsonb64(obj) except:
import sys

from logbook import Logger, StreamHandler

# Channel name carried by every record emitted from this module.
LOG_NAME = "BaseVar"

# Application-wide handler: all records go to stderr.
log_handler = StreamHandler(sys.stderr)
log_handler.push_application()

# Shared module-level logger.
logger = Logger(LOG_NAME)
def push_stream_handler(stream=sys.stdout, level: int = NOTICE, encoding: str = 'utf-8') -> StreamHandler:
    """Push and return an application-wide stream handler.

    Args:
        stream: destination stream (defaults to sys.stdout).
        level: minimum record level (defaults to NOTICE).
        encoding: text encoding used for output.

    Returns:
        The StreamHandler, already pushed application-wide.
    """
    stream_handler = StreamHandler(stream=stream, level=level, encoding=encoding)
    stream_handler.push_application()
    return stream_handler
record.extra['basename'] = os.path.basename(record.filename) record.extra['level_color'] = get_log_color(record.level) record.extra['clear_color'] = color.ENDC logger = Logger('root') # extra info processor = Processor(inject_extra) processor.push_application() # for screen log screen_level = INFO stream_handler = StreamHandler(sys.stdout, level=screen_level, bubble=True) stream_handler.format_string = formatter['screen'] stream_handler.push_application() # for rolling file log p = os.environ['FBPATH'] if not os.path.isdir(p): os.system('mkdir -p {}'.format(p)) file_path = os.path.expanduser(os.path.join(p, 'logs')) if os.path.isdir(file_path): backup_count = 7 max_size = 1024 * 1024 * 1024 # 1Gi file_level = DEBUG each_size = max_size / (backup_count + 1) filename = os.path.join(file_path, 'ltcli-rotate.log') rotating_file_handler = RotatingFileHandler(filename=filename, level=file_level, bubble=True,
def level(s): return r_word.search(s).start() // 4 source, dest = [], [] current_level = -1 debugging = False if debugging: dhandler = NullHandler(level = DEBUG) dhandler.format_string = '{record.message}' dhandler.push_application() handler = StreamHandler(stdout, level = NOTICE) handler.format_string = '{record.message}' handler.push_application() for s in open("todo.txt"): l = level(s) debug("levels {}, {}".format(current_level, l)) s = s.strip() if not s: continue if l > current_level: d = join(downloads_home, *dest) if not isdir(d): mkdir(d) if l <= current_level: if current_level: store() source = source[:l] dest = dest[:l] debug("reduce to {}, {}".format(source,dest))
logger.handlers.append(_rfh) # Check if running as PyInstaller generated frozen executable. FROZEN = True if hasattr(sys, "frozen") else False # No console window in frozen mode. if FROZEN: logger.info("not adding stdout logging handler in frozen mode") else: _sh = StreamHandler(sys.stdout, level="INFO") logger.info("adding logging handler: {h}", h=_sh) _sh.format_string = ( "[{record.time}] {record.level_name}: {record.channel}: " "{record.func_name}(): {record.message}" ) _sh.push_application() logger.handlers.append(_sh) class VNGameExeNotFoundError(Exception): pass def find_ww_workshop_content() -> List[Path]: proc_arch = os.environ["PROCESSOR_ARCHITECTURE"].lower() try: proc_arch64 = os.environ["PROCESSOR_ARCHITEW6432"].lower() except KeyError: proc_arch64 = None if proc_arch == "x86" and not proc_arch64:
from logbook import warn, StreamHandler
import sys
from termcc.cc import cc

# Application-wide handler: warnings below are written to stdout.
my_handler = StreamHandler(sys.stdout)
my_handler.push_application()
warn(cc(':red: :yin_yang: This is a warning :reset:'))

import os
from logbook import Processor


def inject_cwd(record):
    # Attach the current working directory to each record's extras.
    record.extra['cwd'] = os.getcwd()


# Scope handler + processor to this block only (applicationbound pops
# them again on exit), so the second warning carries the cwd extra.
with my_handler.applicationbound():
    with Processor(inject_cwd).applicationbound():
        warn(cc(':blue: :yin_yang: This is a warning'))