def capture_output(name: str = 'output', level: str = 'TRACE') -> Generator[None, None, None]:
    """Context manager that captures all output while it's open (even from C libraries)
    and logs it.

    Based on https://stackoverflow.com/a/22434262/7759017
    """
    stdout_fd = sys.stdout.fileno()
    stderr_fd = sys.stderr.fileno()
    with os.fdopen(os.dup(stdout_fd), 'w') as copied_out, \
            os.fdopen(os.dup(stderr_fd), 'w') as copied_err, \
            tempfile.NamedTemporaryFile('w+') as temp_out:
        libc.fflush(None)
        sys.stdout.flush()
        sys.stderr.flush()
        os.dup2(temp_out.fileno(), stdout_fd)
        os.dup2(temp_out.fileno(), stderr_fd)
        try:
            yield
        finally:
            libc.fflush(None)
            sys.stdout.flush()
            os.dup2(copied_out.fileno(), stdout_fd)
            os.dup2(copied_err.fileno(), stderr_fd)
            temp_out.seek(0)
            record = {'name': name, 'function': '', 'line': ''}
            for line in temp_out.readlines():
                logger.patch(lambda r: r.update(record)).log(level, line.rstrip())  # type: ignore[arg-type]
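# Minimal usage sketch for capture_output(), assuming it is decorated with
# @contextlib.contextmanager (implied by the Generator[None, None, None]
# return type) and that `libc` was loaded at module level via ctypes:
import ctypes

libc = ctypes.CDLL(None)  # assumption: POSIX; exposes the C-level fflush/puts

with capture_output(name='native', level='DEBUG'):
    print('captured from Python')          # written to the redirected fd 1
    libc.puts(b'captured from C as well')  # bypasses sys.stdout, still captured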
def batch_extract(
    self,
    files: List[Path],
    output_folder: Union[str, os.PathLike],
    *,
    first_page: int = 1,
    last_page: Optional[int] = None,
) -> None:
    """Extracts text from multiple PDF files and saves the result as text files
    (one file per page).

    Args:
        files (List[Path]): List of PDF files to process
        output_folder (Union[str, os.PathLike]): Output folder
        first_page (int, optional): First page to extract. Defaults to 1.
        last_page (Optional[int], optional): Last page to extract. Defaults to None.
    """
    logfile = Path(output_folder) / 'extract.log'
    if logfile.exists():
        files = self._skip_completed(files, logfile)
    if len(files) == 0:
        return
    file_logger = self._add_logger(logfile)
    # Route console output through tqdm.write() so log lines don't corrupt the
    # progress bar. Note: patch() expects a record patcher and returns a new
    # logger, so passing a message sink to it and discarding the result (as the
    # original code did) is a no-op; a sink belongs in add().
    tqdm_handler = logger.add(lambda msg: tqdm.write(msg, end=''))
    pbar = tqdm(files, desc='File')
    for filename in pbar:
        pbar.set_description(f'Processing {filename.stem}')
        self.pdf_to_txt(filename, output_folder, first_page, last_page)
    logger.remove(tqdm_handler)
    self._remove_logger(file_logger)
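# The loguru/tqdm integration recipe from the loguru documentation, shown
# standalone: tqdm owns the terminal while a bar is active, so console log
# output has to be funneled through tqdm.write() to keep the bar intact.
import time

from loguru import logger
from tqdm import tqdm

logger.remove()
logger.add(lambda msg: tqdm.write(msg, end=''), colorize=True)

for i in tqdm(range(3), desc='demo'):
    logger.info('step {}', i)  # printed above the live progress bar
    time.sleep(0.1)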
def start(
    filename: Optional[str] = None,
    level: str = 'INFO',
    to_console: bool = True,
    to_file: bool = True,
):
    """After initialization, start file logging."""
    global _logging_started
    assert _logging_configured
    if _logging_started:
        return
    if level == 'NONE':
        return
    logger.add(PropagateHandler(), format="{message}")
    # Make sure the stdlib root logger is set so that dependency logging gets propagated
    logging.getLogger().setLevel(logger.level(level).no)
    if to_file and filename:
        logger.add(
            filename,
            level=level,
            rotation=int(os.environ.get(ENV_MAXBYTES, 1000 * 1024)),
            retention=int(os.environ.get(ENV_MAXCOUNT, 9)),
            encoding='utf-8',
            format=LOG_FORMAT,
        )
    # without --cron we log to console
    if to_console:
        if servicelogging and not sys.stderr.isatty():
            servicelogging.add_loguru_level_names()
            handler = servicelogging.choose_handler()
            handler.setFormatter(logging.Formatter(
                fmt=servicelogging.SYSLOG_PREFIX + '%(task)-15s %(message)s'))
            logger.add(handler)
        if not sys.stdout:
            logger.debug("No sys.stdout, can't log to console.")
        else:
            # Make sure we don't send any characters that the current terminal
            # doesn't support printing
            safe_stdout = codecs.getwriter(io_encoding)(sys.stdout.buffer, 'replace')
            colorize = None
            # Auto-detection for colorize doesn't seem to work properly for PyCharm.
            if "PYCHARM_HOSTED" in os.environ:
                colorize = True
            logger.add(safe_stdout, level=level, format=LOG_FORMAT, colorize=colorize)
    # flush what we have stored from the plugin initialization
    global _startup_buffer, _startup_buffer_id
    if _startup_buffer_id:
        logger.remove(_startup_buffer_id)
        for record in _startup_buffer:
            level, message = record['level'].name, record['message']
            logger.patch(lambda r: r.update(record)).log(level, message)
        _startup_buffer = []
        _startup_buffer_id = None
    _logging_started = True
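# `PropagateHandler` is not defined in this snippet; the loguru documentation
# recipe for propagating messages to standard logging, which the add() call
# above appears to rely on, looks like this:
import logging

class PropagateHandler(logging.Handler):
    def emit(self, record: logging.LogRecord) -> None:
        # Re-emit each loguru message through the stdlib logger of the same
        # name so existing logging-based handlers keep receiving it.
        logging.getLogger(record.name).handle(record)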
def test_message_update_not_overridden_by_patch(writer, colors):
    def patcher(record):
        record["message"] += " [Patched]"

    logger.add(writer, format="{level} {message}", colorize=True)
    logger.patch(patcher).opt(colors=colors).info("Message")

    assert writer.read() == "INFO Message [Patched]\n"
def test_patch_record_process(writer):
    def patch(record):
        record["process"].id = 123
        record["process"].name = "Process-123"

    logger.add(writer, format="{process} {process.name} {process.id}")
    logger.patch(patch).info("Test")

    assert writer.read() == "123 Process-123 123\n"
def test_patch_record_thread(writer):
    def patch(record):
        record["thread"].id = 111
        record["thread"].name = "Thread-111"

    logger.add(writer, format="{thread} {thread.name} {thread.id}")
    logger.patch(patch).info("Test")

    assert writer.read() == "111 Thread-111 111\n"
def test_patch_record_file(writer):
    def patch(record):
        record["file"].name = "456"
        record["file"].path = "123/456"

    logger.add(writer, format="{file} {file.name} {file.path}")
    logger.patch(patch).info("Test")

    assert writer.read() == "456 456 123/456\n"
def test_patch_record_exception(writer):
    def patch(record):
        record["exception"].traceback = None

    logger.add(writer, format="")
    try:
        1 / 0
    except ZeroDivisionError:
        logger.patch(patch).exception("Error")

    assert writer.read() == "\nZeroDivisionError: division by zero\n"
def handle(self):
    while True:
        chunk = self.connection.recv(4)
        if len(chunk) < 4:
            break
        slen = struct.unpack('>L', chunk)[0]
        chunk = self.connection.recv(slen)
        while len(chunk) < slen:
            chunk = chunk + self.connection.recv(slen - len(chunk))
        record = pickle.loads(chunk)
        level, message = record["level"].name, record["message"]
        # Replay the received record through the local logger. The patcher's
        # parameter must not be named `record`, or it shadows the received
        # record and the update becomes a no-op.
        logger.patch(lambda r: r.update(record)).log(level, message)
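# Sender-side sketch that pairs with the handler above, adapted from the
# loguru documentation recipe for shipping records across a network: each
# record is pickled and prefixed with the same 4-byte big-endian length
# ('>L') that handle() decodes.
import pickle
import socket
import struct

from loguru import logger


class SocketSink:
    def __init__(self, host: str, port: int):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((host, port))

    def write(self, message):
        record = message.record
        data = pickle.dumps(record)
        self.sock.sendall(struct.pack('>L', len(data)) + data)


logger.add(SocketSink('127.0.0.1', 9999))  # hypothetical host/port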
def test_multiple_patches(writer):
    def patch_1(record):
        record["extra"]["a"] = 5

    def patch_2(record):
        record["extra"]["a"] += 1

    def patch_3(record):
        record["extra"]["a"] *= 2

    logger.add(writer, format="{extra[a]} {message}")
    logger.patch(patch_1).patch(patch_2).patch(patch_3).info("Test")

    assert writer.read() == "12 Test\n"
# Module-level patcher (pickling requires a named, importable function); its
# body is implied by the "{extra[foo]}" format and the assertion below.
def patch_function(record):
    record["extra"]["foo"] = "bar"


def test_pickling_patched_logger(writer):
    patched_logger = logger.patch(patch_function)
    pickled = pickle.dumps(patched_logger)
    unpickled = pickle.loads(pickled)
    unpickled.add(writer, format="{extra[foo]}")
    unpickled.info("Test")
    assert writer.read() == "bar\n"
def test_add_using_patched(writer):
    logger.configure(patcher=lambda r: r["extra"].update(a=-1))
    logger_patched = logger.patch(lambda r: r["extra"].update(a=0))
    logger_patched.add(writer, format="{extra[a]} {message}")

    logger.debug("A")
    logger_patched.debug("B")

    assert writer.read() == "-1 A\n0 B\n"
def test_override_configured(writer):
    logger.configure(patcher=lambda r: r["extra"].update(a=123, b=678))
    logger2 = logger.patch(lambda r: r["extra"].update(a=456))

    logger2.add(writer, format="{extra[a]} {extra[b]} {message}")
    logger2.debug("!")

    assert writer.read() == "456 678 !\n"
def test_not_override_parent_logger(writer):
    logger_1 = logger.patch(lambda r: r["extra"].update(a="a"))
    logger_2 = logger_1.patch(lambda r: r["extra"].update(a="A"))
    logger.add(writer, format="{extra[a]} {message}")

    logger_1.debug("1")
    logger_2.debug("2")

    assert writer.read() == "a 1\nA 2\n"
def test_no_conflict(writer):
    logger_ = logger.patch(lambda r: None)
    logger_2 = logger_.patch(lambda r: r["extra"].update(a=2))
    logger_3 = logger_.patch(lambda r: r["extra"].update(a=3))

    logger.add(writer, format="{extra[a]} {message}")

    logger_2.debug("222")
    logger_3.debug("333")

    assert writer.read() == "2 222\n3 333\n"
import os
from random import uniform

from loguru import logger

LOG_PATH = os.environ.get('LOG_PATH')
LOG_LEVEL = os.environ.get('LOG_LEVEL', "INFO")

if LOG_PATH:
    logger.add(
        LOG_PATH,
        rotation="100 MB",
        enqueue=True,  # asynchronous, process-safe queueing
        encoding="utf-8",
        backtrace=True,
        diagnose=True,
        level=LOG_LEVEL)


def log_sampler(log, bins=10):
    """Sampled log output: emit roughly one message out of every `bins`."""
    if uniform(0, bins) < 1:
        logger.info(log)


# todo: add zk/es/mongo/hdfs logger
# logger = logger.patch(lambda r: r.update(name=__file__))
logger_patch = lambda name: logger.patch(lambda r: r.update(name=name))  # for the __main__ module: equivalent to __name__ = __file__

if __name__ == '__main__':
    logger.info("xx")
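# Hypothetical use of the logger_patch helper above: give records emitted from
# the __main__ module a proper name, so sinks show the file instead of "__main__".
log = logger_patch(__file__)
log.info("tagged with the file name")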
def test_override_previous_patched(writer):
    logger.add(writer, format="{extra[x]} {message}")
    logger2 = logger.patch(lambda r: r["extra"].update(x=3))
    logger2 = logger2.patch(lambda r: r["extra"].update(x=2))
    logger2.debug("4")

    assert writer.read() == "2 4\n"
def test_patch_after_add(writer):
    logger.add(writer, format="{extra[a]} {message}")
    logger_patched = logger.patch(lambda r: r["extra"].update(a=0))
    logger_patched.debug("A")

    assert writer.read() == "0 A\n"
def test_message_update_not_overridden_by_raw(writer, colors):
    logger.add(writer, colorize=True)
    logger.patch(lambda r: r.update(message="Updated!")).opt(raw=True, colors=colors).info("Raw!")

    assert writer.read() == "Updated!"
def exposed_log_sink(self, message):
    message = rpyc.classic.obtain(message)
    record = message.record
    level, message = record['level'].name, record['message']
    logger.patch(lambda r: r.update(record)).log(level, message)
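# Client-side sketch for the service method above (host/port are
# hypothetical). loguru sinks are called with a Message object, and rpyc
# exposes exposed_log_sink to clients as conn.root.log_sink:
import rpyc
from loguru import logger

conn = rpyc.connect('127.0.0.1', 18861)
logger.add(conn.root.log_sink)  # every record is replayed on the server
logger.info('forwarded to the remote service')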
op_codes = {
    0: "",  # ignore this code so that the event can be properly displayed
    1: "Heartbeat",
    2: "Identify",
    3: "Presence Update",
    4: "Voice State Update",
    6: "Resume",
    7: "Reconnect",
    8: "Request Guild Members",
    9: "Invalid Session",
    10: "Hello",
    11: "Heartbeat ACK",
}

gateway_logger = logger.patch(lambda record: record.update(name="discord.gateway"))


def log_gateway_events(data):
    remove_sensitive_info(data, ["token"])
    direction = data["gateway"].get("direction")
    event = data["gateway"].get("t")
    op_code = data["gateway"].get("op")
    gateway_logger.debug(
        f"{'Received' if direction == 'in' else 'Dispatched'} gateway event "
        f"{op_codes[op_code]}{event if event else ''}",
        extra=data,
    )
import sys
from multiprocessing.dummy import Pool
import platform

import psutil
from loguru import logger as log

log.remove()
print_format = (
    '<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green>'
    '[{extra[node]}:R{extra[rank]} Mem:{extra[mempc]}%]'
    '<level>{level}\t'
    '| <cyan>{function}</cyan>:<cyan>{line}</cyan>'
    '- {message}</level>'
)

log = log.bind(rank=1, node=platform.node())
# Re-evaluated for every record, so each line carries the current memory usage.
log = log.patch(lambda record: record['extra'].update(
    mempc=psutil.virtual_memory().percent))

format = '{time}|{level}\t|{message}'
log.add(sys.stderr, format=print_format, colorize=True)
log.info('Welcome hello')
log.debug('Welcome hello')
log.add(open('testlog.log', 'w+'), level='DEBUG')

pool = Pool(processes=4)


@log.catch(reraise=True)
def broken(x):
    return x / 0
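# Exercising the decorated function above: with catch(reraise=True), each
# worker logs the ZeroDivisionError (with traceback) through the patched
# logger and then re-raises, so pool.map() surfaces it to the caller.
try:
    pool.map(broken, range(4))
except ZeroDivisionError:
    log.info('worker failure was logged and re-raised')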
def start(
    filename: Optional[str] = None,
    level: str = 'INFO',
    to_console: bool = True,
    to_file: bool = True,
) -> None:
    """After initialization, start file logging."""
    global _logging_started

    assert _logging_configured
    if _logging_started:
        return

    if level == 'NONE':
        return

    # Make sure the stdlib root logger is set so that dependency logging gets propagated
    logging.getLogger().setLevel(logger.level(level).no)

    if to_file and filename:
        logger.add(
            filename,
            level=level,
            rotation=int(os.environ.get(ENV_MAXBYTES, 1000 * 1024)),
            retention=int(os.environ.get(ENV_MAXCOUNT, 9)),
            encoding='utf-8',
            format=LOG_FORMAT,
            filter=_log_filterer,
        )

    # without --cron we log to console
    if to_console:
        if not sys.stdout:
            logger.debug("No sys.stdout, can't log to console.")
        else:
            # Make sure we don't send any characters that the current terminal
            # doesn't support printing
            if sys.version_info >= (3, 7):
                sys.stdout.reconfigure(errors='replace')
                out = sys.stdout
            else:
                out = io.TextIOWrapper(sys.stdout.buffer, encoding=io_encoding, errors='replace')
            # Loguru autodetects whether the stream needs wrapping only when it's
            # sys.__stdout__; since we've already wrapped it, we need to add the
            # colorama support ourselves
            if os.name == "nt":
                out = colorama.AnsiToWin32(out, convert=True, strip=False, autoreset=False).stream
            logger.add(out, level=level, format=LOG_FORMAT, filter=_log_filterer)

    # flush what we have stored from the plugin initialization
    global _startup_buffer, _startup_buffer_id
    if _startup_buffer_id:
        logger.remove(_startup_buffer_id)
        for record in _startup_buffer:
            level, message = record['level'].name, record['message']
            logger.patch(lambda r: r.update(record)).log(level, message)
        _startup_buffer = []
        _startup_buffer_id = None
    _logging_started = True
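# `_startup_buffer` / `_startup_buffer_id` are assumed to be set up during
# initialization, before start() runs. A minimal sketch of that pattern (the
# names and level here are assumptions, not the project's actual code):
from typing import Any, Dict, List

_startup_buffer: List[Dict[str, Any]] = []
# Buffer every record emitted before the real sinks exist, so start() can
# replay them later via logger.patch(), as shown above.
_startup_buffer_id = logger.add(
    lambda message: _startup_buffer.append(message.record), level='DEBUG'
)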
import asyncio
import urllib

import aiohttp
import discord
from loguru import logger

from bot.config import Metrics as MetricsConf
from bot.utils.metrics import api_histogram, Timer

http_logger = logger.patch(lambda record: record.update(name="discord.http"))


def sanitize_url(url: str) -> str:
    path_components = url.split("/")
    if path_components[5] == "webhooks":
        path_components[7] = "--token--"
    if path_components[5] == "interactions":
        path_components[7] = "--token--"
    if len(path_components) > 9:
        if path_components[9] == "reactions":
            path_components[10] = "--id--"
    return "/".join(path_components)


def remove_ids(url: str) -> str:
    path_components = url.split("/")
    for index, component in enumerate(path_components):
        # The original snippet is truncated here. Presumed completion, mirroring
        # the "--id--" masking used in sanitize_url(): numeric path components
        # (snowflake ids) are replaced before re-joining.
        if component.isdigit():
            path_components[index] = "--id--"
    return "/".join(path_components)
async def _processgameline(inst, ptype, line):
    clog = log.patch(lambda record: record["extra"].update(instance=inst))
    logheader = (
        f'{Now(fmt="dt").strftime("%Y-%m-%d %H:%M:%S")}|{inst.upper():>8}|{ptype:<7}| '
    )
    linesplit = removerichtext(line[21:]).split(", ")
    if ptype == "TRAP":
        tribename, tribeid = await asyncgettribeinfo(linesplit, inst, ptype)
        msgsplit = linesplit[2][10:].split("trapped:")
        playername = msgsplit[0].strip()
        await asyncputplayerintribe(tribeid, playername)
        dino = msgsplit[1].strip().replace(")", "").replace("(", "")
        line = (
            f"{logheader}[{playername.title()}] of ({tribename}) has trapped [{dino}]"
        )
        clog.log(ptype, line)
    elif ptype == "RELEASE":
        tribename, tribeid = await asyncgettribeinfo(linesplit, inst, ptype)
        msgsplit = linesplit[2][10:].split("released:")
        playername = msgsplit[0].strip()
        await asyncputplayerintribe(tribeid, playername)
        dino = msgsplit[1].strip().replace(")", "").replace("(", "")
        line = (
            f"{logheader}[{playername.title()}] of ({tribename}) has released [{dino}]"
        )
        clog.log(ptype, line)
    elif ptype == "DEATH":
        # clog.debug(f'{ptype} - {linesplit}')
        tribename, tribeid = await asyncgettribeinfo(linesplit, inst, ptype)
        if tribename is None:
            deathsplit = removerichtext(line[21:]).split(" - ", 1)
            playername = deathsplit[0].strip()
            if deathsplit[1].find("was killed by") != -1:
                killedby = (deathsplit[1].split("was killed by")[1].strip()
                            [:-1].replace("()", "").strip())
                playerlevel = (
                    deathsplit[1].split("was killed by")[0].strip().replace("()", ""))
                line = f"{logheader}[{playername.title()}] {playerlevel} was killed by [{killedby}]"
                clog.log(ptype, line)
            elif deathsplit[1].find("killed!") != -1:
                level = deathsplit[1].split(" was killed!")[0].strip("()")
                line = f"{logheader}[{playername.title()}] {level} has been killed"
                clog.log(ptype, line)
            else:
                log.warning(f"not found gameparse death: {deathsplit}")
        else:
            pass  # log.debug(f'deathskip: {linesplit}')
    elif ptype == "TAME":
        # clog.debug(f'{ptype} - {linesplit}')
        tribename, tribeid = await asyncgettribeinfo(linesplit, inst, ptype)
        if tribename is None:
            tamed = linesplit[0].split(" Tamed ")[1].strip(")").strip("!")
            line = f"{logheader}A tribe has tamed [{tamed}]"
            clog.log(ptype, line)
        else:
            # log.debug(f'TRIBETAME: {inst}, {linesplit}')
            playername = linesplit[2][10:].split(" Tamed")[0].strip()
            await asyncputplayerintribe(tribeid, playername)
            tamed = linesplit[2].split(" Tamed")[1].strip(")").strip("!").strip()
            if playername.title() == "Your Tribe":
                line = f"{logheader}[{tribename}] tamed [{tamed}]"
                clog.log(ptype, line)
            else:
                line = f"{logheader}[{playername.title()}] of ({tribename}) tamed [{tamed}]"
                clog.log(ptype, line)
    elif ptype == "DEMO":
        # clog.debug(f'{ptype} - {linesplit}')
        tribename, tribeid = await asyncgettribeinfo(linesplit, inst, ptype)
        if tribename is None:
            pass  # clog.log(ptype, f'{logheader}SINGLDEMO: [{linesplit}]')
        else:
            # log.debug(f'TRIBEDEMO: {inst}, {linesplit}')
            playername = linesplit[2][10:].split(" demolished a ")[0].strip()
            await asyncputplayerintribe(tribeid, playername)
            if (len(linesplit[2].split(" demolished a ")) > 0
                    and linesplit[2].find(" demolished a ") != -1):
                demoitem = (linesplit[2].split(" demolished a ")[1].replace("'", "")
                            .strip(")").strip("!").strip())
                line = f"{logheader}[{playername.title()}] of ({tribename}) demolished a [{demoitem}]"
                clog.log(ptype, line)
    elif ptype == "ADMIN":
        # clog.debug(f'{ptype} - {linesplit}')
        steamid = linesplit[2].strip()[9:].strip(")")
        pname = linesplit[0].split("PlayerName: ")[1]
        cmd = linesplit[0].split("AdminCmd: ")[1].split(" (PlayerName:")[0].upper()
        if not await asyncisplayeradmin(steamid):
            clog.warning(
                f"{logheader}Admin command [{cmd}] executed by NON-ADMIN [{pname.title()}] !"
            )
            await db.update(
                "INSERT INTO kicklist (instance,steamid) VALUES ('%s','%s')"
                % (inst, steamid))
            await db.update(
                "UPDATE players SET banned = 'true' WHERE steamid = '%s'"
                % (steamid,))
        else:
            line = f"{logheader}[{pname.title()}] executed admin command [{cmd}] "
            clog.log(ptype, line)
    elif ptype == "DECAY":
        # clog.debug(f'{ptype} - {linesplit}')
        tribename, tribeid = await asyncgettribeinfo(linesplit, inst, ptype)
        decayitem = linesplit[2].split("'", 1)[1].split("'")[0]
        # decayitem = re.search('\(([^)]+)', linesplit[2]).group(1)
        line = f"{logheader}Tribe ({tribename}) auto-decayed [{decayitem}]"
        clog.log(ptype, line)
        # wglog(inst, removerichtext(line[21:]))
    elif ptype == "CLAIM":
        # log.debug(f'{ptype} : {linesplit}')
        tribename, tribeid = await asyncgettribeinfo(linesplit, inst, ptype)
        if tribename:
            if linesplit[2].find(" claimed '") != -1:
                playername = linesplit[2][10:].split(" claimed ")[0].strip()
                await asyncputplayerintribe(tribeid, playername)
                claimitem = linesplit[2].split("'", 1)[1].split("'")[0]
                line = f"{logheader}[{playername}] of ({tribename}) has claimed [{claimitem}]"
                clog.log(ptype, line)
            elif linesplit[2].find(" unclaimed '") != -1:
                playername = linesplit[2][10:].split(" unclaimed ")[0].strip()
                await asyncputplayerintribe(tribeid, playername)
                claimitem = linesplit[2].split("'", 1)[1].split("'")[0]
                line = f"{logheader}[{playername}] of ({tribename}) has un-claimed [{claimitem}]"
                clog.log(ptype, line)
        else:
            pass  # clog.log(ptype, f'{logheader} SINGLECLAIM: {linesplit}')
    elif ptype == "TRIBE":
        # clog.debug(f'{ptype} - {linesplit}')
        tribename, tribeid = await asyncgettribeinfo(linesplit, inst, ptype)
        if tribeid is not None:
            if linesplit[2].find(" was added to the Tribe by ") != -1:
                playername = (linesplit[2][10:]
                              .split(" was added to the Tribe by ")[0].strip())
                playername2 = (linesplit[2][10:]
                               .split(" was added to the Tribe by ")[1]
                               .strip().strip(")").strip("!"))
                await asyncputplayerintribe(tribeid, playername)
                await asyncputplayerintribe(tribeid, playername2)
                line = f"[{playername.title()}] was added to Tribe ({tribename}) by [{playername2.title()}]"
                clog.log(ptype, line)
            elif linesplit[2].find(" was removed from the Tribe!") != -1:
                playername = (linesplit[2][10:]
                              .split(" was removed from the Tribe!")[0].strip())
                await asyncremoveplayerintribe(tribeid, playername)
                line = f"[{playername.title()}] was removed from Tribe ({tribename})"
                clog.log(ptype, line)
            elif linesplit[2].find(" was added to the Tribe!") != -1:
                playername = (linesplit[2][10:]
                              .split(" was added to the Tribe!")[0].strip())
                await asyncputplayerintribe(tribeid, playername)
                line = f"[{playername.title()}] was added to the Tribe ({tribename})"
                clog.log(ptype, line)
            elif linesplit[2].find(" set to Rank Group ") != -1:
                playername = linesplit[2][10:].split(" set to Rank Group ")[0].strip()
                await asyncputplayerintribe(tribeid, playername)
                rankgroup = (linesplit[2][10:]
                             .split(" set to Rank Group ")[1].strip().strip("!"))
                line = f"[{playername.title()}] set to rank group [{rankgroup}] in Tribe ({tribename})"
                clog.log(ptype, line)
            else:
                clog.log(ptype, f"{logheader}{linesplit}")
    else:
        log.debug(f"UNKNOWN {ptype} - {linesplit}")
        line = f"{linesplit}"
        clog.log(ptype, line)
import sys

from loguru import logger

logger.remove()
logger.add(sys.stderr, level="DEBUG", format="{message} ({extra[context_id]})")


def _patch_func(record):
    """Patch the logger to set extra.context_id to a generic value if it does
    not exist (this happens when logging outside of the request_handler)."""
    if not record["extra"] or not record["extra"].get("context_id"):
        record["extra"]["context_id"] = "System"


logger = logger.patch(_patch_func)
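# Usage sketch for the patcher above: a request handler would bind the real
# id; anything logged without one falls back to "System".
logger.bind(context_id="req-42").info("handling request")  # handling request (req-42)
logger.info("background task")                             # background task (System)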