def get_rot_file_logger(name, path):
    """Return a DEBUG-level logger writing to a rotating file at *path*.

    Any pre-existing log files are rolled over immediately, so every call
    starts writing into a fresh file; up to 5 rolled backups are kept.
    """
    file_handler = RotatingFileHandler(path, backupCount=5, encoding="utf-8")
    # Rotate whatever is already on disk before the first write of this run.
    file_handler.doRollover()
    # click.echo already adds a newline, so suppress the handler's own.
    file_handler.terminator = ""

    rot_logger = logging.getLogger(name)
    rot_logger.setLevel(logging.DEBUG)
    rot_logger.addHandler(file_handler)
    return rot_logger
def setup_and_get_worker_interceptor_logger(is_for_stdout: bool = True):
    """Setup a logger to be used to intercept worker log messages.

    NOTE: The method is not idempotent — calling it twice adds a second
    handler to the same named logger.

    Ray worker logs should be treated in a special way because there's a
    need to intercept stdout and stderr to support various ray features.
    For example, ray will prepend 0 or 1 in the beginning of each log
    message to decide if logs should be streamed to drivers.

    This logger will also setup the RotatingFileHandler for ray worker
    processes.

    Args:
        is_for_stdout(bool): True if logger will be used to intercept
            stdout. False otherwise (stderr).
    """
    # One logger per stream; the stream also becomes the log-file extension.
    file_extension = "out" if is_for_stdout else "err"
    logger = logging.getLogger(f"ray_default_worker_{file_extension}")
    logger.setLevel(logging.INFO)
    # TODO(sang): This is how the job id is propagated to workers now.
    # But eventually, it will be clearer to just pass the job id.
    job_id = os.environ.get("RAY_JOB_ID")
    if args.worker_type == "WORKER":
        # Regular workers must have the job id injected by default_worker.py.
        assert job_id is not None, (
            "RAY_JOB_ID should be set as an env "
            "variable within default_worker.py. If you see this error, "
            "please report it to Ray's Github issue.")
        worker_name = "worker"
    else:
        # IO workers are not tied to a job; use the nil job id instead.
        job_id = ray.JobID.nil()
        worker_name = "io_worker"

    # Make sure these values are set already (the node/worker must be
    # initialized before this logger can locate the session directory).
    assert ray.worker._global_node is not None
    assert ray.worker.global_worker is not None
    # Log file lives under the session's logs dir and is keyed by worker id,
    # job id and pid so concurrent workers never share a file.
    handler = RotatingFileHandler(
        f"{ray.worker._global_node.get_session_dir_path()}/logs/"
        f"{worker_name}-"
        f"{ray.utils.binary_to_hex(ray.worker.global_worker.worker_id)}-"
        f"{job_id}-{os.getpid()}.{file_extension}")
    logger.addHandler(handler)
    # TODO(sang): Add 0 or 1 to decide whether
    # or not logs are streamed to drivers.
    handler.setFormatter(logging.Formatter("%(message)s"))
    # Avoid messages being propagated to parent loggers.
    logger.propagate = False
    # Remove the terminator. It is important because we don't want this
    # logger to add a newline at the end of string.
    handler.terminator = ""
    return logger
def __init__(self, log_file_name="main_log.log",
             log_format="%(asctime)s %(levelname)s %(message)s",
             terminator=None, print_stdout=True):
    """Configure an INFO-level rotating-file logger from app settings.

    Args:
        log_file_name: File name for the log (also used as the logger name).
        log_format: logging.Formatter format string for each record.
        terminator: Optional line terminator override for the handler;
            when None the handler default ("\n") is kept.
        print_stdout: Stored flag; presumably consulted by the class's
            logging methods to mirror output to stdout — TODO confirm.
    """
    log_settings = settings.Settings().Logging
    log_file_dir = log_settings.LogFileDir
    log_formatter = logging.Formatter(log_format)
    # exist_ok avoids the check-then-create race of the former
    # os.path.exists() + os.makedirs() pair.
    os.makedirs(log_file_dir, exist_ok=True)
    # os.path.join instead of manual "/" concatenation keeps the path
    # correct regardless of trailing separators or platform.
    my_handler = RotatingFileHandler(
        filename=os.path.join(log_file_dir, log_file_name),
        mode=log_settings.Mode,
        maxBytes=log_settings.MaxFileSize,
        backupCount=log_settings.MaxBackupFiles,
        encoding=None,
        delay=0)
    if terminator is not None:
        my_handler.terminator = terminator
    my_handler.setFormatter(log_formatter)
    my_handler.setLevel(logging.INFO)
    self._print_stdout = print_stdout
    self.app_log = logging.getLogger(log_file_name)
    self.app_log.setLevel(logging.INFO)
    self.app_log.addHandler(my_handler)
    # Maps stdlib logging levels to the display names used by this class.
    self._log_map = {
        10: "Debug",
        20: "Info",
        30: "Warn",
        40: "Error"}
# result = result.replace("\n", "") return result logger = logging.getLogger('app') logger.setLevel(logging.DEBUG) logger.propagate = False handler = RotatingFileHandler('log/app.log', maxBytes=512 * 1024 * 1024, backupCount=2) # formatter = logging.Formatter('[%(asctime)s] [p%(process)s] [%(funcName)s] [%(pathname)s:%(lineno)d] [%(levelname)s] - %(message)s','%m-%d %H:%M:%S') formatter = SingleLineFormatter( '[%(asctime)s] [p%(process)s] [%(funcName)s] [%(pathname)s:%(lineno)d] [%(levelname)s] - %(message)s', '%m-%d %H:%M:%S') handler.setFormatter(formatter) handler.terminator = "" logger.addHandler(handler) TCP_LOG_ADDR = os.getenv('TCP_LOG_ADDR', '134.209.98.218') TCP_LOG_PORT = os.getenv('TCP_LOG_PORT', 5500) TCP_LOG_ENABLE = os.getenv('TCP_LOG_ENABLE', True) tcp_logger = logging.getLogger('python-logstash-logger') tcp_logger.setLevel(logging.INFO) tcp_logger.addHandler( logstash.TCPLogstashHandler(TCP_LOG_ADDR, TCP_LOG_PORT, version=1)) def tcp_log(msg, log_type='info', extra=None): try: if not TCP_LOG_ENABLE: return