예제 #1
0
def basicConfig(level='INFO', redirectLogging=False, colorized=False):
    """Push a process-wide logbook handler.

    :param level: minimum record level the handler accepts
    :param redirectLogging: also funnel stdlib ``logging`` and ``warnings``
        output through logbook
    :param colorized: use the color-forcing handler instead of plain stderr
    """
    if colorized:
        handler = ColorizedHandler(level=level, bubble=True)
        handler.force_color()
    else:
        handler = StreamHandler(sys.stderr, level=level, bubble=True)

    handler.format_string = mainFormatString
    handler.push_application()

    if redirectLogging:
        redirect_logging()
        redirect_warnings()
예제 #2
0
    def __init__(self, filename, logname='Backtest', level=NOTSET):
        """Build a logger writing DEBUG+ to a timestamped file and INFO+ to stdout.

        :param filename: base path; its extension is replaced by
            ``_YYYY-mm-dd_HHMM.log``
        :param logname: channel name passed to the base logger
        :param level: minimum level of the logger itself
        """
        super().__init__(logname, level)

        base, _ext = os.path.splitext(filename)
        stamp = datetime.datetime.now().strftime('%Y-%m-%d_%H%M')
        log_filename = '{}_{}.log'.format(base, stamp)

        file_handler = FileHandler(log_filename, level=DEBUG, bubble=True)
        file_handler.format_string = LOG_ENTRY_FMT
        self.handlers.append(file_handler)

        stream_handler = StreamHandler(sys.stdout, level=INFO)
        stream_handler.format_string = LOG_ENTRY_FMT
        self.handlers.append(stream_handler)
예제 #3
0
def setup():
    """Create the log directory and configure the file and stream handlers."""
    # makedirs(..., exist_ok=True) avoids the check-then-create race of
    # exists()+mkdir() and also creates any missing parent directories.
    os.makedirs(LOG_FILE_DIR, exist_ok=True)

    file_handler = TimedRotatingFileHandler(
        filename=LOG_FILE_PATH,
        backup_count=config.get_logging_backup_count())

    # Only CRITICAL records reach the console; everything else goes to file.
    stream_handler = StreamHandler(sys.stdout, level='CRITICAL')
    stream_handler.format_string = '{record.level_name}: {record.channel}: {record.message}'

    file_handler.push_application()
    stream_handler.push_application()
예제 #4
0
def initialize(eventlog_file=None):
    """
    Set up analytics event output, directed to either a file or stdout.

    Without a call to this function no analytics events are emitted. Passing a
    filename sends events to that file; passing 'STDOUT' or None sends them to
    stdout.

    :param eventlog_file: The filename to output events to, 'STDOUT' to output to stdout, None to disable event logging
    :type eventlog_file: str | None
    """
    global _analytics_logger, _eventlog_file

    _eventlog_file = eventlog_file
    if not eventlog_file:
        # Falsy filename means analytics stay disabled.
        _analytics_logger = None
        return

    if eventlog_file.upper() == 'STDOUT':
        event_handler = StreamHandler(sys.stdout)
    else:
        fs.create_dir(os.path.dirname(eventlog_file))
        had_previous_log = os.path.exists(eventlog_file)

        event_handler = RotatingFileHandler(
            filename=eventlog_file,
            max_size=Configuration['max_eventlog_file_size'],
            backup_count=Configuration['max_eventlog_file_backups'],
        )
        if had_previous_log:
            # force starting a new eventlog file on application startup
            event_handler.perform_rollover()

    event_handler.format_string = '{record.message}'  # only output raw log message -- no timestamp or log level
    handler = TaggingHandler(
        {'event': event_handler},  # enable logging to the event_handler with the event() method
        bubble=True,
    )
    handler.push_application()

    _analytics_logger = TaggingLogger('analytics', ['event'])
예제 #5
0
def initialize(eventlog_file=None):
    """
    Initialize the analytics output. This will cause analytics events to be output to either a file or stdout.

    If this function is not called, analytics events will not be output. If it is called with a filename, the events
    will be output to that file. If it is called with 'STDOUT' or None, the events will be output to stdout.

    :param eventlog_file: The filename to output events to, 'STDOUT' to output to stdout, None to disable event logging
    :type eventlog_file: str | None
    """
    # Module-level state consumed elsewhere in this module.
    global _analytics_logger, _eventlog_file

    _eventlog_file = eventlog_file
    if not eventlog_file:
        # Falsy filename: analytics disabled entirely.
        _analytics_logger = None
        return

    if eventlog_file.upper() == 'STDOUT':
        event_handler = StreamHandler(sys.stdout)
    else:
        # Make sure the parent directory exists before the handler opens the file.
        fs.create_dir(os.path.dirname(eventlog_file))
        previous_log_file_exists = os.path.exists(eventlog_file)

        event_handler = RotatingFileHandler(
            filename=eventlog_file,
            max_size=Configuration['max_eventlog_file_size'],
            backup_count=Configuration['max_eventlog_file_backups'],
        )
        if previous_log_file_exists:
            event_handler.perform_rollover()  # force starting a new eventlog file on application startup

    event_handler.format_string = '{record.message}'  # only output raw log message -- no timestamp or log level
    handler = TaggingHandler(
        {'event': event_handler},  # enable logging to the event_handler with the event() method
        bubble=True,
    )
    handler.push_application()

    _analytics_logger = TaggingLogger('analytics', ['event'])
예제 #6
0
        "{record.func_name}(): {record.message}"
    )
    _rfh.push_application()
    logger.handlers.append(_rfh)

# Check if running as PyInstaller generated frozen executable.
FROZEN = hasattr(sys, "frozen")

# Frozen builds have no console window, so a stdout handler is pointless there.
if FROZEN:
    logger.info("not adding stdout logging handler in frozen mode")
else:
    _sh = StreamHandler(
        sys.stdout,
        level="INFO",
        format_string=(
            "[{record.time}] {record.level_name}: {record.channel}: "
            "{record.func_name}(): {record.message}"
        ),
    )
    logger.info("adding logging handler: {h}", h=_sh)
    _sh.push_application()
    logger.handlers.append(_sh)


class VNGameExeNotFoundError(Exception):
    """Marker exception; raised when the VN game executable cannot be located."""


def find_ww_workshop_content() -> List[Path]:
    proc_arch = os.environ["PROCESSOR_ARCHITECTURE"].lower()
    try:
        proc_arch64 = os.environ["PROCESSOR_ARCHITEW6432"].lower()
    except KeyError:
        proc_arch64 = None
예제 #7
0
파일: log.py 프로젝트: mnms/LTCLI
def inject_extra(record):
    """Processor callback: stash basename and color codes in record.extra."""
    extra = record.extra
    extra['basename'] = os.path.basename(record.filename)
    extra['level_color'] = get_log_color(record.level)
    extra['clear_color'] = color.ENDC


logger = Logger('root')

# extra info: run inject_extra on every record before formatting
processor = Processor(inject_extra)
processor.push_application()

# for screen log
screen_level = INFO
stream_handler = StreamHandler(sys.stdout, level=screen_level, bubble=True)
stream_handler.format_string = formatter['screen']
stream_handler.push_application()

# for rolling file log
p = os.environ['FBPATH']
if not os.path.isdir(p):
    # Portable and race-free; replaces shelling out to `mkdir -p`.
    os.makedirs(p, exist_ok=True)
file_path = os.path.expanduser(os.path.join(p, 'logs'))
if os.path.isdir(file_path):
    backup_count = 7
    max_size = 1024 * 1024 * 1024  # 1Gi
    file_level = DEBUG
    each_size = max_size / (backup_count + 1)
    filename = os.path.join(file_path, 'ltcli-rotate.log')
    rotating_file_handler = RotatingFileHandler(filename=filename,
                                                level=file_level,
예제 #8
0
    return args, state_dict, resume


if __name__ == '__main__':
    args, state_dict, resume = prepare()

    # Timestamped records go to stdout; WARNING and above also hit stderr.
    stdout_handler = StreamHandler(sys.stdout, bubble=True)
    stderr_handler = StderrHandler(level=WARNING)
    stdout_handler.format_string = (
        '[{record.time:%Y-%m-%d %H:%M:%S.%f%z}] '
        '{record.level_name}: {record.message}'
    )

    with stdout_handler.applicationbound():
        if resume:
            logger.info(
                f'Resume training from checkpoint: {Loader.get_latest(args.model)[1]}'
            )

        try:
            main(args)
        except Exception:
            # Log the full traceback instead of letting the process die silently.
            logger.error(f'\n{traceback.format_exc()}')
예제 #9
0
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.

"""Log specific to TalisMUD processes."""

import sys

from logbook import Logger, StreamHandler

# Raw-message handler on stdout; the format is given at construction time.
stream = StreamHandler(sys.stdout, level="DEBUG", bubble=True,
                       format_string="{record.message}")

class ProcessLogger(Logger):

    """Logger bound to one process; tags every record with the process name."""

    def __init__(self, process):
        super().__init__(f"process.{process.name}")
        self.process = process

    def process_record(self, record):
        super().process_record(record)
        # Expose the owning process name to format strings via record.extra.
        record.extra['process'] = self.process.name
예제 #10
0
    return Item1(s)

def level(s):
    """Indentation depth of *s*: offset of the first word match, in 4-space steps."""
    match = r_word.search(s)
    return match.start() // 4

        
source, dest = [], []
current_level = -1

debugging = False
if debugging:
    # NullHandler discards matching records; pushed here so debug() chatter
    # stays suppressed unless debugging is flipped on.
    dhandler = NullHandler(level=DEBUG)
    dhandler.format_string = '{record.message}'
    dhandler.push_application()
handler = StreamHandler(stdout, level=NOTICE)
handler.format_string = '{record.message}'
handler.push_application()


for s in open("todo.txt"):
    l = level(s)
    debug("levels {}, {}".format(current_level, l))
    s = s.strip()
    if not s: continue
    if l > current_level:
        d = join(downloads_home,  *dest)
        if not isdir(d): mkdir(d)
    if l <= current_level:  
        if current_level: store()
        source = source[:l]
        dest = dest[:l]
예제 #11
0
Helper top-level functions are provided so you can easily control
logging in your application (logging is turned off by default, except
if you specify otherwise).  Read
[BUI log](https://bui-project.org/log.html) for more information.

"""

from logbook import FileHandler, Logger, StreamHandler
import sys

# Two handlers are created.  Notice that other parts of the application
# will create handlers as well, these two are general handlers (one stream
# handler configured on `sys.stdout`, one file handler set on "bui.log"
# although you can change the file name).
stream = StreamHandler(sys.stdout, encoding="utf-8", level="INFO", bubble=True,
                       format_string=(
                           "[{record.level_name}] {record.channel}: {record.message}"))
file = FileHandler("bui.log",
                   encoding="utf-8",
                   level="INFO",
                   delay=True,
                   bubble=True,
                   format_string=(
                       "{record.time:%Y-%m-%d %H:%M:%S.%f%z} [{record.level_name}] "
                       "{record.channel}: {record.message}"))

# Neither handler is active yet: nothing is logged until `push_application`
# is called on them.
logger = Logger("bui")

예제 #12
0
import datetime
import linecache
import os
import tracemalloc

# log in local time instead of UTC
set_datetime_format("local")  # timestamps in local time instead of UTC
LOG_ENTRY_FMT = '[{record.time:%Y-%m-%d %H:%M:%S}] {record.level_name}: {record.message}'

logfilename = os.path.join(env["HOME"], "log", "sharadar-zipline.log")
log = Logger('sharadar_db_bundle')
log_file_handler = FileHandler(logfilename, level=DEBUG, bubble=True)
log_std_handler = StreamHandler(sys.stdout, level=INFO)
# Same format everywhere: DEBUG+ to the file, INFO+ to stdout.
for _handler in (log_file_handler, log_std_handler):
    _handler.format_string = LOG_ENTRY_FMT
    log.handlers.append(_handler)


def log_top_mem_usage(logger, snapshot, key_type='lineno', limit=10):
    """Log the *limit* largest allocation sites from a tracemalloc snapshot.

    :param logger: object with an ``info`` method
    :param snapshot: tracemalloc snapshot (importlib/unknown frames filtered out)
    :param key_type: grouping key passed to ``Snapshot.statistics``
    :param limit: number of top entries to report
    """
    snapshot = snapshot.filter_traces((
        tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
        tracemalloc.Filter(False, "<unknown>"),
    ))
    ranked = snapshot.statistics(key_type)

    logger.info("Top %s lines" % limit)
    for rank, stat in enumerate(ranked[:limit], 1):
        frame = stat.traceback[0]  # most recent frame of the allocation site
        logger.info("#%s: %s:%s: %.1f KiB"
                    % (rank, frame.filename, frame.lineno, stat.size / 1024))
예제 #13
0
from logbook import Logger, StreamHandler
from logbook import (INFO, DEBUG)
import sys

stream_handler = StreamHandler(
    sys.stdout,
    level=INFO,
    format_string=('[{record.time:%Y-%m-%d %H:%M:%S}][Mirai] '
                   '{record.level_name}: {record.channel}: {record.message}'),
)
stream_handler.push_application()

# One channel per subsystem; Network alone logs down to DEBUG.
Event = Logger('Event', level=INFO)
Network = Logger("Network", level=DEBUG)
Session = Logger("Session", level=INFO)
Protocol = Logger("Protocol", level=INFO)
예제 #14
0
import time

from logbook import Logger
from logbook import StreamHandler
from rs2wapy import RS2WebAdmin

from simplediscordwh import DiscordWebhook

# Required configuration; a missing variable raises KeyError at import time.
WEBHOOK_URL = os.environ["WEBHOOK_URL"]
WA_USERNAME = os.environ["WA_USERNAME"]
WA_PASSWORD = os.environ["WA_PASSWORD"]
WA_URL = os.environ["WA_URL"]

handler = StreamHandler(
    sys.stdout,
    level="INFO",
    format_string=(
        "[{record.time}] {record.level_name}: {record.module}: "
        "{record.func_name}: Process({record.process}): {record.message}"),
)
logger = Logger(__name__)
logger.handlers.append(handler)


def write_exception(e):
    """Best-effort error sink: echo *e* and append it to errors.log."""
    try:
        line = f"{datetime.datetime.now().isoformat()}: {type(e)}: {e}\n"
        print(line)
        with open("errors.log", "a") as log_file:
            log_file.writelines([line, "----------------\n"])
    except Exception as e:
        # Even recording the error failed; fall back to the logger.
        logger.error(e)
예제 #15
0
a) 传 格式 字符串
b)hook format function

"""

from logbook import StreamHandler, info
import sys

# 1. set format_string
_FMT = "[{record.time}] {record.level_name}: {record.channel}:{record.message}"

# 1. pass format_string to the constructor
sh = StreamHandler(sys.stdout, format_string=_FMT)

# 2. assign format_string after construction (same effect)
sh.format_string = _FMT


# 3. invoke the format function
def my_format_fun(record, handler):
    """Render a record as '[time] LEVEL:channel: message'."""
    stamp = "[" + str(record.time) + "]"
    prefix = record.level_name + ":" + record.channel + ":"
    return " ".join([stamp, prefix, record.message])


sh.formatter = my_format_fun  # a formatter callable takes precedence over format_string


def main():
    """Emit one test record through whatever handlers are pushed."""
    info("test")
예제 #16
0
파일: log.py 프로젝트: joki2002/test
def inject_extra(record):
    # Logbook processor callback: runs on every record before it is formatted,
    # so format strings can reference {record.extra[basename]} etc.
    record.extra['basename'] = os.path.basename(record.filename)
    record.extra['level_color'] = get_log_color(record.level)  # per-level escape code -- see get_log_color
    record.extra['clear_color'] = color.ENDC  # presumably the ANSI reset code -- verify against `color`


logger = Logger('root')

# extra info
# Push the processor so inject_extra enriches every record with the
# extra fields the format strings reference.
processor = Processor(inject_extra)
processor.push_application()

# for screen log
# Detailed on-screen output at DEBUG; set_level() below adjusts this at runtime.
screen_level = DEBUG
stream_handler = StreamHandler(sys.stdout, level=screen_level, bubble=True)
stream_handler.format_string = formatter['screen_detail']
stream_handler.push_application()


def set_level(level):
    """Change the screen handler's threshold to *level*.

    Accepted names: 'debug', 'info', 'warning', 'error' and the alias 'warn'.
    An unknown name logs an error and leaves the handler unchanged.
    """
    accepted = ['debug', 'info', 'warning', 'error', 'warn']
    if level not in accepted:
        # Hide the 'warn' alias from the suggestion list in the error message.
        accepted.remove('warn')
        logger.error("LogLevelError: '{}'. Select in {}".format(
            level, accepted))
        return
    stream_handler.level = get_log_code(level)
    print(color.white('Changed log level to {}'.format(level)))