def setup_logger(logger: logging.Logger, level: LevelIsh) -> None:
    lvl = mklevel(level)
    try:
        import logzero  # type: ignore[import]
        formatter = logzero.LogFormatter(
            fmt=FORMAT_COLOR,
            datefmt=DATEFMT,
        )
        use_logzero = True
    except ModuleNotFoundError:
        warnings.warn("You might want to install 'logzero' for nice colored logs!")
        formatter = logging.Formatter(fmt=FORMAT_NOCOLOR, datefmt=DATEFMT)
        use_logzero = False

    if use_logzero and not COLLAPSE_DEBUG_LOGS:
        # all set, nothing to do
        # 'simple' setup
        logzero.setup_logger(logger.name, level=lvl, formatter=formatter)
        return

    h = CollapseDebugHandler() if COLLAPSE_DEBUG_LOGS else logging.StreamHandler()
    logger.setLevel(lvl)
    h.setLevel(lvl)
    h.setFormatter(formatter)
    logger.addHandler(h)
    logger.propagate = False  # ugh. otherwise it duplicates log messages
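# A minimal sketch of the module-level names the wrapper above assumes; the
# format strings and date format here are illustrative placeholders, not the
# originals (CollapseDebugHandler would also live at module level).
import logging
from typing import Union

LevelIsh = Union[int, str]  # a numeric level or a name like 'DEBUG'

DATEFMT = '%Y-%m-%d %H:%M:%S'
# logzero substitutes %(color)s / %(end_color)s with ANSI escapes
FORMAT_COLOR = '%(color)s[%(levelname)s %(name)s %(asctime)s]%(end_color)s %(message)s'
FORMAT_NOCOLOR = '[%(levelname)s %(name)s %(asctime)s] %(message)s'

COLLAPSE_DEBUG_LOGS = False


def mklevel(level: LevelIsh) -> int:
    # accept 'debug' / 'INFO' / ... or pass an int through unchanged
    if isinstance(level, str):
        return getattr(logging, level.upper())
    return level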
def _get_logger(self, name, record=False):
    '''Get a logger.'''
    if record:
        logger = setup_logger(name,
                              logfile=self.logfile,
                              formatter=RECORD_FORMAT,
                              disableStderrLogger=True)
    else:
        logger = setup_logger(name, formatter=LOG_FORMAT)
    return logger
def setup_logger(logger: Union[str, logging.Logger], level='DEBUG', **kwargs):
    """
    Wrapper to simplify logging setup.
    """
    def mklevel(level: Union[int, str]) -> int:
        if isinstance(level, str):
            level = level.upper()
            return getattr(logging, level)
        else:
            return level

    lvl = mklevel(level)

    if isinstance(logger, str):
        logger = logging.getLogger(logger)

    try:
        # try logzero first, so user gets nice colored logs
        import logzero  # type: ignore
        # TODO meh, default formatter shorthands logging levels making it harder to search errors..
    except ModuleNotFoundError:
        import warnings
        warnings.warn("You might want to install 'logzero' for nice colored logs")
        # ugh. why does it have to be so verbose?
        logger.setLevel(lvl)
        ch = logging.StreamHandler()
        ch.setLevel(lvl)
        FMT = '[%(levelname)s %(name)s %(asctime)s %(filename)s:%(lineno)d] %(message)s'
        ch.setFormatter(logging.Formatter(FMT))
        logger.addHandler(ch)
    else:
        logzero.setup_logger(logger.name, level=lvl)
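# Usage sketch for the wrapper above; 'my.module' is a placeholder name.
# The level argument accepts either a (case-insensitive) name or an int.
import logging

setup_logger('my.module', level='info')                         # by name
setup_logger(logging.getLogger(__name__), level=logging.DEBUG)  # by Logger instance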
def __init__(self, app: Flask, webhook_url, token, test_mode=False):
    logzero.setup_logger(telegram.__name__, level=logging.INFO)
    self._webhook_url = webhook_url
    self._app = app
    self._token = token
    self.updater = None  # type: Updater
    self.bot = None  # type: Bot
    self._test_mode = test_mode
    self._running = False
    self.__threads = list()
def setup_logzero(logger, *args, **kwargs):
    import logzero  # type: ignore
    formatter = logzero.LogFormatter(
        fmt='%(color)s[%(levelname)s %(name)s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
    )
    logzero.setup_logger(
        *args,
        **kwargs,
        name=logger.name,
        formatter=formatter,
    )
def setup_logger(name: str = "duro", stdout: bool = False) -> Logger: if stdout: return logzero.setup_logger(name=name, level=INFO) path = load_global_config().logs_path makedirs(path, exist_ok=True) logfile = f"{path}/{name}.log" return logzero.setup_logger(name=name, logfile=logfile, level=INFO, maxBytes=1_000_000)
def test_root_logger(capsys):
    """ Test creating a root logger """
    logzero.reset_default_logger()

    logger1 = logzero.setup_logger()
    assert logger1.name == 'logzero'

    logger2 = logzero.setup_logger(isRootLogger=True)
    assert logger2.name == 'root'

    logger3 = logzero.setup_logger(name='')
    assert logger3.name == 'root'
def json_logger():
    json_format = LoggerHelper.json_formatter()
    logger1 = logzero.setup_logger(
        name="json_log",
        logfile="tmp.log",
        formatter=json_format)
    return logger1
def dump_common_info(self):
    logger = logzero.setup_logger(
        name='preprocess',
        logfile=str(self.log_dir / f'{self.sdtime}_preprocess.log'))
    logger.info('=== Common information ===')
    logger.info('Model: {}'.format(self.args.arch))
    logger.info('Word embedding: {}'.format(self.args.word_embedding))
    logger.info('Training type: {}'.format(self.args.training_type))
    logger.info('# of epochs: {}'.format(self.args.epoch))
    logger.info('Batch size: {}'.format(self.args.batchsize))
    logger.info('# of encoder layers: {}'.format(self.args.layer))
    logger.info('Genre embedding dim: {}'.format(self.args.genre_unit))
    logger.info('Dropout ratio: {}'.format(self.args.dropout))
    logger.info('Weight decay: {}'.format(self.args.weight_decay))
    logger.info('GPU ID: {}'.format(self.args.gpu))
    logger.info('Cross validation: {}, # of folds: {}'.format(
        self.args.cv, self.args.fold))
    logger.info('Seed: {}'.format(self.args.seed))
    logger.info('GroupKFold: {}'.format(self.args.group))
    logger.info('Apply impression normalization: {}'.format(self.args.imp_norm))
    logger.info('Target objective: {}'.format(self.args.objective))
def vt_eod(oneoff=False):
    import datetime
    # logger = logging.getLogger("web2py.app.vbdp")
    # Log directly to the web2py log, as the scheduler seems not to run in the
    # same context as the webapp.
    import logzero
    logger = logzero.setup_logger(
        logfile="web2py.log",
        formatter=logging.Formatter(
            '%(asctime)s - %(levelname)-7s - %(funcName)s - %(message)s'),
        disableStderrLogger=True)

    # Determine next start timestamp
    nextstarttime = datetime.time(22, 0, 0, 0)
    nextstartdate = datetime.date.today() + datetime.timedelta(days=1)
    nextstart = datetime.datetime.combine(nextstartdate, nextstarttime)

    # Run uploader
    result = False
    try:
        result = vtfuncs.eod_upload_run(logger)
    except Exception:
        logger.exception("Unhandled exception in vt_eod schedule runner!")
    finally:
        # if not oneoff:
        #     # ALWAYS requeue new scheduler run!
        #     logger.info("Queueing next run for {}".format(nextstart.strftime("%d/%m/%Y %H:%M:%S")))
        #     # Set pvars={"oneoff": True} if oneoff
        #     vtscheduler.queue_task(vt_eod, start_time=nextstart, repeats=1)
        #     db2.commit()
        return result
def main(repo: str):
    global log
    logging_setup = dict(
        name=f"logger_finder:{repo}",
        level=INFO,
        logfile=f"/mnt/c/Users/mtuli/devel/python/tcc/logs/logger_finder({repo.replace('/', '__')}).log",
        fileLoglevel=INFO,
        maxBytes=1024000,
        backupCount=16,
    )
    log = setup_logger(**logging_setup)
    log.info(f"Began: {repo}")

    paths = get_paths(repo)
    with ThreadPoolExecutor(max_workers=60) as ex:
        mapped_loggers = ex.map(logger_finder, paths)

    with open(f"/mnt/c/Users/mtuli/devel/python/tcc/output/{output_csv}",
              "a", encoding="utf-8", newline="") as f:
        csv = DictWriter(f=f, fieldnames=columns)
        try:
            for path in list(mapped_loggers):
                for row in path:
                    csv.writerow(row)
        except UnicodeDecodeError as e:
            log.error(f"{repo} | UnicodeDecodeError: {e}")

    log.info(f"Ended: {repo}")
def run(self): logger = logzero.setup_logger("busy", disableStderrLogger=True, **LOGGER_CONFIG) while True: logger.info("a")
def __init__(self, log_file: str = None, device: str = "cpu"):
    self.device = device
    self.models_cache_dir = pathlib.Path(MODELS_CACHE_DIR)
    if not self.models_cache_dir.is_dir():
        self.models_cache_dir.mkdir(parents=True)
    self.final_model_dir = self.models_cache_dir.joinpath("sectlabel_elmo_bilstm")
    self.model_filepath = self.final_model_dir.joinpath("best_model.pt")
    self.data_dir = pathlib.Path(DATA_DIR)
    if not self.data_dir.is_dir():
        self.data_dir.mkdir(parents=True)
    self.train_data_url = DATA_FILE_URLS["SECT_LABEL_TRAIN_FILE"]
    self.dev_data_url = DATA_FILE_URLS["SECT_LABEL_DEV_FILE"]
    self.test_data_url = DATA_FILE_URLS["SECT_LABEL_TEST_FILE"]
    self.msg_printer = wasabi.Printer()
    self._download_if_required()
    self.data_manager = self._get_data()
    self.hparams = self._get_hparams()
    self.model = self._get_model()
    self.infer = self._get_infer_client()
    self.cli_interact = SciWINGInteract(self.infer)
    self.log_file = log_file

    if log_file:
        self.logger = setup_logger("sectlabel_logger",
                                   logfile=self.log_file,
                                   level=logging.INFO)
    else:
        self.logger = self.msg_printer
def getLogger(
    name: str,
    level: int = logzero.logging.DEBUG,
    formatter: Optional[logzero.LogFormatter] = logzero.LogFormatter(fmt=LOG_FORMAT),
) -> logzero.logging.Logger:
    """Formats and sets up the logger instance.

    Args:
        name (str): The name of the Logger.
        level (int): The default level (logzero.logging.DEBUG = 10) of the logger.
        formatter (:obj:, optional): The format of the log message.
            Defaults to the default logzero format.

    Returns:
        An instance of a logger.

    Examples:
        >>> from edgePy.util import getLogger
        >>> log = getLogger(name="script")
        >>> log.info('This is your DGElist.')
        ...

    Notes:
        1. See https://docs.python.org/3/library/logging.html#levels for more
           information about logging levels.
    """
    log_formatter = (logzero.LogFormatter(fmt=logzero.LogFormatter.DEFAULT_FORMAT)
                     if formatter is None else formatter)
    logger = logzero.setup_logger(name=name, level=level, formatter=log_formatter)
    return logger
def main(config: SklearnConfig):
    dataroot = config.trainer.dataroot
    if not os.path.isabs(dataroot):
        config.trainer.dataroot = os.path.join(hydra.utils.get_original_cwd(), dataroot)
    model_path = config.trainer.model_path
    if not os.path.isabs(model_path):
        config.trainer.model_path = os.path.join(hydra.utils.get_original_cwd(), model_path)
    print(OmegaConf.to_yaml(config))

    if config.search_type != SearchType.NONE:
        assert config.model_type != ModelType.ALL, \
            "model_type should not be ALL when search_type is not NONE"

    pd.options.display.precision = 3
    pd.options.display.max_columns = 30

    logger = setup_logger(__name__)
    trainer = SklearnTrainer(config, logger)
    if config.trainer.predict:
        print(trainer.predict())
        sys.exit()
    trainer.train()
def __init__(self, config, arguments, start_time=datetime.datetime.now(),
             name=None, output_path=None, make_output=True,
             percent_populations: list = None,
             population_vector: TextIOWrapper = None):
    """
    Initializes the GTAModelPopSyn class, responsible for building control
    totals and processing the input seed data.
    :param config:
    :param arguments:
    :param start_time:
    """
    self._config = config
    self._start_time = start_time
    self._name = name
    self._population_vector = population_vector
    self._columns = []
    if percent_populations is None:
        self._percent_populations = [1.0]
    else:
        self._percent_populations = percent_populations
    for percent_population in self._percent_populations:
        if make_output:
            os.makedirs(
                f'{config["OutputFolder"]}/{(self._name + "_") if name else ""}'
                f'{start_time:%Y-%m-%d_%H-%M}_{percent_population}/',
                exist_ok=True)
    self._arguments = arguments
    self._output_path = f'{self._config["OutputFolder"]}/{(self._name + "_") if name else ""}' \
                        f'{self._start_time:%Y-%m-%d_%H-%M}_' \
                        f'{percent_population}' if output_path is None else output_path
    self._logger = setup_logger(
        name='gtamodel',
        logfile=f'{self._output_path}/gtamodel_popsyn.log')
    self._logger.info('GTAModel PopSyn')
    self._popsyn_config = GTAModelPopSynConfig(self)
    self._popsyn_config.initialize()
    self._summary_report = ValidationReport(self)
    self._summary_report.popsyn_config = self._popsyn_config
    self._control_totals_builder = ControlTotalsBuilder(self)
    self._control_totals_builder.popsyn_config = self._popsyn_config
    self._input_processor = InputProcessor(self, self._control_totals_builder)
    self._input_processor.popsyn_config = self._popsyn_config
    self._output_processor = OutputProcessor(self, percent_population)
    self._output_processor.popsyn_config = self._popsyn_config
    self._database_processor = DatabaseProcessor(self, percent_population)
    self._settings_processor = SettingsProcessor(self)
    os.makedirs(f'{self._output_path}/Inputs/', exist_ok=True)
    self._popsyn_config = GTAModelPopSynConfig(self)
    self._copy_config_files()
def syslog(name='app', level=logging.DEBUG, fmt=DEFAULT_FORMAT):
    formatter = logzero.LogFormatter(fmt=fmt)
    return logzero.setup_logger(
        name=name,
        level=level,
        formatter=formatter
    )
def run(self): logger = logzero.setup_logger("lazy", **LOGGER_CONFIG) for _ in range(1000): logger.warn("b") # Log at approx. 10 Hz time.sleep(0.1)
def __init__(self, log_folder, tensorboard_dir, log_interval):
    log_file = str(log_folder / 'log')
    self.logger = logzero.setup_logger(
        name='main',
        logfile=log_file,
        level=20,  # logging.INFO for the stderr handler
        fileLoglevel=10,  # logging.DEBUG for the file handler
        formatter=None,
    )
    self.metrics = {
        "epoch": 0,
        "iteration": 1,
        "loss_gen": 0.,
        "loss_idis": 0.,
        "loss_vdis": 0.,
        "elapsed_time": 0,
    }
    self.log_interval = log_interval
    self.writer = SummaryWriter(str(tensorboard_dir))
    self.start_time = time.time()
    self.display_metric_names()
def __init__(self, d: "uiautomator2.Device"):
    """
    Args:
        d (uiautomator2 instance)
    """
    self._d = d
    assert hasattr(d, "click")
    assert hasattr(d, "swipe")
    assert hasattr(d, "window_size")
    assert hasattr(d, "dump_hierarchy")
    assert hasattr(d, "screenshot")

    self._watchers = []  # item: {"xpath": .., "callback": func}
    self._timeout = 10.0
    self._click_before_delay = 0.0
    self._click_after_delay = None
    self._last_source = None

    # used for click("#back"), where "back" is the key
    self._alias = {}
    self._alias_strict = False

    self._watch_stop_event = threading.Event()
    self._watch_stopped = threading.Event()
    self._dump_lock = threading.Lock()

    self.logger = setup_logger()
def __init__(self, d: "uiautomator2.Device"):
    """
    Args:
        d (uiautomator2 instance)
    """
    self._d = d
    assert hasattr(d, "click")
    assert hasattr(d, "swipe")
    assert hasattr(d, "window_size")
    assert hasattr(d, "dump_hierarchy")
    assert hasattr(d, "screenshot")

    self._watchers = []  # item: {"xpath": .., "callback": func}
    self._timeout = 10.0
    self._click_before_delay = 0.0  # pre delay
    self._click_after_delay = None  # post delay
    self._last_source = None
    self._event_callbacks = defaultdict(list)

    # used for click("#back"), where "back" is the key
    self._alias = {}
    self._alias_strict = False

    self._dump_lock = threading.Lock()
    self._watch_stop_event = threading.Event()
    self._watch_stopped = threading.Event()
    self._watch_running = False  # True while run_watchers is executing
    self._watching = False  # True while watch_forever is executing

    # Note: do not pass logging.INFO through setup_logger's level argument here;
    # otherwise the level of its StreamHandler would have to be reset with
    # setLevel as well. In short: just don't do it.
    self.logger = setup_logger()
    self.logger.setLevel(logging.INFO)
def test_unicode():
    """ Should log unicode """
    logzero.reset_default_logger()
    temp = tempfile.NamedTemporaryFile()
    try:
        logger = logzero.setup_logger(logfile=temp.name)
        msg = "😄 😁 😆 😅 😂"
        logger.debug(msg)

        with open(temp.name, "rb") as f:
            content = f.read()
            right_ans_nt_compat = {
                ("\\xf0\\x9f\\x98\\x84 \\xf0\\x9f\\x98\\x81 \\xf0\\x9"
                 "f\\x98\\x86 \\xf0\\x9f\\x98\\x85 \\xf0\\x9f\\x98\\x8"
                 "2\\r\\n'"),
                ("\\xf0\\x9f\\x98\\x84 \\xf0\\x9f\\x98\\x81 \\xf0\\x9"
                 "f\\x98\\x86 \\xf0\\x9f\\x98\\x85 \\xf0\\x9f\\x98\\x8"
                 "2\\n'"),  # note: NT uses \r\n for a newline
            }
            assert any(right_ans in repr(content)
                       for right_ans in right_ans_nt_compat)
    finally:
        temp.close()
def __init__(self, d: "uiautomator2.Device"):
    """
    Args:
        d (uiautomator2 instance)
    """
    self._d = d
    assert hasattr(d, "click")
    assert hasattr(d, "swipe")
    assert hasattr(d, "window_size")
    assert hasattr(d, "dump_hierarchy")
    assert hasattr(d, "screenshot")
    assert hasattr(d, 'wait_timeout')

    self._click_before_delay = 0.0  # pre delay
    self._click_after_delay = None  # post delay
    self._last_source = None
    self._event_callbacks = defaultdict(list)

    # used for click("#back"), where "back" is the key
    self._alias = {}
    self._alias_strict = False

    self._dump_lock = threading.Lock()

    # Note: do not pass logging.INFO through setup_logger's level argument here;
    # otherwise the level of its StreamHandler would have to be reset with
    # setLevel as well. In short: just don't do it.
    self._logger = setup_logger()
    self._logger.setLevel(logging.INFO)
def main(config: TrainerConfig):
    dataroot = config.dataroot
    if not os.path.isabs(dataroot):
        config.dataroot = os.path.join(hydra.utils.get_original_cwd(), dataroot)
    print(OmegaConf.to_yaml(config))

    pd.options.display.precision = 3
    pd.options.display.max_columns = 30

    mlflow.set_tracking_uri(f"{config.dataroot}/mlruns")
    mlflow.start_run(run_name=config.name)
    mlflow.log_params(OmegaConf.to_container(config))

    logger = setup_logger(name=__name__, level=config.loglevel.name)
    trainer = Trainer(config, logger)
    if config.eval_only:
        trainer.eval(0)
        sys.exit()
    if config.predict:
        trainer.predict()
        sys.exit()
    trainer.main()
def set_log_level(level):
    """
    Adapt the log level for information display throughout the process.

    :param level: str, one of [debug/info/warning/error]
    """
    # Ensure the global logger is overwritten
    global logger
    level_table = {
        'debug': logging.DEBUG,
        'warn': logging.WARNING,
        'warning': logging.WARNING,
        'info': logging.INFO,
        'error': logging.ERROR,
    }
    log_level = level_table[level.lower()]
    if log_level == logging.DEBUG:
        formatter = logzero.LogFormatter(
            fmt="%(color)s[%(levelname)1.1s %(module)s:%(lineno)d]%(end_color)s %(message)s")
    else:
        formatter = logzero.LogFormatter(
            fmt="%(color)s[%(levelname)1.1s]%(end_color)s %(message)s")
    logger = logzero.setup_logger(formatter=formatter, level=log_level)
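# Usage sketch: switching verbosity at runtime. Note that set_log_level
# rebinds the module-level `logger`, so code that did
# `from this_module import logger` beforehand keeps the old instance;
# access it through the module to pick up the new one.
set_log_level('debug')
logger.debug('visible, prefixed with module:lineno')
set_log_level('info')
logger.debug('suppressed again')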
def __init__(self, tenant_name, client_id, client_secret,
             chasing_enable=True, logging_enable=False):
    # logging settings
    self.logger = setup_logger()
    self.logger.disabled = not logging_enable

    # adal and MS Graph settings
    self.graph_base_url = 'https://graph.microsoft.com/'
    self.graph_version = 'beta'  # or 'v1.0'
    self.authority_url = f'https://login.microsoftonline.com/{tenant_name}'
    self.client_id = client_id
    self.client_secret = client_secret

    # chase next links
    self.chasing_enable = chasing_enable

    # refresh the token 10 minutes before expiration
    self.time_to_refresh_token_sec = 600
    self.context = adal.AuthenticationContext(self.authority_url)
    self.token = self._get_token()

    # retry settings
    self.retry_enabled = True
    self.wait_random_min = 3
    self.wait_random_max = 5
def get_logger(self):
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    logger = setup_logger(self.task_name, formatter=formatter)
    logger.setLevel(logging.DEBUG)
    return logger
def _get_logger(self, name):
    '''Get a logger.'''
    fmt = LogFormatter(
        fmt='[%(levelname)1.1s %(asctime)s] %(name)s: %(message)s')
    return setup_logger(name,
                        logfile=devnull,
                        formatter=fmt,
                        disableStderrLogger=True)
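# Companion sketch: pointing logfile at os.devnull with the stderr handler
# disabled yields a fully silenced logger, a handy do-nothing default that
# callers can still pass around. Assumed imports for the snippet above:
from os import devnull
from logzero import LogFormatter, setup_logger

null_log = setup_logger('quiet', logfile=devnull, disableStderrLogger=True)
null_log.error('discarded entirely')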
def setup_logging(logfile=None, debug_mode=False):
    logger = setup_logger(logfile=logfile)
    if debug_mode:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
    return logger
def __init__(self, logName, path='.', clevel=logging.INFO):
    '''
    :param logName: name of the log file
    :param path: path to the log file
    :param clevel: log level
    '''
    self.logger = setup_logger(
        name=logName,
        logfile=path,
        level=clevel
    )
3) trains the Word2vec model (skipgram + negative sampling);
   currently, there is no hyperparameter tuning.
"""
import os
import spacy
import logging
from joblib import cpu_count
from string import punctuation
from logzero import setup_logger
from gensim.models import Phrases
from gensim.models import Word2Vec
from gensim.models.phrases import Phraser
from gensim.models.word2vec import LineSentence
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS

logger = setup_logger(name=__name__, logfile='word2vec.log', level=logging.INFO)


def main():
    # -------------------------------------------------------------------------------
    # Parameters

    # the script will most likely work if we swap the TEXTS variable
    # with any iterable of text (where one element represents a document,
    # and the whole iterable is the corpus)
    newsgroups_train = fetch_20newsgroups(subset='train')
    TEXTS = newsgroups_train.data

    # spacy's English model for text preprocessing
    NLP = spacy.load('en')
UTCRebootTime = '06:00'  # Generally, 1-AM for me
beat_count = 0
font = 'standard'
subfont = 'cybermedium'
green = Fore.GREEN
white = Fore.WHITE
blue = Fore.BLUE

ascii_art1 = figlet_format('Congratulations!', font=font)
ascii_art2 = figlet_format('Welcome to Wonderland!', font=subfont)
print('%s%s%s' % (green, ascii_art1, white))
print('%s%s%s' % (blue, ascii_art2, white))

the_time = str(datetime.now().time())[:8]
logger = setup_logger(logfile='mysched.log', maxBytes=1000000, backupCount=3)
logger.info("We're not in Jupyter Notebook anymore. The time is %s." % the_time)


def main():
    sched.every(10).minutes.do(heartbeat)
    next_min = minute_adder(1).strftime('%H:%M')
    logger.info("When the clock strikes %s, down the rabbit hole with you!" % next_min)
    sched.every().day.at(next_min).do(the_queue)
    sched.every().day.at(UTCRebootTime).do(reboot)
    while True:
        sched.run_pending()
        time.sleep(1)


def the_queue():
""" Task : Predict if a car purchased at auction is a unfortunate purchase. Output : .csv file containing the prediction """ import os import logging import argparse import numpy as np from joblib import dump, load from logzero import setup_logger from sortedcontainers import SortedSet from sklearn.metrics import roc_auc_score from sklearn.model_selection import train_test_split from mlutils.transformers import Preprocesser from utils import clean, build_xgb, write_output logger = setup_logger(name = __name__, logfile = 'data_challenge.log', level = logging.INFO) def main(): # ----------------------------------------------------------------------------------- # Adjustable Parameters parser = argparse.ArgumentParser() parser.add_argument( '--train', action = 'store_true', help = 'training or scoring') parser.add_argument( '--inputfile', type = str, help = 'input data file name') parser.add_argument( '--outputfile', type = str, help = 'output prediction file name') args = parser.parse_args() # directory for the input data and output prediction:
from collections import defaultdict
from datetime import datetime, timedelta
from inspect import currentframe, getouterframes
from oauth2client.client import OAuth2WebServerFlow
from oauth2client import file, tools
import gspread
import pandas as pd
from pyfiglet import figlet_format
from logzero import logger, setup_logger
from colorama import Fore

filename = "oauth.dat"
client_id = "769904540573-knscs3mhvd56odnf7i8h3al13kiqulft.apps.googleusercontent.com"
client_secret = "D2F1D--b_yKNLrJSPmrn2jik"
log = setup_logger(logfile='log.log')

# To count how frequently functions have been called.
counters = defaultdict(int)


def pipulate(tab, rows, cols, columns=None):
    """All that pipulate really is"""
    row1, row2 = rows
    col1, col2 = cols
    col1, col2 = a1(col1, reverse=True), a1(col2, reverse=True)
    cl = tab.range(row1, col1, row2, col2)
    list_of_lists = cl_to_list(cl)
    if not columns:
        columns = tab.range(row1, col1, row1, col2)
def get_logger(filename):
    return setup_logger(name=filename, formatter=CustomFormatter())