Code example #1
 def __init__(self):
     self.logfile = config.logPath + 'sql.log'
     logzero.logfile(self.logfile, maxBytes=1e6, backupCount=3)
     import logging
     formatter = logging.Formatter('%(asctime)-15s - [%(filename)s: %(lineno)s] -%(levelname)s: %(message)s')
     logzero.formatter(formatter)
     self.logger = logzero.logger
Code example #2
File: cli.py Project: east301/gpipe
 def run_click(debug):
     logzero.loglevel(logging.DEBUG if debug else logging.INFO)
     logzero.formatter(
         logzero.LogFormatter(
             fmt='%(asctime)s %(color)s[%(levelname).1s]%(end_color)s %(message)s',
             datefmt='%Y-%m-%d %H:%M:%S'))
Code example #3
File: cli.py Project: ravindraprasad75/chaostoolkit
def cli(ctx: click.Context, verbose: bool = False, no_version_check: bool = False,
        change_dir: str = None, no_log_file: bool = False,
        log_file: str = "chaostoolkit.log"):
    if verbose:
        logzero.loglevel(logging.DEBUG, update_custom_handlers=False)
        fmt = "%(color)s[%(asctime)s %(levelname)s] "\
              "[%(module)s:%(lineno)d]%(end_color)s %(message)s"
    else:
        logzero.loglevel(logging.INFO, update_custom_handlers=False)
        fmt = "%(color)s[%(asctime)s %(levelname)s]%(end_color)s %(message)s"

    if not no_log_file:
        # let's ensure we log at DEBUG level
        logger.setLevel(logging.DEBUG)
        logzero.logfile(
            click.format_filename(log_file), mode='a',
            loglevel=logging.DEBUG)

    logzero.formatter(
        formatter=logzero.LogFormatter(fmt=fmt, datefmt="%Y-%m-%d %H:%M:%S"),
        update_custom_handlers=False)

    subcommand = ctx.invoked_subcommand

    # make it nicer for going through the log file
    logger.debug("#" * 79)
    logger.debug("Running command '{}'".format(subcommand))

    if not no_version_check:
        check_newer_version(command=subcommand)

    if change_dir:
        logger.warning("Moving to {d}".format(d=change_dir))
        os.chdir(change_dir)
Code example #4
 def _spawn(self, config_file, dat):
     self.cfg = Conf(
         config_file=config_file,
         dat=dat,
     ).cfg
     logzero.formatter(LFormatter(log_pre=self.cfg.get('log.symbol', '')))
     logzero.loglevel(self.cfg.get('log.level', 20))
     self.zlog = logzero.logger
Code example #5
File: aspi.py Project: coderdojo-futurix/aspi
 def __create_datalogfile(self):
     self.currentDatalogFile = AsPiLogFile.generate_fileprefix() + '.' + LOGFILE_EXT
     logzero.logfile(filename=self.currentDatalogFile,
                     disableStderrLogger=not self.logToStdErr)
     self.formatter = Formatter(fmt=LOG_FORMAT, datefmt=DATE_FORMAT)
     logzero.formatter(self.formatter)
     self.__write_header()
Code example #6
File: log.py Project: testervic/AutoTest
 def __init__(self):
     self.logfile = os.path.join(config.logPath, 'core-service.log')
     logzero.logfile(self.logfile, maxBytes=1e6, backupCount=3)
     import logging
     formatter = logging.Formatter('%(asctime)-15s - [%(filename)s: %(lineno)s] -%(levelname)s: %(message)s')
     logzero.formatter(formatter)
     logzero.loglevel(logging.INFO)
     self.logger = logzero.logger
Code example #7
File: settings.py Project: andermy/tornado-ifc
def config_logs():
    logzero.logfile("logfile.log",
                    maxBytes=1000000,
                    backupCount=3,
                    loglevel=logging.ERROR)

    formatter = logging.Formatter('%(asctime)s - %(levelname)s: %(message)s')

    logzero.formatter(formatter)
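
A brief usage sketch (the call site below is illustrative, not part of the project): the loglevel passed to logzero.logfile above only applies to the file handler, so the console level is typically set separately with logzero.loglevel.

import logging
import logzero

config_logs()                    # file handler: only ERROR and above reach logfile.log
logzero.loglevel(logging.INFO)   # console (stderr) handler: INFO and above
logzero.logger.error("written to both logfile.log and stderr")
logzero.logger.info("written to stderr only; below the file handler's ERROR level")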
Code example #8
 def __init__(self):
     self.logfile = path + 'flask.log'
     logzero.logfile(self.logfile, maxBytes=1e6, backupCount=3)
     import logging
     formatter = logging.Formatter(
         '%(asctime)-15s - [%(filename)s: %(lineno)s] -%(levelname)s: %(message)s'
     )
     logzero.formatter(formatter)
     logzero.loglevel(logging.ERROR)
     self.logger = logzero.logger
Code example #9
File: __main__.py Project: bihealth/sodar-cli
def main(argv=None):
    """Main entry point before parsing command line arguments."""
    # Setup command line parser.
    parser, subparsers = setup_argparse()

    # Actually parse command line arguments.
    args = parser.parse_args(argv)

    # Setup logging incl. verbosity.
    if args.verbose:  # pragma: no cover
        level = logging.DEBUG
    else:
        # Remove module name and line number if not running in debug mode.
        formatter = logzero.LogFormatter(
            fmt="%(color)s[%(levelname)1.1s %(asctime)s]%(end_color)s %(message)s")
        logzero.formatter(formatter)
        level = logging.INFO
    logzero.loglevel(level=level)

    # Load configuration, if any.
    if args.config:
        config_paths = (args.config, )
    else:
        config_paths = GLOBAL_CONFIG_PATHS
    for config_path in config_paths:
        config_path = os.path.expanduser(os.path.expandvars(config_path))
        if os.path.exists(config_path):
            with open(config_path, "rt") as tomlf:
                toml_config = toml.load(tomlf)
            break
    else:
        toml_config = None
        logger.info("Could not find any of the global configuration files %s.",
                    config_paths)

    # Merge configuration from command line/environment args and configuration file.
    config = CommonConfig.create(args, toml_config)

    # Handle the actual command line.
    cmds = {
        None: run_nocmd,
        "project": run_project,
        "landingzone": run_landingzone,
        "samplesheet": run_samplesheet,
    }

    res = cmds[args.cmd](config, toml_config, args, parser,
                         subparsers.choices[args.cmd] if args.cmd else None)
    if not res:
        logger.info("All done. Have a nice day!")
    else:  # pragma: nocover
        logger.error("Something did not work out correctly.")
    return res
Code example #10
File: log.py Project: umaobinggary/testproject
 def __init__(self):
     self.logfile = get_path.cwd_path() + "/app/log/flask.txt"
     logzero.logfile(self.logfile,
                     maxBytes=16,
                     backupCount=3,
                     encoding="utf-8")
     formatter = logging.Formatter(
         '%(asctime)-15s - [%(filename)s: %(lineno)s] -%(levelname)s: %(message)s'
     )
     logzero.formatter(formatter)
     logzero.loglevel(logging.INFO)
     self.logger = logzero.logger
Code example #11
    def _run_cases(run, command):
        r = logging.Formatter(
            '%(name)s - %(asctime)-15s - %(levelname)s: %(message)s')
        formatter = logging.Formatter(
            '[%(asctime)s' + ' %(module)s:%(lineno)d -%(levelname)s]' +
            ' - %s' % run.get_device()['model'] + ' - %(message)s')
        logzero.formatter(formatter)
        logzero.logfile(run.get_path() + '/' + 'client.log')
        logger.info('udid: %s', run.get_device()['udid'])

        # set cls.path; it must be called before operating on any page
        path = ReportPath()
        path.set_path(run.get_path())

        run.run(command)
Code example #12
def set_logger(path, loglevel=logging.INFO, tf_board_path=None):
    path_dir = '/'.join(path.split('/')[:-1])
    if not Path(path_dir).exists():
        Path(path_dir).mkdir(parents=True)
    logzero.loglevel(loglevel)
    logzero.formatter(
        logging.Formatter('[%(asctime)s %(levelname)s] %(message)s'))
    logzero.logfile(path)

    if tf_board_path is not None:
        tb_path_dir = '/'.join(tf_board_path.split('/')[:-1])
        if not Path(tb_path_dir).exists():
            Path(tb_path_dir).mkdir(parents=True)
        writer = SummaryWriter(tf_board_path)

        return writer
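
A short usage sketch, assuming set_logger above is in scope; the paths are illustrative.

import logging

import logzero

writer = set_logger('runs/exp1/train.log', loglevel=logging.INFO,
                    tf_board_path='runs/exp1/tensorboard/events')
logzero.logger.info('training started')   # goes to stderr and runs/exp1/train.log
if writer is not None:
    writer.add_scalar('loss/train', 0.0, global_step=0)
    writer.close()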
Code example #13
def cli(verbose: bool = False, change_dir: str = None):
    if verbose:
        logzero.loglevel(logging.DEBUG, update_custom_handlers=True)
        fmt = "%(color)s[%(asctime)s %(levelname)s] "\
              "[%(module)s:%(lineno)d]%(end_color)s %(message)s"
    else:
        logzero.loglevel(logging.INFO, update_custom_handlers=True)
        fmt = "%(color)s[%(asctime)s %(levelname)s]%(end_color)s %(message)s"

    logzero.formatter(
        formatter=logzero.LogFormatter(fmt=fmt, datefmt="%Y-%m-%d %H:%M:%S"),
        update_custom_handlers=True)

    if change_dir:
        logger.warning("Moving to {d}".format(d=change_dir))
        os.chdir(change_dir)
Code example #14
File: logs.py Project: yao0013/py3
    def __init__(self, devel=settings.log_devel, filename='autotest.log'):
        # if not isinstance(logging.)
        self.logfile = os.path.join(settings.log_path, filename)
        if not os.path.isdir(settings.log_path):
            os.makedirs(settings.log_path)
        logzero.logfile(self.logfile,
                        maxBytes=1e7,
                        backupCount=3,
                        encoding='utf-8')

        formatter = logging.Formatter(
            '%(asctime)-15s - [%(filename)s: %(lineno)s] - %(levelname)s: %(message)s'
        )
        logzero.formatter(formatter)
        logzero.loglevel(devel)
        self.logger = logzero.logger
Code example #15
def getLogger(name):
    if not name.endswith('.log'):
        name = name + '.log'

    global loggerDict

    if name in loggerDict:
        return loggerDict[name]
    else:
        logzero.logfile(os.path.join(log_path, name),
                        maxBytes=1e6,
                        backupCount=3)
        fmt = logging.Formatter(
            '%(asctime)s%(levelname)8s [%(filename)s:%(lineno)d] %(message)s')
        logzero.formatter(fmt)
        loggerDict[name] = logger

        return loggerDict[name]
Code example #16
File: cli.py Project: saurabhdevops/chaostoolkit
def cli(ctx: click.Context,
        verbose: bool = False,
        no_version_check: bool = False,
        change_dir: str = None,
        no_log_file: bool = False,
        log_file: str = "chaostoolkit.log",
        settings: str = CHAOSTOOLKIT_CONFIG_PATH):
    if verbose:
        logzero.loglevel(logging.DEBUG, update_custom_handlers=False)
        fmt = "%(color)s[%(asctime)s %(levelname)s] "\
              "[%(module)s:%(lineno)d]%(end_color)s %(message)s"
    else:
        logzero.loglevel(logging.INFO, update_custom_handlers=False)
        fmt = "%(color)s[%(asctime)s %(levelname)s]%(end_color)s %(message)s"

    if not no_log_file:
        # let's ensure we log at DEBUG level
        logger.setLevel(logging.DEBUG)
        logzero.logfile(click.format_filename(log_file),
                        mode='a',
                        loglevel=logging.DEBUG)

    colors = logzero.LogFormatter.DEFAULT_COLORS.copy()
    colors[logging.CRITICAL] = logzero.ForegroundColors.RED
    logzero.formatter(
        formatter=logzero.LogFormatter(
            fmt=fmt, datefmt="%Y-%m-%d %H:%M:%S", colors=colors),
        update_custom_handlers=False)

    subcommand = ctx.invoked_subcommand

    # make it nicer for going through the log file
    logger.debug("#" * 79)
    logger.debug("Running command '{}'".format(subcommand))

    ctx.obj = {}
    ctx.obj["settings_path"] = click.format_filename(settings)
    logger.debug("Using settings file '{}'".format(ctx.obj["settings_path"]))

    if not no_version_check:
        check_newer_version(command=subcommand)

    if change_dir:
        logger.warning("Moving to {d}".format(d=change_dir))
        os.chdir(change_dir)
Code example #17
    def logs(self):
        try:
            logfile('logs/rcon.log', maxBytes=1000000, backupCount=3)

            # set up formatting
            formatter = logging.Formatter('%(asctime)s: %(message)s')
            logzero.formatter(formatter)

            logger.info(self.sendcmd)  # log the commands sent by the user
            logger.info(self.servcmd)  # log the commands from the server
        except Exception:
            pass
Code example #18
File: server.py Project: thebopshoobop/falcon_fandom
def create_api():
    """Factory function to build a server instance."""

    conf = Config()

    loglevel(conf.log_level)
    logfile(conf.log_file, maxBytes=conf.log_bytes)
    formatter(LogFormatter(fmt=conf.log_format, datefmt=conf.log_date))

    middleware = [
        LogComponent(),
        TokenHandler(conf.secret_key, conf.algorithm, conf.duration)
    ]

    api = falcon.API(middleware=middleware)
    api.resp_options.secure_cookies_by_default = not conf.dev_mode

    api.add_route('/auth', AuthHandler())
    api.add_route('/users', UsersHandler())

    return api
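
A run sketch, assuming the module is importable as server: create_api() returns a falcon.API instance, which is a standard WSGI application; the gunicorn command is illustrative, not part of the project.

from server import create_api

# any WSGI server can serve the returned falcon.API instance, e.g.:
#   gunicorn 'server:create_api()'
application = create_api()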
Code example #19
File: __main__.py Project: eudesbarbosa/cubi-tk
def main(argv=None):
    """Main entry point before parsing command line arguments."""
    # Setup command line parser.
    parser, subparsers = setup_argparse()
    argcomplete.autocomplete(parser)

    # Actually parse command line arguments.
    args = parser.parse_args(argv)

    # Setup logging verbosity.
    if args.verbose:  # pragma: no cover
        level = logging.DEBUG
    else:
        formatter = logzero.LogFormatter(
            fmt="%(color)s[%(levelname)1.1s %(asctime)s]%(end_color)s %(message)s"
        )
        logzero.formatter(formatter)
        level = logging.INFO
    logzero.loglevel(level=level)

    # Handle the actual command line.
    cmds = {
        None: run_nocmd,
        "isa-tpl": run_isa_tpl,
        "isa-tab": run_isa_tab,
        "snappy": run_snappy,
        "sea-snap": run_sea_snap,
        "sodar": run_sodar,
        "irods": run_irods,
        "org-raw": run_org_raw,
        "archive": run_archive,
    }

    res = cmds[args.cmd](args, parser, subparsers.choices[args.cmd] if args.cmd else None)
    if not res:
        logger.info("All done. Have a nice day!")
    else:  # pragma: nocover
        logger.error("Something did not work out correctly.")
    return res
Code example #20
import os

from izen.icfg import Conf, LFormatter
import logzero

PROJECT = 'sonimei'

cfg = Conf(
    pth=os.path.expanduser('~/.{0}/{0}.cfg'.format(PROJECT)),
    dat={
        'pretty.symbols': ' ,,,, ,,,,,,,,ﴖ,,,,,,,♪,',
        'snm.save_dir': '~/Music/sonimei',
        'snm.timeout': 15,
        'snm.progress_symbol': '.',
        'snm.failure_store': os.path.expanduser('~/.{0}/failed.yaml'.format(PROJECT)),
        '163.log_count': 100,
        '163.log_dir': os.path.expanduser(
            '~/Library/Containers/com.netease.163music/Data/Documents/storage/Logs/music.163.log'
        ),
    },
).cfg

logzero.formatter(LFormatter(log_pre=cfg.get('log.symbol', '')))
logzero.loglevel(cfg.get('log.level', 20))
zlog = logzero.logger
Code example #21
def app_log(info_log) -> None:
    today = date.today()
    logfile(f"log/{today.strftime('%Y-%m-%d')}.log")
    my_formatter = logging.Formatter('%(asctime)s - %(message)s')
    logzero.formatter(my_formatter)
    logger.info(info_log)
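
One caveat worth a sketch: logfile() opens log/<date>.log directly, so the log/ directory must already exist. The os.makedirs guard below is an illustration, not part of the original function.

import os

os.makedirs('log', exist_ok=True)   # ensure the directory used by app_log exists
app_log('service started')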
Code example #22
    def __init__(
        self,
        loglevel: int = DEFAULT_LOGLEVEL,
        sleep_time: int = DEFAULT_SLEEP_TIME,
        cache_dir: Optional[Union[str, Path]] = None,
        additional_extractors: Optional[List[Any]] = None,
        subtitle_language: str = DEFAULT_SUBTITLE_LANGUAGE,
        skip_subtitles: bool = False,
    ) -> None:
        """
        Main interface to the library

        subtitle_language: for youtube subtitle requests
        skip_subtitles: don't attempt to download youtube subtitles
        sleep_time: time to wait between HTTP requests
        cache_dir: location to store cached data
                   uses default user cache directory if not provided
        """

        # handle cache dir
        cdir: Optional[Path] = None
        if cache_dir is not None:
            cdir = normalize_path(cache_dir)
        else:
            if "URL_METADATA_DIR" in os.environ:
                cdir = Path(os.environ["URL_METADATA_DIR"])
            else:
                cdir = Path(user_data_dir("url_metadata"))

        if cdir.exists() and not cdir.is_dir():
            raise RuntimeError(
                "'cache_dir' '{}' already exists but is not a directory".
                format(str(cdir)))
        if not cdir.exists():
            cdir.mkdir()
        self._base_cache_dir: Path = cdir

        self.cache_dir: Path = self._base_cache_dir / "data"
        if not self.cache_dir.exists():
            self.cache_dir.mkdir()
        self.metadata_cache = MetadataCache(self.cache_dir)

        # setup logging
        self.logger = setup_logger(
            name="url_metadata",
            level=loglevel,
            logfile=self.logpath,
            maxBytes=1e7,
            formatter=formatter(
                "{start}[%(levelname)-7s %(asctime)s %(name)s %(filename)s:%(lineno)d]{end} %(message)s"
            ),
        )

        self.skip_subtitles: bool = skip_subtitles
        self.subtitle_language: str = subtitle_language
        self.sleep_time: int = sleep_time

        ll: Lassie = Lassie()
        # hackery with a requests.Session to save the most recent request object
        ll.client = SaveSession(cb_func=self._save_http_response)
        self.lassie: Lassie = ll

        # default 'last response received' to None
        self._response: Optional[Response] = None

        # initialize site-specific parsers
        self.extractor_classes = EXTRACTORS
        if additional_extractors is not None:
            for ext in additional_extractors:
                if not issubclass(ext, AbstractSite):
                    self.logger.warning(
                        f"{ext} is not a subclass of AbstractSite")
                self.extractor_classes.append(ext)

        self.extractors: List[AbstractSite] = [
            e(umc=self) for e in self.extractor_classes
        ]
Code example #23
File: core.py Project: seanbreckenridge/url_cache
    def __init__(
        self,
        *,
        cache_dir: Optional[Union[str, Path]] = None,
        loglevel: int = DEFAULT_LOGLEVEL,
        sleep_time: int = DEFAULT_SLEEP_TIME,
        additional_extractors: Optional[List[Any]] = None,
        file_parsers: Optional[List[FileParser]] = None,
        options: Optional[Options] = None,
    ) -> None:
        """
        Main interface to the library

        sleep_time: time to wait between HTTP requests
        cache_dir: location to store cached data
                   uses default user cache directory if not provided
        """

        # handle cache dir
        cdir: Optional[Path] = None
        if cache_dir is not None:
            cdir = normalize_path(cache_dir)
        else:
            if "URL_CACHE_DIR" in os.environ:
                cdir = Path(os.environ["URL_CACHE_DIR"])
            else:
                cdir = Path(user_data_dir("url_cache"))

        if cdir.exists() and not cdir.is_dir():
            raise RuntimeError(
                "'cache_dir' '{}' already exists but is not a directory".
                format(str(cdir)))
        if not cdir.exists():
            cdir.mkdir()
        self._base_cache_dir: Path = cdir

        self.cache_dir: Path = self._base_cache_dir / "data"
        if not self.cache_dir.exists():
            self.cache_dir.mkdir()

        # setup logging
        self.logger: logging.Logger = setup_logger(
            name="url_cache",
            level=loglevel,
            logfile=self.logpath,
            maxBytes=1e7,
            formatter=formatter(
                "{start}[%(levelname)-7s %(asctime)s %(name)s %(filename)s:%(lineno)d]{end} %(message)s"
            ),
        )

        self.sleep_time = sleep_time

        self.options: Options = {} if options is None else options
        self._set_option_defaults()

        ll: Lassie = Lassie()
        # hackery with a requests.Session to save the most recent request object
        ll.client = SaveSession(cb_func=self._save_http_response)
        self.lassie: Lassie = ll

        # default 'last response received' to None
        self._response: Optional[Response] = None

        # initialize site-specific parsers
        self.extractor_classes = EXTRACTORS
        if additional_extractors is not None:
            for ext in additional_extractors:
                if not issubclass(ext, AbstractSite):
                    self.logger.warning(
                        f"{ext} is not a subclass of AbstractSite")
                self.extractor_classes.append(ext)

        self.extractors: List[AbstractSite] = [
            e(uc=self) for e in self.extractor_classes
        ]

        # loop through each extractors file_parsers function
        # to append custom file parsers to the summary cache
        all_file_parsers = [] if file_parsers is None else file_parsers
        for ext in self.extractors:
            all_file_parsers.extend(ext.file_parsers())

        self.summary_cache = SummaryDirCache(self.cache_dir,
                                             file_parsers=all_file_parsers)
Code example #24
def main():
    global args, best_acc1
    args = parser.parse_args()

    args = check_args(args)

    formatter = logging.Formatter('%(message)s')
    logzero.formatter(formatter)

    if not os.path.exists(args.log_dir):
        os.makedirs(args.log_dir, exist_ok=True)

    log_filename = "{}-train.log".format(args.prefix)
    logzero.logfile(os.path.join(args.log_dir, log_filename))

    # calc rgb_mean and rgb_std
    if args.calc_rgb_mean_and_std:
        calc_rgb_mean_and_std(args, logger)

    # setup dataset
    train_loader, train_num_classes, train_class_names, valid_loader, _valid_num_classes, _valid_class_names \
        = get_dataloader(args, args.scale_size, args.input_size)

    if args.disp_batches == 0:
        target = len(train_loader) // 10
        args.disp_batches = target - target % 5
    if args.disp_batches < 5:
        args.disp_batches = 1

    logger.info('Running script with args: {}'.format(str(args)))
    logger.info("scale_size: {}  input_size: {}".format(
        args.scale_size, args.input_size))
    logger.info("rgb_mean: {}".format(args.rgb_mean))
    logger.info("rgb_std: {}".format(args.rgb_std))
    logger.info("number of train dataset: {}".format(len(
        train_loader.dataset)))
    logger.info("number of validation dataset: {}".format(
        len(valid_loader.dataset)))
    logger.info("number of classes: {}".format(len(train_class_names)))

    if args.mixup:
        logger.info("Using mixup: alpha:{}".format(args.mixup_alpha))
    if args.ricap:
        logger.info("Using RICAP: beta:{}".format(args.ricap_beta))
    if args.icap:
        logger.info("Using ICAP: beta:{}".format(args.icap_beta))
    if args.cutmix:
        logger.info("Using CutMix: prob:{} beta:{}".format(
            args.cutmix_prob, args.cutmix_beta))
    if args.cutout:
        logger.info("Using cutout: holes:{} length:{}".format(
            args.cutout_holes, args.cutout_length))
    if args.felix:
        logger.info("Using felix: holes:{} length:{}".format(
            args.felix_holes, args.felix_length))
    if args.random_erasing:
        logger.info(
            "Using Random Erasing: p:{} s_l:{} s_h:{} r1:{} r2:{}".format(
                args.random_erasing_p, args.random_erasing_sl,
                args.random_erasing_sh, args.random_erasing_r1,
                args.random_erasing_r2))

    device = torch.device("cuda" if args.cuda else "cpu")

    # create  model
    if args.resume:
        # resume from a checkpoint
        if os.path.isfile(args.resume):
            logger.info("=> loading saved checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location=device)
            args.model = checkpoint['arch']
            base_model = make_model(args.model,
                                    num_classes=train_num_classes,
                                    pretrained=False,
                                    input_size=(args.input_size,
                                                args.input_size))
            base_model.load_state_dict(checkpoint['model'])
            args.start_epoch = checkpoint['epoch']
            best_acc1 = float(checkpoint['acc1'])
            logger.info("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            logger.error("=> no checkpoint found at '{}'".format(args.resume))
            sys.exit(1)
    else:
        if args.scratch:
            # train from scratch
            logger.info("=> creating model '{}' (train from scratch)".format(
                args.model))
            base_model = make_model(args.model,
                                    num_classes=train_num_classes,
                                    pretrained=False,
                                    input_size=(args.input_size,
                                                args.input_size))
        else:
            # fine-tuning
            logger.info("=> using pre-trained model '{}'".format(args.model))
            base_model = make_model(args.model,
                                    num_classes=train_num_classes,
                                    pretrained=True,
                                    input_size=(args.input_size,
                                                args.input_size))

    if args.cuda:
        logger.info("=> using GPU")
        model = nn.DataParallel(base_model)
        model.to(device)
    else:
        logger.info("=> using CPU")
        model = base_model

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = get_optimizer(args, model)
    logger.info('=> using optimizer: {}'.format(args.optimizer))
    if args.resume:
        optimizer.load_state_dict(checkpoint['optimizer'])
        logger.info("=> restore optimizer state from checkpoint")

    # create scheduler
    if args.lr_patience:
        scheduler = get_reduce_lr_on_plateau_scheduler(args, optimizer)
        logger.info("=> using ReduceLROnPlateau scheduler")
    elif args.cosine_annealing_t_max:
        scheduler = get_cosine_annealing_lr_scheduler(
            args, optimizer, args.cosine_annealing_t_max, len(train_loader))
        logger.info("=> using CosineAnnealingLR scheduler")
    else:
        scheduler = get_multi_step_lr_scheduler(args, optimizer,
                                                args.lr_step_epochs,
                                                args.lr_factor)
        logger.info("=> using MultiStepLR scheduler")
    if args.resume:
        scheduler.load_state_dict(checkpoint['scheduler'])
        logger.info("=> restore lr scheduler state from checkpoint")

    logger.info("=> model and logs prefix: {}".format(args.prefix))
    logger.info("=> log dir: {}".format(args.log_dir))
    logger.info("=> model dir: {}".format(args.model_dir))
    tensorboardX_log_dir = os.path.join(args.log_dir,
                                        "{}-tensorboardX".format(args.prefix))
    log_writer = tensorboardX.SummaryWriter(tensorboardX_log_dir)
    logger.info("=> tensorboardX log dir: {}".format(tensorboardX_log_dir))

    if args.cuda:
        cudnn.benchmark = True

    if args.lr_patience:  # ReduceLROnPlateau
        scheduler.step(float('inf'))
    elif not args.cosine_annealing_t_max:  # MultiStepLR
        scheduler.step()

    # for CosineAnnealingLR
    if args.resume:
        args.warm_restart_next = checkpoint['args'].warm_restart_next
        args.warm_restart_current = checkpoint['args'].warm_restart_current
    else:
        if args.cosine_annealing_t_max:  # CosineAnnealingLR
            args.warm_restart_next = args.cosine_annealing_t_max + args.warmup_epochs
            args.warm_restart_current = args.warmup_epochs

    for epoch in range(args.start_epoch, args.epochs):
        start = time.time()

        # CosineAnnealingLR warm restart
        if args.cosine_annealing_t_max and (epoch % args.warm_restart_next
                                            == 0) and epoch != 0:
            current_span = args.warm_restart_next - args.warm_restart_current
            next_span = current_span * args.cosine_annealing_mult
            args.warm_restart_current = args.warm_restart_next
            args.warm_restart_next = args.warm_restart_next + next_span
            scheduler = get_cosine_annealing_lr_scheduler(
                args, optimizer, next_span, len(train_loader))

        if args.mixup:
            train(args, 'mixup', train_loader, model, device, criterion,
                  optimizer, scheduler, epoch, logger, log_writer)
        elif args.ricap:
            train(args, 'ricap', train_loader, model, device, criterion,
                  optimizer, scheduler, epoch, logger, log_writer)
        elif args.icap:
            train(args, 'icap', train_loader, model, device, criterion,
                  optimizer, scheduler, epoch, logger, log_writer)
        elif args.cutmix:
            train(args, 'cutmix', train_loader, model, device, criterion,
                  optimizer, scheduler, epoch, logger, log_writer)
        else:
            train(args, 'normal', train_loader, model, device, criterion,
                  optimizer, scheduler, epoch, logger, log_writer)

        report_lr(epoch, 'x_learning_rate', get_lr(optimizer), logger,
                  log_writer)

        acc1 = valid(args, valid_loader, model, device, criterion, optimizer,
                     scheduler, epoch, logger, log_writer)

        elapsed_time = time.time() - start
        logger.info("Epoch[{}] Time cost: {} [sec]".format(
            epoch, elapsed_time))

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        save_model(args, base_model, optimizer, scheduler, is_best,
                   train_num_classes, train_class_names, epoch, acc1, logger)
Code example #25
File: log_zero.py Project: txu2k8/libs-py
import logging
import logzero
from logzero import logger
from logzero.colors import Fore as ForegroundColors

# init logger
fmt = "%(asctime)s %(color)s%(levelname)-8.8s%(end_color)s: %(message)s"
datefmt = '%y/%m/%d %H:%M:%S'
colors = {
    logging.DEBUG: ForegroundColors.CYAN,
    logging.INFO: ForegroundColors.GREEN,
    logging.WARNING: ForegroundColors.YELLOW,
    logging.ERROR: ForegroundColors.LIGHTRED_EX,
    logging.CRITICAL: ForegroundColors.LIGHTMAGENTA_EX
}
logzero.formatter(logzero.LogFormatter(fmt=fmt, datefmt=datefmt,
                                       colors=colors))
logzero.logfile("debug.log", loglevel=logging.DEBUG)
logzero.loglevel(logging.DEBUG)

if __name__ == "__main__":

    logger.debug("hello")
    logger.info("info")
    logger.warning("warn")
    logger.error("error")

    # This is how you'd log an exception
    try:
        raise Exception("this is a demo exception")
    except Exception as e:
        logger.exception(e)
Code example #26
import torch
import torch.nn as nn
from torch.autograd import Variable

import logging
import logzero
from logzero import logger

# logger setting
LOG_FORMAT = '[%(asctime)s %(levelname)s] %(message)s'
logzero.loglevel(logging.INFO)
logzero.formatter(logging.Formatter(LOG_FORMAT))
logzero.logfile('log/contrastive-center.log')


class ContrastiveCenterLoss(nn.Module):
    def __init__(self, dim_hidden, num_classes, lambda_c=1.0, use_cuda=True):
        super(ContrastiveCenterLoss, self).__init__()
        self.dim_hidden = dim_hidden
        self.num_classes = num_classes
        self.lambda_c = lambda_c
        self.centers = nn.Parameter(torch.randn(num_classes, dim_hidden))
        self.use_cuda = use_cuda

    # may not work due to the flowing gradient; changing the center calculation to an exponential moving average may work.
    def forward(self, y, hidden):
        batch_size = hidden.size()[0]
        expanded_centers = self.centers.expand(batch_size, -1, -1)
        expanded_hidden = hidden.expand(self.num_classes, -1,
                                        -1).transpose(1, 0)
        distance_centers = (expanded_hidden -
Code example #27
File: aspi.py Project: coderdojo-futurix/aspi
 def __write_header(self):
     logzero.formatter(NO_TIMESTAMP_FORMATTER)
     logger.info(self.__generate_header_line())
     logzero.formatter(self.formatter)
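
The swap-write-restore pattern above can be wrapped in a small context manager; this is a sketch, not part of the aspi project, and works with any logzero-compatible formatter.

from contextlib import contextmanager

import logzero


@contextmanager
def temporary_formatter(tmp_formatter, restore_formatter):
    # apply tmp_formatter to the logzero handlers, then restore the previous one
    logzero.formatter(tmp_formatter)
    try:
        yield
    finally:
        logzero.formatter(restore_formatter)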
Code example #28
File: my_logger.py Project: ChantWei/room_price
def LoggerInit():
    logzero.logfile("./logfile.log", maxBytes=10e6, backupCount=3)
    formatter = logging.Formatter(
        '%(asctime)-15s %(filename)s:%(lineno)s [%(levelname)s] %(message)s')
    logzero.formatter(formatter)
Code example #29
import logging
import logzero

# Setup rotating logfile with 3 rotations, each with a maximum filesize of 1MB:
logzero.logfile("rotating-logfile.log", maxBytes=1024*1024, backupCount=3)
logzero.loglevel(logging.DEBUG)

# Set a custom formatter
formatter = logging.Formatter('%(asctime)s %(levelname)-s %(message)s', "%Y-%m-%d %H:%M:%S")
logzero.formatter(formatter)
Code example #30
def main():
    global args
    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    if args.prefix == 'auto':
        args.prefix = datetime.datetime.now().strftime('%Y%m%d%H%M%S')

    formatter = logging.Formatter('%(message)s')
    logzero.formatter(formatter)

    if not os.path.exists(args.log_dir):
        os.makedirs(args.log_dir, exist_ok=True)

    log_filename = "{}-test.log".format(args.prefix)
    log_filepath = os.path.join(args.log_dir, log_filename)
    logzero.logfile(log_filepath)

    if args.workers is None:
        args.workers = max(1, int(0.8 * cpu_count()))
    elif args.workers == -1:
        args.workers = cpu_count()

    cudnn.benchmark = True

    logger.info('Running script with args: {}'.format(str(args)))

    checkpoint = load_checkpoint(args, args.model)
    logger.info("=> loaded the model (epoch {})".format(checkpoint['epoch']))
    model_arch = checkpoint['arch']
    model_args = checkpoint['args']

    if args.scale_size:
        scale_size = args.scale_size
    else:
        scale_size = model_args.scale_size
    if args.input_size:
        input_size = args.input_size
    else:
        input_size = model_args.input_size

    if args.rgb_mean:
        rgb_mean = args.rgb_mean
        rgb_mean = [float(mean) for mean in rgb_mean.split(',')]
    else:
        rgb_mean = model_args.rgb_mean
    if args.rgb_std:
        rgb_std = args.rgb_std
        rgb_std = [float(std) for std in rgb_std.split(',')]
    else:
        rgb_std = model_args.rgb_std

    if args.interpolation:
        interpolation = args.interpolation
    else:
        try:
            interpolation = model_args.interpolation
        except AttributeError:
            interpolation = 'BILINEAR'

    logger.info("scale_size: {}  input_size: {}".format(scale_size, input_size))
    logger.info("rgb_mean: {}".format(rgb_mean))
    logger.info("rgb_std: {}".format(rgb_std))
    logger.info("interpolation: {}".format(interpolation))

    interpolation = getattr(Image, interpolation, 2)

    # Data augmentation and normalization for test
    data_transforms = {
        'test': transforms.Compose([
            transforms.Resize(scale_size, interpolation=interpolation),
            transforms.CenterCrop(input_size),
            transforms.ToTensor(),
            transforms.Normalize(rgb_mean, rgb_std)
        ]),
        'test_FiveCrop': transforms.Compose([
            transforms.Resize(scale_size, interpolation=interpolation),
            transforms.FiveCrop(input_size),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.Normalize(rgb_mean, rgb_std)(crop) for crop in crops]))
        ]),
        'test_TenCrop': transforms.Compose([
            transforms.Resize(scale_size, interpolation=interpolation),
            transforms.TenCrop(input_size),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.Normalize(rgb_mean, rgb_std)(crop) for crop in crops]))
        ]),
        'test_CustomSixCrop': transforms.Compose([
            transforms.Resize(scale_size, interpolation=interpolation),
            CustomSixCrop(input_size),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.Normalize(rgb_mean, rgb_std)(crop) for crop in crops]))
        ]),
        'test_CustomSevenCrop': transforms.Compose([
            transforms.Resize(scale_size, interpolation=interpolation),
            CustomSevenCrop(input_size),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.Normalize(rgb_mean, rgb_std)(crop) for crop in crops]))
        ]),
        'test_CustomTenCrop': transforms.Compose([
            transforms.Resize(scale_size, interpolation=interpolation),
            CustomTenCrop(input_size),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.Normalize(rgb_mean, rgb_std)(crop) for crop in crops]))
        ]),
        'test_CustomTwentyCrop': transforms.Compose([
            transforms.Resize(scale_size, interpolation=interpolation),
            CustomTwentyCrop(input_size),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.Normalize(rgb_mean, rgb_std)(crop) for crop in crops]))
        ])

    }

    tfms = 'test'
    if args.tta:
        tfms = 'test_FiveCrop'
        batch_size = args.batch_size // 5
    elif args.tta_ten_crop:
        tfms = 'test_TenCrop'
        batch_size = args.batch_size // 10
    elif args.tta_custom_six_crop:
        tfms = 'test_CustomSixCrop'
        batch_size = args.batch_size // 6
    elif args.tta_custom_seven_crop:
        tfms = 'test_CustomSevenCrop'
        batch_size = args.batch_size // 7
    elif args.tta_custom_ten_crop:
        tfms = 'test_CustomTenCrop'
        batch_size = args.batch_size // 10
    elif args.tta_custom_twenty_crop:
        tfms = 'test_CustomTwentyCrop'
        batch_size = args.batch_size // 20
    else:
        batch_size = args.batch_size

    image_datasets = {
        'test': ImageFolderWithPaths(os.path.join(args.data, 'test'), data_transforms[tfms])
    }

    test_num_classes = len(image_datasets['test'].classes)
    test_class_names = image_datasets['test'].classes

    kwargs = {'num_workers': args.workers, 'pin_memory': True} if args.cuda else {}
    test_loader = torch.utils.data.DataLoader(
        image_datasets['test'], batch_size=batch_size, shuffle=False, **kwargs)

    logger.info("number of test dataset: {}".format(len(test_loader.dataset)))
    logger.info("number of classes: {}".format(len(test_class_names)))

    model, num_classes, class_names = load_model_from_checkpoint(args, checkpoint, test_num_classes, test_class_names)

    if args.topk > num_classes:
        logger.warning('--topk must be less than or equal to the class number of the model')
        args.topk = num_classes
        logger.warning('--topk set to {}'.format(num_classes))

    # check test and train class names
    do_report = True
    if test_num_classes != num_classes:
        logger.info("The number of classes for train and test is different.")
        logger.info("Skip accuracy report.")
        do_report = False

    test(args, model_arch, model, test_loader, class_names, do_report)
    logger.info("=> Saved test log to \"{}\"".format(log_filepath))