Пример #1
0
def changelog(signum, frame):
    """Signal handler: rotate the log onto a fresh, date-stamped file.

    Detaches every handler currently attached to the module-level ``logger``
    and replaces them with a single FileHandler writing to
    ``<log_path>/app.log.<YYYYMMDD>``.  ``signum`` and ``frame`` follow the
    ``signal.signal`` callback convention and are not used.
    """
    # Iterate over a copy, since removeHandler mutates logger.handlers.
    for old_handler in list(logger.handlers):
        logger.removeHandler(old_handler)
    dated_path = "{0}/app.log.{1}".format(settings["log_path"],
                                          time.strftime("%Y%m%d"))
    rotated = logging.FileHandler(filename=dated_path)
    # NOTE(review): assumes settings["log_format"] is a logging.Formatter
    # instance — confirm where settings is populated.
    rotated.setFormatter(settings["log_format"])
    logger.setLevel(logging.INFO)
    logger.addHandler(rotated)
Пример #2
0
def initLog(level):
    """Initialize logging: attach a file handler and a console handler.

    Both handlers use the same formatter and the given *level*; the same
    level is also applied to the module-level ``logger``.
    """
    logger.setLevel(level)
    shared_formatter = logging.Formatter('[%(asctime)s-%(name)s-%(levelname)s] %(message)s')
    # Handler that writes records to the log file.
    file_handler = logging.FileHandler('webtest.log')
    file_handler.setLevel(level)
    file_handler.setFormatter(shared_formatter)
    # Handler that echoes records to the console.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(level)
    console_handler.setFormatter(shared_formatter)
    # Attach both handlers to the logger.
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
Пример #3
0
                    trainer.train(1)
            # Apply the accumulated update to the model once per outer step.
            # NOTE(review): exact semantics depend on the trainer API defined
            # elsewhere — confirm.
            trainer.update_model()
            # Record wall-clock time for this iteration (s is set before this
            # fragment, outside the visible lines).
            times.append(time.time()-s)
            # Every `display` iterations, log mean iteration time and
            # throughput, then reset the window.
            if i % display == 0 and i > 0: 
                time_per_iter = np.mean(times)
                logger.info('Time per iteration including communication: %f. Speed: %f images/s', time_per_iter, batch_size * nsteps_update / time_per_iter)
                times = []


if __name__ == '__main__':
    # Command-line entry point for single-GPU training.
    parser = argparse.ArgumentParser(description="Single trainer")
    parser.add_argument('--batch-size', type=int, default=32)
    parser.add_argument('--nsteps-update', type=int, default=1)
    parser.add_argument('--dataset', type=str, default='imagenet', choices=_support_datasets, help='Specify the dataset for training')
    parser.add_argument('--dnn', type=str, default='resnet50', choices=_support_dnns, help='Specify the neural network for training')
    parser.add_argument('--data-dir', type=str, default='./data', help='Specify the data root path')
    parser.add_argument('--lr', type=float, default=0.1, help='Default learning rate')
    parser.add_argument('--max-epochs', type=int, default=settings.MAX_EPOCHS, help='Default maximum epochs to train')
    parser.add_argument('--num-steps', type=int, default=35)
    args = parser.parse_args()
    # Effective batch size: per-step batch times gradient-accumulation steps.
    batch_size = args.batch_size * args.nsteps_update
    prefix = settings.PREFIX
    # Per-run log directory encoding the main hyper-parameters.
    relative_path = './logs/singlegpu-%s/%s-n%d-bs%d-lr%.4f-ns%d' % (prefix, args.dnn, 1, batch_size, args.lr, args.nsteps_update)
    utils.create_path(relative_path)
    logfile = os.path.join(relative_path, settings.hostname+'.log')
    # Mirror log records into the per-run file as well.
    file_handler = logging.FileHandler(logfile)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    logger.info('Configurations: %s', args)
    train_with_single(args.dnn, args.dataset, args.data_dir, 1, args.lr, args.batch_size, args.nsteps_update, args.max_epochs, args.num_steps)
Пример #4
0
#    parser.add_option("-t", "--threads", dest="threads",
#                        help="threads number used for scan")
    # Parse the optparse options built earlier (outside the visible lines).
    (opt, args) = parser.parse_args()

    # Require at least one target: an input file of addresses or a single IP.
    if not opt.filename and not opt.ip:
        parser.print_help()
        exit(1)

    # Verbose option: raise the log level so INFO records are emitted.
    if opt.verbose:
        logger.setLevel(logging.INFO)

    # Output option: additionally write log records to the given file.
    if opt.output:
        fh = logging.FileHandler(opt.output)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')  
        fh.setFormatter(formatter)
        logger.addHandler(fh)
    
        
    # Scan every non-comment line of the input file.
    # NOTE(review): the file is opened in binary mode ("rb"), yet
    # lstrip(" ") / startswith("#") are called with str arguments — this is
    # Python 2 style and raises TypeError under Python 3; confirm the target
    # interpreter before porting.
    if opt.filename:
        with open(opt.filename, "rb") as f:
            for line in f:
                if line.lstrip(" ").startswith("#"):
                    continue

                ipSegmentScan(line)
    # Scan a single IP / segment given directly on the command line.
    if opt.ip:
        ipSegmentScan(opt.ip)