Example #1
# The snippet starts mid-block; the guard and `try:` below are reconstructed
# (assumed: reuse the best checkpoint of a previous run if one exists).
if os.path.isfile(os.path.join(logdir, 'best.pt')):
    try:
        # Recover the best evaluation loss reached by the previous run
        prev_best_data = torch.load(os.path.join(logdir, 'best.pt'))
        prev_best = prev_best_data['loss_eval']
        del prev_best_data
    except KeyError:
        prev_best = None
else:
    prev_best = None
# Logger that tracks the best model so far, seeded with the previous best loss
best_model_logger = loggers.ModelSaveLogger(os.path.join(logdir, 'best.pt'),
                                            period=1,
                                            save_optimizer=True,
                                            save_best=True,
                                            prev_best=prev_best)
# Core loggers: NaN guard, progress bar, per-epoch CSV, periodic network
# snapshots, a rolling checkpoint, and the best-model logger above
logger_list = [
    loggers.TerminateOnNaN(),
    loggers.ProgbarLogger(allow_unused_fields='all'),
    loggers.CsvLogger(os.path.join(logdir, 'epoch_loss.csv'),
                      allow_unused_fields='all'),
    loggers.ModelSaveLogger(os.path.join(logdir, 'nets', '{epoch:04d}.pt'),
                            period=opt.save_net,
                            save_optimizer=opt.save_net_opt),
    loggers.ModelSaveLogger(os.path.join(logdir, 'checkpoint.pt'),
                            period=1,
                            save_optimizer=True),
    best_model_logger,
]
# Optionally log per-batch losses to a separate CSV file
if opt.log_batch:
    logger_list.append(
        loggers.BatchCsvLogger(os.path.join(logdir, 'batch_loss.csv'),
                               allow_unused_fields='all'))
# Optionally prepare a TensorBoard log directory
if opt.tensorboard:
    tf_logdir = os.path.join(opt.logdir, 'tensorboard', exprdir,
                             str(opt.expr_id))
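
For illustration only: the loggers above are meant to be driven by a training loop that notifies each of them once per epoch. The sketch below shows that callback pattern with a self-contained stand-in; `EpochLogger`, `SimpleCsvLogger`, and `on_epoch_end` are hypothetical names and are not the interface of the `loggers` module used in this example.

import csv
import os


class EpochLogger:
    """Hypothetical minimal logger interface (stand-in, not the `loggers` module above)."""
    def on_epoch_end(self, epoch, metrics):
        raise NotImplementedError


class SimpleCsvLogger(EpochLogger):
    """Appends one row of metrics per epoch to a CSV file."""
    def __init__(self, path):
        self.path = path
        os.makedirs(os.path.dirname(path), exist_ok=True)

    def on_epoch_end(self, epoch, metrics):
        write_header = not os.path.isfile(self.path)
        with open(self.path, 'a', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=['epoch'] + sorted(metrics))
            if write_header:
                writer.writeheader()
            writer.writerow({'epoch': epoch, **metrics})


# The training loop fans each epoch's metrics out to every logger in the list.
demo_loggers = [SimpleCsvLogger(os.path.join('logs_demo', 'epoch_loss.csv'))]
for epoch in range(3):
    metrics = {'loss': 1.0 / (epoch + 1)}  # placeholder values
    for lg in demo_loggers:
        lg.on_epoch_end(epoch, metrics)
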
Example #2
# The snippet starts mid-block; the guard below is reconstructed (assumed:
# merge previously saved options when an earlier run left an opt.pt behind).
if os.path.isfile(os.path.join(logdir, 'opt.pt')):
    opt_f_old = os.path.join(logdir, 'opt.pt')
    opt = options_train.overwrite(opt, opt_f_old, unique_opt_params)

# Save options as a torch file (for programmatic reloading) and as plain text
torch.save(vars(opt), os.path.join(logdir, 'opt.pt'))
with open(os.path.join(logdir, 'opt.txt'), 'w') as fout:
    for k, v in vars(opt).items():
        fout.write('%20s\t%-20s\n' % (k, v))

opt.full_logdir = logdir
print(str_verbose, "Logging directory set to: %s" % logdir)

###################################################

print(str_stage, "Setting up loggers")
csv_logger = logger.CsvLogger(opt, os.path.join(logdir, 'epoch_loss.csv'))
metric_logger = logger.StatisticLogger(logdir)
model_logger = logger.ModelLogger(logdir)

###################################################

print(str_stage, "Setting up models")
model = models.get_model(opt)
print("# model parameters: {:,d}".format(
    sum(p.numel() for p in model.parameters() if p.requires_grad)))
# '-2' selects multi-GPU training; DataParallel uses all visible GPUs by default
if opt.gpu == '-2':
    model = nn.DataParallel(model)
model = model.to(device)

###################################################
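
Since `torch.save(vars(opt), ...)` above stores the option namespace as a plain dict, it can later be read back and turned into a namespace again. Below is a minimal sketch assuming only what the snippet shows; `load_saved_options` is a hypothetical helper, not part of the example's codebase.

import argparse
import os

import torch


def load_saved_options(logdir):
    """Rebuild an argparse.Namespace from the dict saved to opt.pt above."""
    opt_dict = torch.load(os.path.join(logdir, 'opt.pt'))
    return argparse.Namespace(**opt_dict)


# Usage (hypothetical log directory):
# opt = load_saved_options('./logs/my_experiment')
# print(opt.full_logdir)
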