Example #1
def load_checkpoint_and_setup(args, model):
    assert args.load_type is not None, 'args.load_type should be specified!'
    pretrained_desc = get_model_description(args, postprocess=False)
    checkpoint_path = "params/{}/{}_model.pth".format(pretrained_desc,
                                                      args.load_type)
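    # Restore saved weights and training progress; map_location remaps tensors onto the active device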
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(checkpoint["state_dict"])
    epoch_last = checkpoint["epoch"]
    global_step = checkpoint["global_step"]
    if args.loss == 'evi':
        # 'evi' loss: fine-tune only the temperature parameter 'tmps'; freeze everything else
        for name, param in model.named_parameters():
            param.requires_grad = (name == 'tmps')
        optimizer = optim.Adam(model.parameters(), lr=2e-2)
    elif args.loss == 'L1':
        optimizer = optim.Adam(model.parameters(), lr=args.lr)
    else:
        raise ValueError("Unsupported args.loss: {}".format(args.loss))
    return model, optimizer, epoch_last, global_step
Example #2
        print('itr', itr, flush=True)
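        # Move the batch to the device, then predict with epistemic uncertainty estimates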
        x, y = x.to(device), y.to(device)
        pred, epis = model.compute_prediction_and_uncertainty(x)
        y, pred, epis = prepare_to_visualize(y, pred, epis, mean, std)
        fig = plt.figure(figsize=(5, 2.5), dpi=300)
        plt.subplots_adjust(wspace=0.25, hspace=0.4)
        add_subplot(fig, 111, y, pred, epis)
        plt.show()
        plt.close()


if __name__ == "__main__":
    args = parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    postprocess = args.postprocess
    model_desc = get_model_description(args, postprocess)
    checkpoint_path = "params/{}/{}_model.pth".format(model_desc,
                                                      args.load_type)
    te_dataset = ARTDataset(args.te_path)
    te_loader = DataLoader(te_dataset,
                           batch_size=1,
                           shuffle=False,
                           num_workers=1)
    config = get_model_config(args, postprocess)
    model = Model(**config).to(device)
    print("Load checkpoint from: {}".format(checkpoint_path))
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(checkpoint["state_dict"])
    model.eval()
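    # eval() switches off dropout and uses running batch-norm stats; no_grad() disables autograd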
    with torch.no_grad():
        viz_regression(te_loader, model, args)
Example #3
tf.flags.DEFINE_integer('batch_size', 32, 'Minibatch size')
tf.flags.DEFINE_integer('latent_dim', 2, 'Number of latent dimensions')
tf.flags.DEFINE_integer('test_image_number', 5,
                        'Number of test images to recover during training')
tf.flags.DEFINE_integer(
    'epochs_to_plot', 2,
    'Number of epochs before saving test sample of reconstructed images')
tf.flags.DEFINE_integer('save_after_n', 20,
                        'Number of epochs before saving network')
tf.flags.DEFINE_string('logdir', './logs', 'Logs folder')
tf.flags.DEFINE_string('data_path', './Data/Images', 'Path to the image data')
tf.flags.DEFINE_bool('shuffle', True, 'Shuffle dataset for training')
FLAGS = tf.flags.FLAGS

# Prepare output directories
model_description = get_model_description(FLAGS)
results_folder = create_results_folder(
    os.path.join('Results', model_description))
model_folder = create_folder(os.path.join('Models', model_description))
delete_old_logs(FLAGS.logdir)

# Create tf dataset
with tf.name_scope('DataPipe'):
    filenames = tf.placeholder_with_default(get_files(FLAGS.data_path),
                                            shape=[None],
                                            name='filenames_tensor')
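    # placeholder_with_default: the default file list can be overridden via feed_dict at run time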
    dataset = load_and_process_data(filenames,
                                    batch_size=FLAGS.batch_size,
                                    shuffle=FLAGS.shuffle)
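    # Initializable iterators must be primed with sess.run(iterator.initializer) before first use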
    iterator = dataset.make_initializable_iterator()
    input_batch = iterator.get_next()
Example #4
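    # Fragment of the training entry point; global_step is saved in checkpoints (cf. Example #1)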
    global global_step
    global_step = 0
    args = parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    tr_dataset = ARTDataset(args.tr_path)
    val_dataset = ARTDataset(args.val_path)
    tr_loader = DataLoader(tr_dataset, batch_size=args.bsz, num_workers=args.num_workers, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=args.bsz, num_workers=args.num_workers, shuffle=False)

    config = get_model_config(args)
    model = Model(**config).to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

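    # stats presumably holds dataset normalization statistics (mean/std; cf. Example #2)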
    with open(args.stats_path, 'rb') as f:
        stats = pickle.load(f)
    model_desc = get_model_description(args)
    save_dir, log_dir = make_dirs(model_desc)
    log_train, log_valid, log_info = get_logger(log_dir)
    writer = SummaryWriter(logdir=os.path.join(log_dir, 'runs', time.strftime('%Y-%m-%d_%H:%M:%S')))
    write_experiment_info(log_info, args, model)

    loss_best = MAE_best = float('inf')  # best-so-far sentinels
    for epoch in range(1, 987654321):  # effectively an unbounded training loop
        print('# --- epoch {} start --- #'.format(epoch))
        train(epoch, writer, log_train, args)
        with torch.no_grad():
            loss_val, MAE_val = validate(epoch, writer, log_valid, args)

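        # Always keep the most recent checkpoint so training can resume after an interruption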
        save_checkpoint(epoch, global_step, model, optimizer, save_dir, "latest_model.pth")
        print('Model Saved! - [latest_model.pth]')
        if loss_val < loss_best:
            # Continuation inferred from context: keep the checkpoint with the best validation loss
            loss_best = loss_val
            save_checkpoint(epoch, global_step, model, optimizer, save_dir, "best_model.pth")
            print('Model Saved! - [best_model.pth]')