예제 #1
0
def cal_R2():
    """Compute the coefficient of determination (R^2) on the test set.

    Loads the test tensors, estimates the per-time-step mean of the test
    targets over all ``n_test`` samples, then accumulates the residual and
    total sums of squares sample by sample. Only the first two output
    channels are scored.

    Returns:
        float: R^2 = 1 - SS_res / SS_tot over the whole test set.
    """
    x_r2, y_r2, t_r2 = load_tensor(test_data_dir)
    y_r2 = y_r2.numpy()
    print('Loaded R2 data!')
    ntimes = len(args.times)
    n_test = args.n_test

    # Per-time-step mean of the test targets (first two channels).
    # NOTE(review): the original indexed an undefined name `y_mc` here;
    # `y_r2` (the targets just loaded above) is the consistent choice —
    # confirm there was no intended module-level `y_mc`.
    # assumes each sample occupies `ntimes` consecutive rows of shape
    # (2, 50, 50) after channel slicing — matches the accumulator shape.
    y_sum = np.zeros((ntimes, 2, 50, 50))
    for i in range(n_test):
        y_sum += y_r2[i * ntimes:(i + 1) * ntimes, :2]
    y_mean = y_sum / n_test

    model.eval()  # hoisted: eval mode need only be set once, not per sample
    numerator = 0.0    # residual sum of squares (SS_res)
    denominator = 0.0  # total sum of squares (SS_tot)
    for i in range(n_test):
        print(i)
        x = x_r2[i * ntimes:(i + 1) * ntimes]
        t = t_r2[i * ntimes:(i + 1) * ntimes]
        y = y_r2[i * ntimes:(i + 1) * ntimes, :2]
        x, t = x.to(device), t.to(device)
        with th.no_grad():  # inference only — no autograd graph needed
            y_pred = model(x, t)
        y_pred = y_pred.data.cpu().numpy()
        numerator += ((y - y_pred[:, :2]) ** 2).sum()
        denominator += ((y - y_mean) ** 2).sum()
    R2 = 1 - numerator / denominator
    print("R2: {}".format(R2))
    return R2
예제 #2
0
def predict():
    """Run the model on the 50 interpolation samples and save the images.

    For each of the 50 samples, evaluates the model over its 3 time steps
    and keeps output channel 2 at the final time step.

    Side effects:
        Writes ``<output_dir>/N_<n_train>_SgLabel_DCEDN.hdf5`` containing a
        (50, 1, 50, 50) float dataset named 'dataset'.
    """
    interp_x, interp_y, interp_times = load_tensor(args.data_dir +
                                                   '/lhs50_t3_expanded.hdf5')
    ntimes = 3
    y_DCEDN = np.zeros((50, 1, 50, 50))
    model.eval()  # hoisted: eval mode need only be set once, not per sample
    for i in range(50):
        x = interp_x[i * ntimes:(i + 1) * ntimes]
        t = interp_times[i * ntimes:(i + 1) * ntimes]
        x, t = x.to(device), t.to(device)
        # no_grad: pure inference — the original built the autograd graph
        # needlessly, wasting memory (cal_R2 already uses no_grad).
        with th.no_grad():
            y_pred = model(x, t)
        y_pred = y_pred.data.cpu().numpy()
        # channel 2 at the last time step; the list index [2] keeps the
        # singleton channel dimension so the (1, 50, 50) slot is filled.
        y_DCEDN[i] = y_pred[ntimes - 1, [2]]

    # Context manager guarantees the HDF5 handle is closed even if
    # create_dataset raises (the original leaked it on error).
    with h5py.File(
            args.output_dir + '/N_{}_SgLabel_DCEDN.hdf5'.format(args.n_train),
            'w') as hf:
        hf.create_dataset('dataset', data=y_DCEDN, dtype='f',
                          compression='gzip')
예제 #3
0
                 bottleneck=args.bottleneck,
                 time_channels=args.zt).to(device)
print(model)

# ---- data loading -------------------------------------------------------
# All datasets live under args.data_dir; file names encode the sample count
# and the number of time steps.
h5py_dir = args.data_dir
train_data_dir = f'{h5py_dir}/lhs{args.n_train}_t{len(args.times)}_expanded.hdf5'
test_data_dir = f'{h5py_dir}/lhs{args.n_test}_t{len(args.times)}_expanded.hdf5'
# fixed 400-sample, 3-step set used to plot the results at t = 5, 50,
# and 100 days
interp_data_dir = f'{h5py_dir}/lhs400_t3.hdf5'
train_loader, train_stats = load_data_dynamic(train_data_dir, args.batch_size)
test_loader, test_stats = load_data_dynamic(test_data_dir,
                                            args.test_batch_size)
interp_x, interp_y, interp_times = load_tensor(interp_data_dir)
print('Loaded data!')

# ---- loss and optimizer -------------------------------------------------
# Select the training loss. NOTE: 'mse' sums over elements while 'l1' and
# 'huber' use the default mean reduction — preserved as-is from the
# original configuration.
if args.loss_fn == "mse":
    loss_fn = nn.MSELoss(reduction='sum')
elif args.loss_fn == "l1":
    loss_fn = nn.L1Loss()
elif args.loss_fn == 'huber':
    loss_fn = nn.SmoothL1Loss()
else:
    # Fail fast on an unknown loss name instead of leaving `loss_fn`
    # unbound and crashing with a confusing NameError during training.
    raise ValueError("unknown loss_fn: {!r}".format(args.loss_fn))

optimizer = optim.Adam(model.parameters(),
                       lr=args.lr,
                       weight_decay=args.weight_decay)
scheduler = ReduceLROnPlateau(optimizer,
                              mode='min',
                              factor=0.1,