def main():
    # Load
    G = torch.from_numpy(np.load(os.path.join(
        data_folder, "F_niklas.npy"))).float().detach()
    grid = Grid.load(os.path.join(data_folder, "grid.pickle"))
    volcano_coords = torch.from_numpy(grid.cells).float().detach()

    # Define GP model.
    data_std = 0.1
    sigma0 = 1.0
    m0 = 2139.1
    lambda0 = 200.0

    ground_truth = torch.from_numpy(
        np.load(os.path.join(results_folder, "ground_truth.npy")))
    synth_data = torch.from_numpy(
        np.load(os.path.join(results_folder, "synth_data.npy")))

    # Now train GP model on it.
    myGP = InverseGaussianProcess(m0, sigma0, lambda0, volcano_coords, kernel)
    myGP.train(np.linspace(1.0, 2000, 20),
               G,
               synth_data,
               data_std,
               out_path=os.path.join(results_folder,
                                     "./train_res_matern52.pck"),
               n_epochs=2000,
               lr=0.5,
               n_chunks=80,
               n_flush=1)
def main():
    # Load
    data_folder = "/home/cedric/PHD/Dev/VolcapySIAM/data/InversionDatas/stromboli_23823_cells"
    F = torch.from_numpy(np.load(os.path.join(
        data_folder, "F_niklas.npy"))).float().detach()

    grid = Grid.load(os.path.join(data_folder, "grid.pickle"))
    volcano_coords = torch.from_numpy(grid.cells).float().detach()

    data_coords = torch.from_numpy(
        np.load(os.path.join(data_folder, "niklas_data_coords.npy"))).float()
    data_values = torch.from_numpy(
        np.load(os.path.join(data_folder, "niklas_data_obs.npy"))).float()

    print("Size of inversion grid: {} cells.".format(volcano_coords.shape[0]))
    print("Number of datapoints: {}.".format(data_coords.shape[0]))
    size = data_coords.shape[0] * volcano_coords.shape[0] * 4 / 1e9
    cov_size = (volcano_coords.shape[0])**2 * 4 / 1e9
    print("Size of Covariance matrix: {} GB.".format(cov_size))
    print("Size of Pushforward matrix: {} GB.".format(size))

    # HYPERPARAMETERS
    data_std = 0.1
    sigma0 = 100.6
    m0 = 1000.0
    lambda0 = 300.0

    start = timer()
    myGP = InverseGaussianProcess(m0,
                                  sigma0,
                                  lambda0,
                                  volcano_coords,
                                  kernel,
                                  logger=logger)

    m_post_m, m_post_d = myGP.condition_model(F,
                                              data_values,
                                              data_std,
                                              concentrate=False,
                                              is_precomp_pushfwd=False)

    end = timer()
    print("Non-sequential inversion run in {} s.".format(end - start))

    # Train.
    myGP.train(np.linspace(20, 1400, 14),
               F,
               data_values,
               data_std,
               out_path="./train_res.pck",
               n_chunks=2,
               n_flush=50)
Example #3
def main():
    # Load
    data_folder = "/home/cedric/PHD/Dev/VolcapySIAM/data/InversionDatas/stromboli_40357_cells"
    F = torch.from_numpy(
            np.load(os.path.join(data_folder, "F_niklas.npy"))).float().detach()

    F = torch.from_numpy(
            np.load(os.path.join(data_folder, "F_niklas_corr.npy"))).float().detach()

    grid = Grid.load(os.path.join(data_folder,
                    "grid.pickle"))
    volcano_coords = torch.from_numpy(
            grid.cells).float().detach()

    data_coords = torch.from_numpy(
            np.load(os.path.join(data_folder,"niklas_data_coords.npy"))).float()
    data_values = torch.from_numpy(
            np.load(os.path.join(data_folder,"niklas_data_obs.npy"))).float()

    print("Size of inversion grid: {} cells.".format(volcano_coords.shape[0]))
    print("Number of datapoints: {}.".format(data_coords.shape[0]))
    size = data_coords.shape[0]*volcano_coords.shape[0]*4 / 1e9
    cov_size = (volcano_coords.shape[0])**2 * 4 / 1e9
    print("Size of Covariance matrix: {} GB.".format(cov_size))
    print("Size of Pushforward matrix: {} GB.".format(size))

    # HYPERPARAMETERS
    data_std = 0.1
    sigma0 = 221.6
    m0 = 2133.8
    lambda0 = 462.0

    # Run inversion in one go to compare.
    import volcapy.covariance.exponential as kernel
    start = timer()
    myGP = InverseGaussianProcess(m0, sigma0, lambda0,
            volcano_coords, kernel,
            logger=logger)

    cov_pushfwd = cl.compute_cov_pushforward(
            lambda0, F, volcano_coords, n_chunks=20,
            n_flush=50)
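    # Note: the call to condition_model below uses is_precomp_pushfwd=False, so the
    # cov_pushfwd computed above does not appear to be reused by that call.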
    
    m_post_m, m_post_d = myGP.condition_model(F, data_values, data_std,
            concentrate=False,
            is_precomp_pushfwd=False)

    end = timer()
    print("Non-sequential inversion run in {} s.".format(end - start))

    # Train.
    myGP.train_fixed_lambda(200.0, F, data_values, data_std)
Example #4
def main():
    # Load static data.
    F = torch.from_numpy(
        np.load(os.path.join(original_data_folder,
                             "F_niklas.npy"))).float().detach()
    """
    grid = Grid.load(os.path.join(data_folder,
                    "grid.pickle"))
    """
    volcano_coords = torch.from_numpy(
        np.load(os.path.join(original_data_folder,
                             "volcano_coords.npy"))).float().detach()

    data_coords = torch.from_numpy(
        np.load(os.path.join(original_data_folder,
                             "data_coords.npy"))).float().detach()
    data_values = torch.from_numpy(
        np.load(os.path.join(original_data_folder,
                             "data_obs.npy"))).float().detach()

    print("Size of inversion grid: {} cells.".format(volcano_coords.shape[0]))
    print("Number of datapoints: {}.".format(data_coords.shape[0]))
    size = data_coords.shape[0] * volcano_coords.shape[0] * 4 / 1e9
    cov_size = (volcano_coords.shape[0])**2 * 4 / 1e9
    print("Size of Covariance matrix: {} GB.".format(cov_size))
    print("Size of Pushforward matrix: {} GB.".format(size))

    # HYPERPARAMETERS to start search from.
    data_std = 0.1
    sigma0 = 371.82
    m0 = 561.0
    lambda0 = 50.0

    myGP = InverseGaussianProcess(m0, sigma0, lambda0, volcano_coords, kernel)

    # Train.
    myGP.train(np.linspace(914.0, 1914.0, 20),
               F,
               data_values,
               data_std,
               out_path="./train_res_matern32.pck",
               n_epochs=2000,
               lr=0.5,
               n_chunks=80,
               n_flush=1)
Example #5
def main():
    # Output path for the training results.
    output_folder = os.path.join(base_folder, "./train_res.pck")

    # Load static data.
    F = torch.from_numpy(
        np.load(os.path.join(data_folder,
                             "F_corrected_final.npy"))).float().detach()
    grid = Grid.load(os.path.join(data_folder, "grid.pickle"))
    volcano_coords = torch.from_numpy(grid.cells).float().detach()

    data_coords = torch.from_numpy(
        np.load(
            os.path.join(data_folder,
                         "niklas_data_coords_corrected_final.npy"))).float()
    data_values = torch.from_numpy(
        np.load(
            os.path.join(data_folder,
                         "niklas_data_obs_corrected_final.npy"))).float()

    print("Size of inversion grid: {} cells.".format(volcano_coords.shape[0]))
    print("Number of datapoints: {}.".format(data_coords.shape[0]))
    size = data_coords.shape[0] * volcano_coords.shape[0] * 4 / 1e9
    cov_size = (volcano_coords.shape[0])**2 * 4 / 1e9
    print("Size of Covariance matrix: {} GB.".format(cov_size))
    print("Size of Pushforward matrix: {} GB.".format(size))

    # HYPERPARAMETERS to start search from.
    data_std = 0.1
    sigma0 = 340.0
    m0 = 561.0
    lambda0 = 50.0

    myGP = InverseGaussianProcess(m0, sigma0, lambda0, volcano_coords, kernel)

    # Train.
    myGP.train(np.linspace(50.0, 1450, 30),
               F,
               data_values,
               data_std,
               out_path="./train_res.pck",
               n_epochs=3000,
               lr=0.5,
               n_chunks=80,
               n_flush=1)
Example #6
def sample_prior(input_path, output_path, n_realizations):
    os.makedirs(output_path, exist_ok=True)

    # Load
    grid = Grid.load(os.path.join(input_path,
                    "grid.pickle"))
    volcano_coords = torch.from_numpy(
            grid.cells).float().detach()

    # HYPERPARAMETERS.
    data_std = 0.1

    sigma0_exp = 308.89 # WARNING: Old values.
    m0_exp = 535.39
    lambda0_exp = 1925.0

    sigma0_matern32 = 284.66
    m0_matern32 = 2139.1
    lambda0_matern32 = 651.58

    sigma0_matern52 = 258.40
    m0_matern52 = 2120.93
    lambda0_matern52 = 441.05

    myGP = InverseGaussianProcess(m0_matern32, sigma0_matern32,
            lambda0_matern32,
            volcano_coords, kernel,
            n_chunks=70, n_flush=50)

    for i in range(500, 500 + n_realizations):
        start = timer()
        prior_sample = myGP.sample_prior()
        end = timer()

        print("Prior sampling run in {}s.".format(end - start))
        np.save(os.path.join(output_path, "prior_sample_{}.npy".format(i)),
                prior_sample.detach().cpu().numpy())
def sample_posterior_strategy(strategy_folder, sample_nr, prior_sample_folder,
                              static_data_folder, n_samples):
    # Load static data.
    F = torch.from_numpy(
        np.load(os.path.join(static_data_folder,
                             "F_full_surface.npy"))).float().detach()
    grid = Grid.load(os.path.join(static_data_folder, "grid.pickle"))
    volcano_coords = torch.from_numpy(grid.cells).float().detach()

    # Create GP (trained Matern 32).
    data_std = 0.1
    sigma0_matern32 = 284.66
    m0_matern32 = 2139.1
    lambda0_matern32 = 651.58

    myGP = InverseGaussianProcess(m0_matern32,
                                  sigma0_matern32,
                                  lambda0_matern32,
                                  volcano_coords,
                                  kernel,
                                  n_chunks=200,
                                  n_flush=50)

    current_folder = os.path.join(strategy_folder,
                                  "sample_{}/".format(sample_nr))
    post_sample_folder = os.path.join(current_folder, "post_samples/")
    os.makedirs(post_sample_folder, exist_ok=True)

    # Load observed data.
    visited_inds = np.load(os.path.join(current_folder,
                                        "visited_inds.npy")).flatten()
    observed_data = torch.from_numpy(
        np.load(os.path.join(current_folder,
                             "observed_data.npy")).flatten().reshape(-1, 1))
    G = F[visited_inds, :]

    sample_posterior(myGP, n_samples, prior_sample_folder, G, observed_data,
                     data_std, post_sample_folder)
def prepare_groundtruth(data_path, prior_sample_path, post_sample_path,
                        post_data_sample_path):
    """ Given a realization from the prior, compute the corresponding
    conditional realization by updating.

    Parameters
    ----------
    data_path: string
        Path to the static data defining the situation (grid, forward, ...).
    prior_sample_path: string
        Path to a realization from the prior.
    post_sample_path: string
        Where to save the computed posterior realization.
    post_data_sample_path: string
        Where to save the computed posterior realization of the data.

    """
    # Load
    F = torch.from_numpy(np.load(os.path.join(
        data_path, "F_niklas.npy"))).float().detach()
    F_full = torch.from_numpy(
        np.load(os.path.join(data_path,
                             "F_full_surface.npy"))).float().detach()

    grid = Grid.load(os.path.join(data_path, "grid.pickle"))
    volcano_coords = torch.from_numpy(grid.cells).float().detach()

    data_coords = torch.from_numpy(
        np.load(os.path.join(data_path, "niklas_data_coords.npy"))).float()
    data_coords_full = torch.from_numpy(
        np.load(os.path.join(data_path, "surface_data_coords.npy"))).float()
    data_values = torch.from_numpy(
        np.load(os.path.join(data_path, "niklas_data_obs.npy"))).float()

    print("Size of inversion grid: {} cells.".format(volcano_coords.shape[0]))
    print("Number of datapoints: {}.".format(data_coords.shape[0]))
    size = data_coords.shape[0] * volcano_coords.shape[0] * 4 / 1e9
    cov_size = (volcano_coords.shape[0])**2 * 4 / 1e9
    print("Size of Covariance matrix: {} GB.".format(cov_size))
    print("Size of Pushforward matrix: {} GB.".format(size))

    prior_sample = torch.from_numpy(np.load(prior_sample_path)).float()

    # HYPERPARAMETERS, small volcano.
    data_std = 0.1
    sigma0 = 359.49
    m0 = -114.40
    lambda0 = 338.46

    myGP = InverseGaussianProcess(m0,
                                  sigma0,
                                  lambda0,
                                  volcano_coords,
                                  kernel,
                                  logger=logger)

    start = timer()

    post_sample = myGP.update_sample(prior_sample, F, data_values, data_std)

    end = timer()
    print("Sample updating run in {}s.".format(end - start))

    np.save(post_sample_path, post_sample.detach().cpu().numpy())

    post_data_sample = F_full @ post_sample
    np.save(post_data_sample_path, post_data_sample.detach().cpu().numpy())
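
# A minimal usage sketch for prepare_groundtruth defined above. The paths are
# hypothetical placeholders (not taken from the original repository); the call
# simply wires a prior realization through the updating step described in the
# docstring.
if __name__ == "__main__":
    prepare_groundtruth(
        data_path="/path/to/static_data",                        # grid, forward, observations
        prior_sample_path="/path/to/prior_sample.npy",           # realization from the prior
        post_sample_path="/path/to/post_sample.npy",             # where to save the posterior realization
        post_data_sample_path="/path/to/post_data_sample.npy")   # where to save its data image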
import numpy as np
import torch
import matplotlib.pyplot as plt

output_folder = "/home/cedric/PHD/Dev/VolcapySIAM/reporting/Toy1dProblems/results/"

n_cells_1d = 1000

my_problem = ToyFourier1d.build_problem(n_cells_1d)

m0 = 0.0
sigma0 = np.sqrt(2.0)
lambda0 = 0.4

myGP = InverseGaussianProcess(m0, sigma0, lambda0,
                              torch.tensor(my_problem.grid.cells).float(),
                              kernel)

# Build some ground truth by sampling.
ground_truth = myGP.sample_prior().detach().numpy()
np.save("./results/ground_truth.npy", ground_truth)
my_problem.grid.plot_values(ground_truth, cmap='jet')

data_values_re = my_problem.G_re @ ground_truth
data_values_im = my_problem.G_im @ ground_truth

# Pointwise observations at 3 grid cells: each row of G_pts selects one cell.
G_pts = np.zeros((3, my_problem.grid.cells.shape[0]), dtype=np.float32)
G_pts[0, 250] = 1.0
G_pts[1, 500] = 1.0
G_pts[2, 750] = 1.0
Example #10
n_cells_1d = 50
n_data_1d = 1000
data_loc_offset = 1
data_loc_length = 4

my_problem = ToyInverseProblem2d.build_problem(n_cells_1d, n_data_1d,
                                               data_loc_offset,
                                               data_loc_length)

m0 = 1.0
sigma0 = 2.0
lambda0 = 0.2

myGP = InverseGaussianProcess(m0, sigma0, lambda0,
                              torch.tensor(my_problem.grid.cells).float(),
                              kernel)

# Build some ground truth by sampling.
ground_truth = myGP.sample_prior().detach().numpy()
np.save("ground_truth.npy", ground_truth)
my_problem.grid.plot_values(ground_truth, cmap='jet')

data_values = my_problem.G @ ground_truth
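# data_feed returns the synthetic observations at the requested indices
# (presumably so the data can be fed to a sequential routine incrementally).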
data_feed = lambda x: data_values[x]
data_std = 0.005

n_reals = 20

for i in range(1, 25, 4):
    print(i)
Example #11
plot_params = {
    'ytick.labelsize': 'x-small'
}
plt.rcParams.update(plot_params)

output_folder = "/home/cedric/PHD/Dev/VolcapySIAM/reporting/Toy1dProblems/results/"

n_cells_1d = 1000

my_problem = ToyFourier1d.build_problem(n_cells_1d)

m0 = 0.0
sigma0 = np.sqrt(2.0)
lambda0 = 0.4

myGP = InverseGaussianProcess(m0, sigma0, lambda0,
                              torch.tensor(my_problem.grid.cells).float(),
                              kernel)

# Ground truth: either sample from the prior (commented out below) or load a pre-computed one.
"""
ground_truth = myGP.sample_prior().detach().numpy()
np.save("./results/ground_truth.npy", ground_truth)
"""
ground_truth = np.load("./ground_truth_cool.npy")

data_values_re = my_problem.G_re @ ground_truth
data_values_im = my_problem.G_im @ ground_truth

# Pointwise observations at 3 grid cells: each row of G_pts selects one cell.
G_pts = np.zeros((3, my_problem.grid.cells.shape[0]), dtype=np.float32)
G_pts[0, 250] = 1.0
Example #12
output_folder = "/home/ubelix/stat/ct19x463/Dev/VolcapyProd/reporting/Toy2dProblems/Fourier/results_fill_big_05/"

n_cells_1d = 400

forward_cutoff = 200  # Only make 200 observations (Fourier and pointwise).

my_problem = ToyFourier2d.build_problem(n_cells_1d, forward_cutoff)
np.save("G_re.npy", my_problem.G_re)
np.save("G_im.npy", my_problem.G_im)

m0 = 1.0
sigma0 = 2.0
lambda0 = 0.5

myGP = InverseGaussianProcess(m0, sigma0, lambda0,
                              torch.tensor(my_problem.grid.cells).float(),
                              kernel)

# Load a pre-computed ground truth.
ground_truth = np.load(os.path.join(output_folder, "ground_truth.npy"))
np.save(os.path.join(output_folder, "ground_truth.npy"), ground_truth)
my_problem.grid.plot_values(ground_truth, cmap='jet')

data_values_re = my_problem.G_re @ ground_truth
data_values_im = my_problem.G_im @ ground_truth

# Noise level: 0.5% of the standard deviation of the (imaginary-part) data.
data_std = 0.005 * np.std(data_values_im)

# Make pointwise observations along a space-filling sequence.
from volcapy.utils import r_sequence