Example #1

import matplotlib.pyplot as plt
import matplotlib.tri
import mogp_emulator


def run_mogp_analysis(analysis_samples, known_value, threshold, results_dir):

    input_points, results, ed = load_results(results_dir)

    # fit GP to simulations

    gp = mogp_emulator.GaussianProcess(input_points, results)
    gp.learn_hyperparameters()

    # We can now make predictions for a large number of input points much
    # more quickly than running the simulation.

    analysis_points = ed.sample(analysis_samples)
    predictions = gp.predict(analysis_points)

    # set up history matching

    hm = mogp_emulator.HistoryMatching(obs=known_value,
                                       expectations=predictions,
                                       threshold=threshold)

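    # The implausibility measures how many predictive standard deviations
    # separate the emulator mean from the observation; NROY contains the
    # indices of points below the threshold ("Not Ruled Out Yet").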
    implaus = hm.get_implausibility()
    NROY = hm.get_NROY()

    # make some plots

    plt.figure()
    plt.plot(analysis_points[NROY, 0], analysis_points[NROY, 1], 'o')
    plt.xlabel('Normal Stress (MPa)')
    plt.ylabel('Shear to Normal Stress Ratio')
    plt.xlim((-120., -80.))
    plt.ylim((0.1, 0.4))
    plt.title("NROY Points")
    plt.savefig("results/nroy.png")

    plt.figure()
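    # Rescale both coordinates to comparable O(1) ranges so the Delaunay
    # triangulation is well conditioned; the constant shift in x is harmless,
    # since the triangulation is unaffected by translation.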
    tri = matplotlib.tri.Triangulation(-(analysis_points[:, 0] - 80.) / 40.,
                                       (analysis_points[:, 1] - 0.1) / 0.3)
    plt.tripcolor(analysis_points[:, 0],
                  analysis_points[:, 1],
                  tri.triangles,
                  implaus,
                  vmin=0.,
                  vmax=6.,
                  cmap="viridis_r")
    cb = plt.colorbar()
    cb.set_label("Implausibility")
    plt.xlabel('Normal Stress (MPa)')
    plt.ylabel('Shear to Normal Stress Ratio')
    plt.title("Implausibility Metric")
    plt.savefig("results/implausibility.png")
for _ in range(n_samples):
    next_point = md2.get_next_point()
    next_target = simulator(next_point)
    md2.set_next_target(next_target)

# look at design and outputs

inputs = md2.get_inputs()
targets = md2.get_targets()

print("Final inputs:\n", inputs)
print("Final targets:\n", targets)

# fit the final GP emulator and make predictions to compare against an emulator
# trained on a one-shot LHD of the same size

lhd_design = lhd.sample(n_init + n_samples)

gp_lhd = mogp_emulator.fit_GP_MAP(lhd_design, np.array([simulator(p) for p in lhd_design]))

gp_mice = mogp_emulator.GaussianProcess(inputs, targets)

gp_mice = mogp_emulator.fit_GP_MAP(gp_mice)

test_points = lhd.sample(10)

print("LHD:")
print_results(test_points, gp_lhd(test_points))
print()
print("MICE:")
print_results(test_points, gp_mice(test_points))
inputs = ed.sample(n_samples)

# run simulation

targets = np.array([simulator(p) for p in inputs])

###################################################################################

# First example -- fit a GP using MLE and a squared exponential kernel, then predict

print("Example 1: Basic GP")

# create GP and then fit using MLE

gp = mogp_emulator.GaussianProcess(inputs, targets)

gp = mogp_emulator.fit_GP_MAP(gp)

# sample n_preds target points to predict

predict_points = ed.sample(n_preds)

means, variances, derivs = gp.predict(predict_points)

print_results(predict_points, means)

###################################################################################

# Second example -- how to change the kernel, use a fixed nugget, and create the GP directly with the fitting function
Example #4
# We will use a Latin Hypercube Design. To specify the design, we give the distribution that
# we would like each parameter to take. By default, we assume a uniform distribution between
# two endpoints, which we will use for this simulation.

# Once we construct the design, we can draw a specified number of samples as shown.

lhd = mogp_emulator.LatinHypercubeDesign([(-5., 1.), (0., 1000.)])

n_simulations = 50
simulation_points = lhd.sample(n_simulations)
simulation_output = np.array([simulator(p) for p in simulation_points])
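
# The design also accepts non-uniform marginals: instead of endpoint tuples, each
# parameter can be specified as a ppf (inverse CDF) callable, e.g. from scipy.stats.
# A sketch, assuming scipy is available; the distributions are purely illustrative:

from scipy.stats import lognorm, uniform

lhd_nonuniform = mogp_emulator.LatinHypercubeDesign(
    [uniform(loc=-5., scale=6.).ppf,      # same uniform range as above
     lognorm(s=1., scale=100.).ppf])      # skewed, positive second parameter

print(lhd_nonuniform.sample(5))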

# Next, fit the surrogate GP model using MLE (MAP with uniform priors)
# Print out hyperparameter values as correlation lengths and sigma

gp = mogp_emulator.GaussianProcess(simulation_points, simulation_output)
gp = mogp_emulator.fit_GP_MAP(gp)

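# The raw hyperparameters gp.theta live on a log scale: from the transformations
# below, theta[:2] = -2*log(correlation length) and theta[2] = log(sigma^2).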
print("Correlation lengths = {}".format(np.sqrt(np.exp(-gp.theta[:2]))))
print("Sigma = {}".format(np.sqrt(np.exp(gp.theta[2]))))

# Validate emulator by comparing to true simulated value
# To compare with the emulator, use the predict method to get mean and variance
# values for the emulator predictions and see how many are within 2 standard
# deviations

n_valid = 10
validation_points = lhd.sample(n_valid)
validation_output = np.array([simulator(p) for p in validation_points])

predictions = gp.predict(validation_points)
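
# A minimal sketch of the two-standard-deviation check described above, using the
# mean and unc (variance) fields of the prediction result:

errors = predictions.mean - validation_output
within = np.abs(errors) < 2. * np.sqrt(predictions.unc)
print("{} of {} validation points within 2 standard deviations".format(
    np.count_nonzero(within), n_valid))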
Example #5
# when constructing a robust emulator. This tutorial illustrates how we can build
# better emulators using the tools in mogp_emulator.

# We need to draw some samples from the space to run some simulations and build our
# emulators. We use an LHD with only 6 sample points.

lhd = mogp_emulator.LatinHypercubeDesign([(-4., 0.), (0., 1000.)])

n_simulations = 6
simulation_points = lhd.sample(n_simulations)
simulation_output = np.array([simulator(p) for p in simulation_points])

# Next, fit the surrogate GP model using MLE, zero mean, and no priors.
# Print out hyperparameter values as correlation lengths, sigma, and nugget

gp = mogp_emulator.GaussianProcess(simulation_points, simulation_output)
gp = mogp_emulator.fit_GP_MAP(gp)

print("Zero mean and no priors:")
print("Correlation lengths = {}".format(np.sqrt(np.exp(-gp.theta[:2]))))
print("Sigma = {}".format(np.sqrt(np.exp(gp.theta[2]))))
print("Nugget = {}".format(gp.nugget))
print()

# We can look at how the emulator performs by comparing the emulator output to
# a large number of validation points. Since this simulation is cheap, we can
# actually compute this for a large number of points.

n_valid = 1000
validation_points = lhd.sample(n_valid)
validation_output = np.array([simulator(p) for p in validation_points])
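
# The comparison itself is truncated here; a sketch of the kind of summary the
# comments above describe, again using the mean and unc prediction fields:

predictions = gp.predict(validation_points)
errors = predictions.mean - validation_output
print("RMSE = {}".format(np.sqrt(np.mean(errors**2))))
print("Fraction within 2 sd = {}".format(
    np.mean(np.abs(errors) < 2. * np.sqrt(predictions.unc))))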
Example #6
inputs = ed.sample(20)

# run simulation

targets = np.array([f(p) for p in inputs])

###################################################################################

# First example -- fit a GP using MLE and a squared exponential kernel, then predict

print("Example 1: Basic GP")

# create GP and fit using MLE

gp = mogp_emulator.GaussianProcess(inputs, targets)

gp.learn_hyperparameters()

# create 10 target points to predict

predict_points = ed.sample(10)

means, variances, derivs = gp.predict(predict_points)

for pp, m in zip(predict_points, means):
    print("Target point: {}      Predicted mean: {}".format(pp, m))

###################################################################################

# Second example -- how to change the kernel
results = []

for counter, point in enumerate(input_points, 1):
    name = "simulation_{}".format(counter)
    create_problem(point, name=name)
    run_simulation(name=name, n_proc=4)
    results.append(compute_moment(name=name))

results = np.array(results)

# Now fit a Gaussian Process to input_points and results to build the approximate model. We use
# the maximum marginal likelihood method to estimate the GP hyperparameters.

gp = mogp_emulator.GaussianProcess(input_points, results)
gp.learn_hyperparameters()

# We can now make predictions for a large number of input points much more quickly than running the
# simulation. For instance, let's sample 10000 points

analysis_samples = 10000

analysis_points = ed.sample(analysis_samples)
predictions = gp.predict(analysis_points)

# predictions contains both the mean values and variances from the approximate model, so we can use this
# to quantify uncertainty given a known value of the moment.

# Since we don't have an actual observation to use, we will do a synthetic test: run the
# simulator at one additional point and treat its output as the known value, so we can
# check how well history matching recovers the known inputs.
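
# A sketch of that synthetic test, reusing the HistoryMatching pattern from the
# first example; the extra sample and the threshold of 3 (the conventional
# cutoff) are illustrative assumptions:

known_input = ed.sample(1)
known_value = simulator(known_input[0])

hm = mogp_emulator.HistoryMatching(obs=known_value, expectations=predictions,
                                   threshold=3.)
NROY = hm.get_NROY()
print("{} of {} points not ruled out yet".format(len(NROY), analysis_samples))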