Example #1
import numpy as np
from scipy.optimize import minimize
from approxposterior import approx, gpUtils, likelihood as lh


def test_1DBO():
    """
    Test Bayesian optimization of 1D function
    """

    # Define algorithm parameters
    m0 = 3                           # Size of initial training set
    bounds = [[-1, 2]]               # Prior bounds
    algorithm = "jones"              # Expected Utility from Jones et al. (1998)
    numNewPoints = 10                # Number of new design points to find
    seed = 57                        # RNG seed
    np.random.seed(seed)

    # First, directly minimize the objective to find the "true minimum"
    fn = lambda x : -(lh.testBOFn(x) + lh.testBOFnLnPrior(x))
    trueSoln = minimize(fn, lh.testBOFnSample(1), method="nelder-mead")

    # Sample design points from prior to create initial training set
    theta = lh.testBOFnSample(m0)

    # Evaluate forward model log likelihood + lnprior for each point
    y = np.zeros(len(theta))
    for ii in range(len(theta)):
        y[ii] = lh.testBOFn(theta[ii]) + lh.testBOFnLnPrior(theta[ii])

    # Initialize default gp with an ExpSquaredKernel
    gp = gpUtils.defaultGP(theta, y, fitAmp=True)

    # Initialize the approxposterior object for the 1D test function
    ap = approx.ApproxPosterior(theta=theta,
                                y=y,
                                gp=gp,
                                lnprior=lh.testBOFnLnPrior,
                                lnlike=lh.testBOFn,
                                priorSample=lh.testBOFnSample,
                                bounds=bounds,
                                algorithm=algorithm)

    # Run the Bayesian optimization!
    soln = ap.bayesOpt(nmax=numNewPoints, tol=1.0e-3, seed=seed, verbose=False,
                       cache=False, gpMethod="powell", optGPEveryN=1,
                       nGPRestarts=3, nMinObjRestarts=5, initGPOpt=True,
                       minObjMethod="nelder-mead", findMAP=True,
                       gpHyperPrior=gpUtils.defaultHyperPrior)

    # Ensure estimated maximum and value are within 5% of the truth
    errMsg = "thetaBest is incorrect."
    assert(np.allclose(soln["thetaBest"], trueSoln["x"], rtol=5.0e-2)), errMsg

    errMsg = "Maximum function value is incorrect."
    assert(np.allclose(soln["valBest"], -trueSoln["fun"], rtol=5.0e-2)), errMsg

    # Same as above, but for MAP solution
    errMsg = "thetaBest is incorrect."
    assert(np.allclose(soln["thetaMAPBest"], trueSoln["x"], rtol=5.0e-2)), errMsg

    errMsg = "Maximum function value is incorrect."
    assert(np.allclose(soln["valMAPBest"], -trueSoln["fun"], rtol=5.0e-2)), errMsg
Example #2
import numpy as np
from approxposterior import approx, gpUtils, likelihood as lh


def testMAPAmp():
    """
    Test MAP estimation
    """

    # Define algorithm parameters
    m0 = 20  # Initial size of training set
    bounds = [(-5, 5), (-5, 5)]  # Prior bounds
    algorithm = "jones"
    seed = 57  # For reproducibility
    np.random.seed(seed)

    # Randomly sample initial conditions from the prior
    theta = np.array(lh.sphereSample(m0))

    # Evaluate forward model log likelihood + lnprior for each theta
    y = np.zeros(len(theta))
    for ii in range(len(theta)):
        y[ii] = lh.sphereLnlike(theta[ii]) + lh.sphereLnprior(theta[ii])

    # Create the default GP using an ExpSquaredKernel
    gp = gpUtils.defaultGP(theta, y, fitAmp=True)

    # Initialize object using the sphere function example
    # Use default GP initialization: ExpSquaredKernel
    ap = approx.ApproxPosterior(theta=theta,
                                y=y,
                                gp=gp,
                                lnprior=lh.sphereLnprior,
                                lnlike=lh.sphereLnlike,
                                priorSample=lh.sphereSample,
                                bounds=bounds,
                                algorithm=algorithm)

    # Optimize the GP hyperparameters
    ap.optGP(seed=seed, method="powell", nGPRestarts=3)

    # Find some points to add to GP training set
    ap.findNextPoint(numNewPoints=5, nGPRestarts=3, cache=False)

    # Find MAP solution
    trueMAP = [0.0, 0.0]
    trueVal = 0.0
    testMAP, testVal = ap.findMAP(nRestarts=15)

    # Compare estimated MAP to true values, given some tolerance
    errMsg = "True MAP solution is incorrect."
    assert (np.allclose(trueMAP, testMAP, atol=1.0e-3)), errMsg
    errMsg = "True MAP function value is incorrect."
    assert (np.allclose(trueVal, testVal, atol=1.0e-3)), errMsg
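
    # Follow-up sanity check on the surrogate itself: a sketch, assuming the
    # underlying george GP API and that ApproxPosterior exposes its current
    # training data as ap.gp and ap.y. The GP predictive mean at the true MAP
    # should be near trueVal.
    mu = ap.gp.predict(ap.y, np.array([trueMAP]), return_cov=False)
    print("GP mean at true MAP:", mu)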
Example #3
import numpy as np
from approxposterior import approx, gpUtils, likelihood as lh


def testFindNoAmp():
    """
    Test the findNextPoint function.
    """

    # Define algorithm parameters
    m0 = 50  # Initial size of training set
    bounds = ((-5, 5), (-5, 5))  # Prior bounds
    algorithm = "bape"

    # For reproducibility
    seed = 57
    np.random.seed(seed)

    # Randomly sample initial conditions from the prior
    # Note: adding corner cases because approxposterior loves corners
    theta = np.array(list(lh.rosenbrockSample(m0)) + [[-5, 5], [5, 5]])

    # Evaluate forward model log likelihood + lnprior for each theta
    y = np.zeros(len(theta))
    for ii in range(len(theta)):
        y[ii] = lh.rosenbrockLnlike(theta[ii]) + lh.rosenbrockLnprior(theta[ii])

    # Set up a gp
    gp = gpUtils.defaultGP(theta, y, fitAmp=False)

    # Initialize object using the Wang & Li (2017) Rosenbrock function example
    # using default ExpSquaredKernel GP
    ap = approx.ApproxPosterior(theta=theta,
                                y=y,
                                gp=gp,
                                lnprior=lh.rosenbrockLnprior,
                                lnlike=lh.rosenbrockLnlike,
                                priorSample=lh.rosenbrockSample,
                                bounds=bounds,
                                algorithm=algorithm)

    # Find new point!
    thetaT = ap.findNextPoint(computeLnLike=False, bounds=bounds, seed=seed)

    err_msg = "findNextPoint selected incorrect thetaT."
    assert (np.allclose(thetaT, [0.79813416, 0.85542199],
                        rtol=1.0e-3)), err_msg
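
    # With computeLnLike=False, findNextPoint returns only the new design
    # point; evaluating the objective there follows the same pattern used to
    # build the training set above (a sketch)
    yT = lh.rosenbrockLnlike(thetaT) + lh.rosenbrockLnprior(thetaT)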
Example #4
import numpy as np
from approxposterior import approx, gpUtils, likelihood as lh

# Algorithm parameters: the snippet begins after these are set, so the values
# below are assumptions (taken from the sphere-function example above)
m0 = 20                      # Initial size of training set
bounds = [(-5, 5), (-5, 5)]  # Prior bounds
algorithm = "jones"          # Utility function
seed = 57                    # RNG seed
np.random.seed(seed)

# Sample design points from prior
theta = lh.sphereSample(m0)

# Evaluate forward model log likelihood + lnprior for each point
y = np.zeros(len(theta))
for ii in range(len(theta)):
    y[ii] = lh.sphereLnlike(theta[ii]) + lh.sphereLnprior(theta[ii])

# Initialize default gp with an ExpSquaredKernel
gp = gpUtils.defaultGP(theta, y, white_noise=-12, fitAmp=True)

# Initialize approxposterior object
ap = approx.ApproxPosterior(theta=theta,
                            y=y,
                            gp=gp,
                            lnprior=lh.sphereLnprior,
                            lnlike=lh.sphereLnlike,
                            priorSample=lh.sphereSample,
                            bounds=bounds,
                            algorithm=algorithm)

# Optimize the GP hyperparameters
ap.optGP(seed=seed, method="powell", nGPRestarts=1)

# Find MAP solution and function value at MAP
MAP, val = ap.findMAP(nRestarts=5)

print("Approximate MAP:", MAP)
print("GP mean function value at MAP:", val)
print("True minimum: (0,0), True function value at minimum: 0")

# Plot MAP solution on top of grid of objective function evaluations
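# A minimal sketch of that plot, assuming matplotlib and reusing the
# lh.sphereLnlike objective from above
import matplotlib.pyplot as plt

# Evaluate the objective on a grid spanning the prior bounds
grid = np.linspace(-5, 5, 100)
xx, yy = np.meshgrid(grid, grid)
zz = np.array([lh.sphereLnlike([x1, x2])
               for x1, x2 in zip(xx.ravel(), yy.ravel())]).reshape(xx.shape)

fig, axis = plt.subplots()
im = axis.pcolormesh(xx, yy, zz, shading="auto")
fig.colorbar(im, label="lnlike")
axis.scatter(MAP[0], MAP[1], color="red", marker="x", label="Approximate MAP")
axis.legend()
plt.show()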
Example #5
    print("Loading in cached simulations...")
    sims = np.load("apRunAPFModelCache.npz")
    theta = sims["theta"]
    y = sims["y"]
    print(theta.shape)

### Initialize GP ###

# Use ExpSquared kernel, the approxposterior default option
gp = gpUtils.defaultGP(theta, y, white_noise=-15, fitAmp=False)

# Initialize approxposterior
ap = approx.ApproxPosterior(theta=theta,
                            y=y,
                            gp=gp,
                            lnprior=trappist1.LnPriorTRAPPIST1,
                            lnlike=mcmcUtils.LnLike,
                            priorSample=trappist1.samplePriorTRAPPIST1,
                            bounds=bounds,
                            algorithm=algorithm)

# Run!
ap.run(m=m,
       nmax=nmax,
       estBurnin=True,
       mcmcKwargs=mcmcKwargs,
       thinChains=True,
       samplerKwargs=samplerKwargs,
       verbose=True,
       nGPRestarts=nGPRestarts,
       nMinObjRestarts=nMinObjRestarts,
       optGPEveryN=optGPEveryN)
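
# Once ap.run finishes, the posterior samples can be pulled from the emcee
# sampler, as Example #6 below does; ap.iburns and ap.ithins hold the burn-in
# and thinning estimates from each iteration
samples = ap.sampler.get_chain(discard=ap.iburns[-1], flat=True, thin=ap.ithins[-1])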
Example #6
import numpy as np
import matplotlib.pyplot as plt
import corner
from approxposterior import approx, gpUtils

# kne_inf, prior_sample, theta, y, and the args arrays passed to ap.run below
# are assumed to be defined earlier in the script
np.savetxt('theta.txt', theta)
np.savetxt('y.txt', y)

# Clip -inf log-likelihoods to a large negative floor so the GP can handle them
y_no_inf = np.nan_to_num(y, neginf=-600.)
y_no_inf[y_no_inf <= -600.] = -600.
#theta = np.loadtxt('theta.txt')
#y_no_inf = np.nan_to_num(np.loadtxt('y.txt'), neginf=-710.)
gp = gpUtils.defaultGP(theta, y_no_inf, white_noise=-12)

m = 200
nmax = 3
bounds = [(-17, -12), (0, 1)]
algorithm = 'bape'
samplerKwargs = {"nwalkers": 20}
mcmcKwargs = {"iterations": 8000}

ap = approx.ApproxPosterior(theta=theta,
                            y=y_no_inf,
                            gp=gp,
                            lnprior=kne_inf.ln_prior,
                            lnlike=kne_inf.ln_likelihood,
                            priorSample=prior_sample,
                            algorithm=algorithm,
                            bounds=bounds)

ap.run(m=m, convergenceCheck=True, estBurnin=True, nGPRestarts=3,
       mcmcKwargs=mcmcKwargs, cache=False, samplerKwargs=samplerKwargs,
       verbose=True, thinChains=True, optGPEveryN=5,
       args=[mlims_f_j, T_f_j, t0, p_d_f, P_f, P_A, P_T, f_bar_sum,
             plims_f_bar, plims_T, m_low_arr, m_high_arr])

samples = ap.sampler.get_chain(discard=ap.iburns[-1], flat=True, thin=ap.ithins[-1])

# Corner plot!
fig = corner.corner(samples, quantiles=[0.16, 0.5, 0.84], show_titles=True,
                    range=[(-17, -12), (0, 1)], scale_hist=True,
                    plot_contours=True, labels=[r"$M0$", r"$\gamma$"],
                    title_kwargs={"fontsize": 14})
plt.savefig(fname='../plots/uniform-likelihood-simplified-2f-1-0-PA0p2-22-26-26-26-and-21-27-27-27.png',
            format='png')
Example #7
mcmcKwargs = {"iterations": int(2.0e4)}  # emcee.EnsembleSampler.run_mcmc

# Evaluate model lnlikelihood + lnprior for each theta sampled from prior
theta = lh.rosenbrockSample(m0)
y = np.zeros(len(theta))
for ii in range(len(theta)):
    y[ii] = lh.rosenbrockLnlike(theta[ii]) + lh.rosenbrockLnprior(theta[ii])

# Use default GP with squared exponential kernel
gp = gpUtils.defaultGP(theta, y, white_noise=-12)

# Initialize object using the Wang & Li (2018) Rosenbrock function example
ap = approx.ApproxPosterior(theta=theta,
                            y=y,
                            gp=gp,
                            lnprior=lh.rosenbrockLnprior,
                            lnlike=lh.rosenbrockLnlike,
                            priorSample=lh.rosenbrockSample,
                            bounds=bounds,
                            algorithm=algorithm)

# Run!
ap.run(m=m,
       nmax=nmax,
       estBurnin=True,
       nGPRestarts=3,
       mcmcKwargs=mcmcKwargs,
       cache=False,
       samplerKwargs=samplerKwargs,
       verbose=True,
       thinChains=False,
       onlyLastMCMC=True)
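
# With onlyLastMCMC=True only the final iteration runs an MCMC; its chain can
# be recovered from the emcee sampler as in Example #6 (thinning was disabled
# above, so only the burn-in is discarded)
samples = ap.sampler.get_chain(discard=ap.iburns[-1], flat=True)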
Example #8
import numpy as np
from approxposterior import approx, gpUtils, likelihood as lh

# Algorithm parameters: the snippet begins after these are set, so the values
# below are assumptions (taken from Example #1, which uses the same 1D test function)
m0 = 3              # Size of initial training set
bounds = [[-1, 2]]  # Prior bounds
algorithm = "jones"
numNewPoints = 10   # Number of new design points to find
seed = 57           # RNG seed
np.random.seed(seed)

# Sample design points from prior to create initial training set
theta = lh.testBOFnSample(m0)

# Evaluate forward model log likelihood + lnprior for each point
y = np.zeros(len(theta))
for ii in range(len(theta)):
    y[ii] = lh.testBOFn(theta[ii]) + lh.testBOFnLnPrior(theta[ii])

# Initialize default gp with an ExpSquaredKernel
gp = gpUtils.defaultGP(theta, y, white_noise=-12, fitAmp=True)

# Initialize the approxposterior object for the 1D test function
ap = approx.ApproxPosterior(theta=theta,
                            y=y,
                            gp=gp,
                            lnprior=lh.testBOFnLnPrior,
                            lnlike=lh.testBOFn,
                            priorSample=lh.testBOFnSample,
                            bounds=bounds,
                            algorithm=algorithm)

# Run the Bayesian optimization!
soln = ap.bayesOpt(nmax=numNewPoints,
                   tol=1.0e-3,
                   kmax=3,
                   seed=seed,
                   verbose=False,
                   cache=False,
                   gpMethod="powell",
                   optGPEveryN=1,
                   nGPRestarts=2,
                   nMinObjRestarts=5)
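
# bayesOpt returns a solution dictionary; Example #1 above exercises its keys
print("Best theta:", soln["thetaBest"])
print("Best value:", soln["valBest"])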
Example #9
import numpy as np
from approxposterior import approx, gpUtils

# LnPrior, LnLike, SampleStateVector, and the run parameters used below are
# assumed to be defined earlier in the script; the excerpt opens inside the
# else branch of a caching block
#np.savez("apRunAPFModelCache.npz", theta=theta, y=y)

print("Loading in cached simulations...")
sims = np.load("apRunAPFModelCache.npz")
theta = sims["theta"]
y = sims["y"]

### Initialize GP ###

# Use ExpSquared kernel, the approxposterior default option
gp = gpUtils.defaultGP(theta, y, order=None, white_noise=-12)

# Initialize approxposterior
ap = approx.ApproxPosterior(theta=theta,
                            y=y,
                            gp=gp,
                            lnprior=LnPrior,
                            lnlike=LnLike,
                            priorSample=SampleStateVector,
                            bounds=daBounds,
                            algorithm=sAlgorithm)

# Run!
ap.run(m=iNewPoints, nmax=iMaxIter, estBurnin=True, mcmcKwargs=mcmcKwargs,
       thinChains=True, samplerKwargs=samplerKwargs, verbose=True,
       nGPRestarts=iGPRestarts, nMinObjRestarts=iMinObjRestarts,
       optGPEveryN=iGPOptInterval, seed=iSeed, cache=True, convergenceCheck=True,
       eps=dConvergenceLimit, kmax=iNumConverged, **kwargs)
# Done!