Example 1
def testInitGPNoAmp():
    """
    Test default GP initialization.
    """

    # For reproducibility
    m0 = 50
    seed = 57
    np.random.seed(seed)

    # Randomly sample initial conditions from the prior
    theta = np.array(lh.rosenbrockSample(m0))

    # Evaluate forward model log likelihood + lnprior for each theta
    y = np.zeros(len(theta))
    for ii in range(len(theta)):
        y[ii] = lh.rosenbrockLnlike(theta[ii]) + lh.rosenbrockLnprior(
            theta[ii])

    # Set up a GP with an ExpSquaredKernel
    gp = gpUtils.defaultGP(theta, y, fitAmp=False)

    errMsg = "ERROR: Default initialization with incorrect parameters!"
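    # With fitAmp=False, the expected vector below is assumed to hold the GP
    # mean followed by the two log metric scales (george's parameter ordering)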
    true = [-31.02658091, -1.0552327, -1.16092752]
    assert np.allclose(true, gp.get_parameter_vector()), errMsg
Example 2
def test_1DBO():
    """
    Test Bayesian optimization of a 1D function.
    """

    # Define algorithm parameters
    m0 = 3                           # Size of initial training set
    bounds = [[-1, 2]]               # Prior bounds
    algorithm = "jones"              # Expected Improvement from Jones et al. (1998)
    numNewPoints = 10                # Number of new design points to find
    seed = 57                        # RNG seed
    np.random.seed(seed)

    # First, find the "true" solution by directly minimizing the negative
    # objective, since bayesOpt maximizes lnlike + lnprior
    fn = lambda x: -(lh.testBOFn(x) + lh.testBOFnLnPrior(x))
    trueSoln = minimize(fn, lh.testBOFnSample(1), method="nelder-mead")

    # Sample design points from prior to create initial training set
    theta = lh.testBOFnSample(m0)

    # Evaluate forward model log likelihood + lnprior for each point
    y = np.zeros(len(theta))
    for ii in range(len(theta)):
        y[ii] = lh.testBOFn(theta[ii]) + lh.testBOFnLnPrior(theta[ii])

    # Initialize default gp with an ExpSquaredKernel
    gp = gpUtils.defaultGP(theta, y, fitAmp=True)

    # Initialize the approxposterior object for the 1D BO test function
    ap = approx.ApproxPosterior(theta=theta,
                                y=y,
                                gp=gp,
                                lnprior=lh.testBOFnLnPrior,
                                lnlike=lh.testBOFn,
                                priorSample=lh.testBOFnSample,
                                bounds=bounds,
                                algorithm=algorithm)

    # Run the Bayesian optimization!
    soln = ap.bayesOpt(nmax=numNewPoints, tol=1.0e-3, seed=seed, verbose=False,
                       cache=False, gpMethod="powell", optGPEveryN=1,
                       nGPRestarts=3, nMinObjRestarts=5, initGPOpt=True,
                       minObjMethod="nelder-mead", findMAP=True,
                       gpHyperPrior=gpUtils.defaultHyperPrior)

    # Ensure estimated maximum and value are within 5% of the truth
    errMsg = "thetaBest is incorrect."
    assert(np.allclose(soln["thetaBest"], trueSoln["x"], rtol=5.0e-2)), errMsg

    errMsg = "Maximum function value is incorrect."
    assert(np.allclose(soln["valBest"], -trueSoln["fun"], rtol=5.0e-2)), errMsg

    # Same as above, but for the MAP solution
    errMsg = "thetaMAPBest is incorrect."
    assert(np.allclose(soln["thetaMAPBest"], trueSoln["x"], rtol=5.0e-2)), errMsg

    errMsg = "MAP function value is incorrect."
    assert(np.allclose(soln["valMAPBest"], -trueSoln["fun"], rtol=5.0e-2)), errMsg
Example 3
def testMAPAmp():
    """
    Test MAP estimation
    """

    # Define algorithm parameters
    m0 = 20  # Initial size of training set
    bounds = [(-5, 5), (-5, 5)]  # Prior bounds
    algorithm = "jones"
    seed = 57  # For reproducibility
    np.random.seed(seed)

    # Randomly sample initial conditions from the prior
    theta = np.array(lh.sphereSample(m0))

    # Evaluate forward model log likelihood + lnprior for each theta
    y = np.zeros(len(theta))
    for ii in range(len(theta)):
        y[ii] = lh.sphereLnlike(theta[ii]) + lh.sphereLnprior(theta[ii])

    # Create the default GP using an ExpSquaredKernel
    gp = gpUtils.defaultGP(theta, y, fitAmp=True)

    # Initialize object using the sphere function example
    # Use default GP initialization: ExpSquaredKernel
    ap = approx.ApproxPosterior(theta=theta,
                                y=y,
                                gp=gp,
                                lnprior=lh.sphereLnprior,
                                lnlike=lh.sphereLnlike,
                                priorSample=lh.sphereSample,
                                bounds=bounds,
                                algorithm=algorithm)

    # Optimize the GP hyperparameters
    ap.optGP(seed=seed, method="powell", nGPRestarts=3)

    # Find some points to add to GP training set
    ap.findNextPoint(numNewPoints=5, nGPRestarts=3, cache=False)

    # Find MAP solution
    trueMAP = [0.0, 0.0]
    trueVal = 0.0
    testMAP, testVal = ap.findMAP(nRestarts=15)

    # Compare estimated MAP to true values, given some tolerance
    errMsg = "Estimated MAP solution is incorrect."
    assert (np.allclose(trueMAP, testMAP, atol=1.0e-3)), errMsg
    errMsg = "Estimated MAP function value is incorrect."
    assert (np.allclose(trueVal, testVal, atol=1.0e-3)), errMsg
Example 4
def testUtilsGPNoAmp():
    """
    Test the utility functions!  This probes the default GP setup
    (gpUtils.defaultGP), which is rather straightforward, and makes sure the
    utility functions produce the right results (also straightforward). Based
    on the Wang & Li (2017) Rosenbrock function example.
    """

    # For reproducibility
    m0 = 20
    seed = 57
    np.random.seed(seed)

    # Randomly sample initial conditions from the prior
    theta = np.array(lh.rosenbrockSample(m0))

    # Evaluate forward model log likelihood + lnprior for each theta
    y = np.zeros(len(theta))
    for ii in range(len(theta)):
        y[ii] = lh.rosenbrockLnlike(theta[ii]) + lh.rosenbrockLnprior(
            theta[ii])

    # Set up a GP
    gp = gpUtils.defaultGP(theta, y, fitAmp=False)

    # Compute the AGP utility function at some point
    thetaTest = np.array([-2.3573, 4.673])
    testUtil = ut.AGPUtility(thetaTest, y, gp, lh.rosenbrockLnprior)

    errMsg = "ERROR: AGP util fn bug.  Did you change gpUtils.defaultGP?"
    assert np.allclose(testUtil, 37.41585067, rtol=1.0e-4), errMsg

    # Now do the same using the BAPE utility function
    testUtil = ut.BAPEUtility(thetaTest, y, gp, lh.rosenbrockLnprior)

    errMsg = "ERROR: BAPE util fn bug.  Did you change gpUtils.defaultGP?"
    assert np.allclose(testUtil, 76.15271103, rtol=1.0e-4), errMsg

    # Now do the same using the Jones+1998 utility function
    testUtil = ut.JonesUtility(thetaTest, y, gp, lh.rosenbrockLnprior)

    errMsg = "ERROR: Jones util fn bug.  Did you change gpUtils.defaultGP?"
    assert np.allclose(testUtil, 0, atol=1.0e-4), errMsg
Example 5
def testFindNoAmp():
    """
    Test the findNextPoint function.
    """

    # Define algorithm parameters
    m0 = 50  # Initial size of training set
    bounds = ((-5, 5), (-5, 5))  # Prior bounds
    algorithm = "bape"

    # For reproducibility
    seed = 57
    np.random.seed(seed)

    # Randomly sample initial conditions from the prior
    # Note: adding corner cases because approxposterior loves corners
    theta = np.array(list(lh.rosenbrockSample(m0)) + [[-5, 5], [5, 5]])

    # Evaluate forward model log likelihood + lnprior for each theta
    y = np.zeros(len(theta))
    for ii in range(len(theta)):
        y[ii] = lh.rosenbrockLnlike(theta[ii]) + lh.rosenbrockLnprior(
            theta[ii])

    # Set up a GP
    gp = gpUtils.defaultGP(theta, y, fitAmp=False)

    # Initialize object using the Wang & Li (2017) Rosenbrock function example
    # using default ExpSquaredKernel GP
    ap = approx.ApproxPosterior(theta=theta,
                                y=y,
                                gp=gp,
                                lnprior=lh.rosenbrockLnprior,
                                lnlike=lh.rosenbrockLnlike,
                                priorSample=lh.rosenbrockSample,
                                bounds=bounds,
                                algorithm=algorithm)

    # Find new point!
    thetaT = ap.findNextPoint(computeLnLike=False, bounds=bounds, seed=seed)

    err_msg = "findNextPoint selected incorrect thetaT."
    assert (np.allclose(thetaT, [0.79813416, 0.85542199],
                        rtol=1.0e-3)), err_msg
Example 6
def testGPOptNoAmp():
    """
    Test optimizing the GP hyperparameters.
    """

    # For reproducibility
    m0 = 50
    seed = 57
    np.random.seed(seed)

    # Randomly sample initial conditions from the prior
    theta = np.array(lh.rosenbrockSample(m0))

    # Evaluate forward model log likelihood + lnprior for each theta
    y = np.zeros(len(theta))
    for ii in range(len(theta)):
        y[ii] = lh.rosenbrockLnlike(theta[ii]) + lh.rosenbrockLnprior(
            theta[ii])

    # Set up a GP
    gp = gpu.defaultGP(theta, y, fitAmp=False)

    # Optimize gp using default opt parameters
    gp = gpu.optimizeGP(gp, theta, y, seed=seed, nGPRestarts=5)

    # Extract GP hyperparameters and compare to the truth; ignore the fit mean
    # and focus on the kernel scale lengths
    hypeTest = gp.get_parameter_vector()[1:]

    errMsg = "ERROR: GP hyperparameters are not close to the true value!"
    hypeTrue = [-1.54256578, 3.24723589]
    assert np.allclose(hypeTest, hypeTrue, rtol=1.0e-2), errMsg
Example 7
m0 = 10  # Size of training set
bounds = ((-5, 5), (-5, 5))  # Prior bounds
algorithm = "bape"  # Use the Kandasamy et al. (2017) formalism
seed = 57  # RNG seed
np.random.seed(seed)

# Sample design points from prior
theta = lh.sphereSample(m0)

# Evaluate forward model log likelihood + lnprior for each point
y = np.zeros(len(theta))
for ii in range(len(theta)):
    y[ii] = lh.sphereLnlike(theta[ii]) + lh.sphereLnprior(theta[ii])

# Initialize default gp with an ExpSquaredKernel
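# (Assumption: white_noise sets the GP white-noise level in log-space, so -12
# corresponds to a tiny jitter term)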
gp = gpUtils.defaultGP(theta, y, white_noise=-12, fitAmp=True)

# Initialize approxposterior object
ap = approx.ApproxPosterior(theta=theta,
                            y=y,
                            gp=gp,
                            lnprior=lh.sphereLnprior,
                            lnlike=lh.sphereLnlike,
                            priorSample=lh.sphereSample,
                            bounds=bounds,
                            algorithm=algorithm)

# Optimize the GP hyperparameters
ap.optGP(seed=seed, method="powell", nGPRestarts=1)

# Find MAP solution and function value at MAP
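# A minimal completion sketch (assumed; the original snippet ends above),
# reusing the findMAP call demonstrated in Example 3
thetaMAP, valMAP = ap.findMAP(nRestarts=15)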
Example 8
        y[ii] = mcmcUtils.LnLike(theta[ii], **kwargs)[0] \
                + trappist1.LnPriorTRAPPIST1(theta[ii], **kwargs)
    np.savez("apRunAPFModelCache.npz", theta=theta, y=y)

else:
    print("Loading in cached simulations...")
    sims = np.load("apRunAPFModelCache.npz")
    theta = sims["theta"]
    y = sims["y"]
    print(theta.shape)

### Initialize GP ###

# Use ExpSquared kernel, the approxposterior default option
gp = gpUtils.defaultGP(theta, y, white_noise=-15, fitAmp=False)

# Initialize approxposterior
ap = approx.ApproxPosterior(theta=theta,
                            y=y,
                            gp=gp,
                            lnprior=trappist1.LnPriorTRAPPIST1,
                            lnlike=mcmcUtils.LnLike,
                            priorSample=trappist1.samplePriorTRAPPIST1,
                            bounds=bounds,
                            algorithm=algorithm)

# Run!
ap.run(m=m,
       nmax=nmax,
       estBurnin=True,
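       # assumed continuation: the original snippet is cut off above; these
       # keyword arguments mirror the complete ap.run call in Example 9
       thinChains=True, samplerKwargs=samplerKwargs, mcmcKwargs=mcmcKwargs,
       verbose=True)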
Example 9
m_low_arr = np.ones_like(mlims_f_j) * m_low
m_high_arr = np.ones_like(mlims_f_j) * m_high
total_obs = sum(len(m) for m in mlims_f_j)
plims_f_bar = 1 / (m_high - m_low)**total_obs * (1 - sum(P_f))
plims_T = 1 / (m_high - m_low)**total_obs

# Build the initial training set: sample design points uniformly over the
# prior bounds and evaluate the full log-probability at each one
theta = np.array([[np.random.uniform(low=-17, high=-12),
                   np.random.uniform(low=0, high=1)] for i in range(100)])
y = np.array([kne_inf.ln_prob(th, mlims_f_j, T_f_j, t0, p_d_f, P_f, P_A, P_T,
                              f_bar_sum, plims_f_bar, plims_T, m_low_arr,
                              m_high_arr) for th in theta])
np.savetxt('theta.txt', theta)
np.savetxt('y.txt', y)

# Floor -inf log-probabilities at a large negative value so the GP can be fit
y_no_inf = np.nan_to_num(y, neginf=-600.)
y_no_inf[y_no_inf <= -600.] = -600.
#theta = np.loadtxt('theta.txt')
#y_no_inf = np.nan_to_num(np.loadtxt('y.txt'),neginf=-710.)
gp = gpUtils.defaultGP(theta, y_no_inf, white_noise=-12)

m=200
nmax=3
bounds = [(-17,-12),(0,1)]
algorithm = 'bape'
samplerKwargs = {"nwalkers" : 20}
mcmcKwargs = {"iterations" : 8000}

ap = approx.ApproxPosterior(theta=theta, y=y_no_inf, gp=gp, lnprior=kne_inf.ln_prior, lnlike=kne_inf.ln_likelihood,
                            priorSample=prior_sample, algorithm=algorithm, bounds=bounds)

ap.run(m=m, convergenceCheck=True, estBurnin=True, nGPRestarts=3,
       mcmcKwargs=mcmcKwargs, cache=False, samplerKwargs=samplerKwargs,
       verbose=True, thinChains=True, optGPEveryN=5,
       args=[mlims_f_j, T_f_j, t0, p_d_f, P_f, P_A, P_T, f_bar_sum,
             plims_f_bar, plims_T, m_low_arr, m_high_arr])
Example 10
    for iTrial in range(iTrainInit):
        print("Training simulation: %d" % iTrial)
        theta[iTrial,:] = SampleStateVector()
        y[iTrial] = LnLike(theta[iTrial], **kwargs)[0] + LnPrior(theta[iTrial], **kwargs)
    #np.savez("apRunAPFModelCache.npz", theta=theta, y=y)

else:
    print("Loading in cached simulations...")
    sims = np.load("apRunAPFModelCache.npz")
    theta = sims["theta"]
    y = sims["y"]

### Initialize GP ###

# Use ExpSquared kernel, the approxposterior default option
gp = gpUtils.defaultGP(theta, y, order=None, white_noise=-12)

# Initialize approxposterior
ap = approx.ApproxPosterior(theta=theta,
                            y=y,
                            gp=gp,
                            lnprior=LnPrior,
                            lnlike=LnLike,
                            priorSample=SampleStateVector,
                            bounds=daBounds,
                            algorithm=sAlgorithm)

# Run!
ap.run(m=iNewPoints, nmax=iMaxIter, estBurnin=True, mcmcKwargs=mcmcKwargs,
       thinChains=True, samplerKwargs=samplerKwargs, verbose=True,
       nGPRestarts=iGPRestarts, nMinObjRestarts=iMinObjRestarts,
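       # assumed completion: the original snippet ends mid-call, so it is
       # simply closed here; any remaining keyword arguments are not shown
       )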