Code Example #1
import numpy as np
from approxposterior import likelihood as lh


def testTestFns():
    """
    Test the likelihood and optimization test functions from likelihood.py.
    """

    # Check 2D Rosenbrock function, compare to the known global minimum
    test = lh.rosenbrockLnlike([1, 1])
    errMsg = "2D Rosenbrock function is incorrect"
    truth = 0
    assert np.allclose(test, truth), errMsg

    # Check 5D Rosenbrock function, compare to the known global minimum
    test = lh.rosenbrockLnlike([1, 1, 1, 1, 1])
    errMsg = "5D Rosenbrock function is incorrect"
    truth = 0
    assert np.allclose(test, truth), errMsg

    # Check sphere function, compare to the known global minimum
    test = lh.sphereLnlike([0, 0])
    errMsg = "Sphere function is incorrect"
    truth = 0
    assert np.allclose(test, truth), errMsg

    # Check 1D BayesOpt test function, compare to the known global maximum
    test = lh.testBOFn(-0.359)
    errMsg = "1D test BayesOpt function is incorrect"
    truth = 0.5003589
    assert np.allclose(test, truth), errMsg
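
For context, the Rosenbrock log-likelihood exercised above is presumably just the negated N-dimensional Rosenbrock function, whose global maximum of 0 at (1, ..., 1) is exactly what the asserts check. A minimal sketch; the body below is an assumption, not likelihood.py's verbatim source:

import numpy as np

def rosenbrockLnlikeSketch(theta):
    # Assumed form: negated N-D Rosenbrock function, with a global
    # maximum of 0 at theta = (1, ..., 1), matching the truths above
    theta = np.asarray(theta, dtype=float)
    return -np.sum(100.0 * (theta[1:] - theta[:-1]**2)**2
                   + (1.0 - theta[:-1])**2)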
Code Example #2
File: test_InitGP.py  Project: syrte/approxposterior
import numpy as np
from approxposterior import gpUtils, likelihood as lh


def testInitGPNoAmp():
    """
    Test default GP initialization without fitting the kernel amplitude
    (fitAmp=False).
    """

    # For reproducibility
    m0 = 50
    seed = 57
    np.random.seed(seed)

    # Randomly sample initial conditions from the prior
    theta = np.array(lh.rosenbrockSample(m0))

    # Evaluate forward model log likelihood + lnprior for each theta
    y = np.zeros(len(theta))
    for ii in range(len(theta)):
        y[ii] = lh.rosenbrockLnlike(theta[ii]) + lh.rosenbrockLnprior(
            theta[ii])

    # Set up a gp with a ExpSquaredKernel
    gp = gpUtils.defaultGP(theta, y, fitAmp=False)

    errMsg = "ERROR: Default initialization with incorrect parameters!"
    true = [-31.02658091, -1.0552327, -1.16092752]
    assert np.allclose(true, gp.get_parameter_vector()), errMsg
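
gpUtils.defaultGP itself is not shown in this listing. As a rough sketch of what such a helper builds with george (the metric heuristic and whiteNoise default below are assumptions, not approxposterior's verbatim code):

import george
import numpy as np

def defaultGPSketch(theta, y, whiteNoise=-12, fitAmp=False):
    # Guess initial squared length scales from the spread of the inputs
    initMetric = np.nanmedian(theta**2, axis=0) / 10.0
    kernel = george.kernels.ExpSquaredKernel(metric=initMetric,
                                             ndim=theta.shape[-1])
    if fitAmp:
        kernel = np.var(y) * kernel  # optional fitted amplitude term
    # With fitAmp=False in 2D this leaves 3 free parameters (mean plus
    # two log-metric terms), consistent with the vector asserted above
    gp = george.GP(kernel=kernel, fit_mean=True, mean=np.nanmedian(y),
                   white_noise=whiteNoise, fit_white_noise=False)
    gp.compute(theta)
    return gp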
Code Example #3
File: test_GPUtil.py  Project: syrte/approxposterior
import numpy as np
from approxposterior import gpUtils, likelihood as lh, utility as ut


def testUtilsGPNoAmp():
    """
    Test the utility functions. This probes gpUtils.defaultGP (which is
    rather straightforward) and makes sure the utility functions produce
    the right results (also straightforward). Based on the Wang & Li (2018)
    Rosenbrock function example.
    """

    # For reproducibility
    m0 = 20
    seed = 57
    np.random.seed(seed)

    # Randomly sample initial conditions from the prior
    theta = np.array(lh.rosenbrockSample(m0))

    # Evaluate forward model log likelihood + lnprior for each theta
    y = np.zeros(len(theta))
    for ii in range(len(theta)):
        y[ii] = lh.rosenbrockLnlike(theta[ii]) + lh.rosenbrockLnprior(
            theta[ii])

    # Set up a gp
    gp = gpUtils.defaultGP(theta, y, fitAmp=False)

    # Compute the AGP utility function at some point
    thetaTest = np.array([-2.3573, 4.673])
    testUtil = ut.AGPUtility(thetaTest, y, gp, lh.rosenbrockLnprior)

    errMsg = "ERROR: AGP util fn bug.  Did you change gp_utils.setup_gp?"
    assert np.allclose(testUtil, 37.41585067, rtol=1.0e-4), errMsg

    # Now do the same using the BAPE utility function
    testUtil = ut.BAPEUtility(thetaTest, y, gp, lh.rosenbrockLnprior)

    errMsg = "ERROR: BAPE util fn bug.  Did you change gp_utils.setup_gp?"
    assert np.allclose(testUtil, 76.15271103, rtol=1.0e-4), errMsg

    # Now do the same using the Jones+1998 utility function
    testUtil = ut.JonesUtility(thetaTest, y, gp, lh.rosenbrockLnprior)

    errMsg = "ERROR: Jones util fn bug.  Did you change gp_utils.setup_gp?"
    assert np.allclose(testUtil, 0, rtol=1.0e-4), errMsg
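
The acquisition functions tested here all reduce to simple expressions in the GP posterior mean and variance. Below is a hedged sketch of a BAPE-style utility (Kandasamy et al. 2017); the sign convention and prior handling in approxposterior's ut.BAPEUtility may differ:

import numpy as np

def bapeUtilitySketch(theta, y, gp, lnprior):
    # Reject points outside the prior support
    if not np.isfinite(lnprior(theta)):
        return -np.inf
    theta = np.asarray(theta).reshape(1, -1)
    mu, var = gp.predict(y, theta, return_var=True)
    # BAPE targets the variance of exp(f) for f ~ N(mu, var):
    # log Var[exp(f)] = 2*mu + var + log(exp(var) - 1)
    util = 2.0 * mu + var + np.log(np.expm1(var))
    return util[0]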
Code Example #4
import numpy as np
from approxposterior import approx, gpUtils, likelihood as lh


def testFindNoAmp():
    """
    Test the findNextPoint function.
    """

    # Define algorithm parameters
    m0 = 50  # Initial size of training set
    bounds = ((-5, 5), (-5, 5))  # Prior bounds
    algorithm = "bape"

    # For reproducibility
    seed = 57
    np.random.seed(seed)

    # Randomly sample initial conditions from the prior
    # Note: adding corner cases because approxposterior loves corners
    theta = np.array(list(lh.rosenbrockSample(m0)) + [[-5, 5], [5, 5]])

    # Evaluate forward model log likelihood + lnprior for each theta
    y = np.zeros(len(theta))
    for ii in range(len(theta)):
        y[ii] = lh.rosenbrockLnlike(theta[ii]) + lh.rosenbrockLnprior(
            theta[ii])

    # Set up a gp
    gp = gpUtils.defaultGP(theta, y, fitAmp=False)

    # Initialize object using the Wang & Li (2017) Rosenbrock function example
    # using default ExpSquaredKernel GP
    ap = approx.ApproxPosterior(theta=theta,
                                y=y,
                                gp=gp,
                                lnprior=lh.rosenbrockLnprior,
                                lnlike=lh.rosenbrockLnlike,
                                priorSample=lh.rosenbrockSample,
                                bounds=bounds,
                                algorithm=algorithm)

    # Find new point!
    thetaT = ap.findNextPoint(computeLnLike=False, bounds=bounds, seed=seed)

    err_msg = "findNextPoint selected incorrect thetaT."
    assert (np.allclose(thetaT, [0.79813416, 0.85542199],
                        rtol=1.0e-3)), err_msg
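
findNextPoint boils down to maximizing the chosen acquisition function over the prior bounds. Here is a minimal sketch of that selection step, usable as findNextPointSketch(ut.BAPEUtility, y, gp, lh.rosenbrockLnprior, lh.rosenbrockSample, bounds); the name and signature are illustrative, since the real method lives on ApproxPosterior and takes different arguments:

import numpy as np
from scipy.optimize import minimize

def findNextPointSketch(utilityFn, y, gp, lnprior, priorSample, bounds,
                        nRestarts=5):
    # Maximize the acquisition function by minimizing its negative,
    # restarting from several random draws from the prior
    bestTheta, bestVal = None, np.inf
    for _ in range(nRestarts):
        theta0 = np.asarray(priorSample(1)).flatten()
        res = minimize(lambda t: -utilityFn(t, y, gp, lnprior), theta0,
                       bounds=bounds, method="L-BFGS-B")
        if res.fun < bestVal:
            bestTheta, bestVal = res.x, res.fun
    return bestTheta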
Code Example #5
import numpy as np
from approxposterior import gpUtils as gpu


def testGPOptNoAmp():
    """
    Test optimizing the GP hyperparameters.
    """

    # For reproducibility
    m0 = 50
    seed = 57
    np.random.seed(seed)

    # Randomly sample initial conditions from the prior
    theta = np.array(lh.rosenbrockSample(m0))

    # Evaluate forward model log likelihood + lnprior for each theta
    y = np.zeros(len(theta))
    for ii in range(len(theta)):
        y[ii] = lh.rosenbrockLnlike(theta[ii]) + lh.rosenbrockLnprior(
            theta[ii])

    # Set up a gp
    gp = gpu.defaultGP(theta, y, fitAmp=False)

    # Optimize gp using default opt parameters
    gp = gpu.optimizeGP(gp, theta, y, seed=seed, nGPRestarts=5)

    # Extract GP hyperparameters, compare to truth
    # Ignore mean fit - just focus on scale lengths and amplitude
    hypeTest = gp.get_parameter_vector()[1:]

    errMsg = "ERROR: GP hyperparameters are not close to the true value!"
    hypeTrue = [-1.54256578, 3.24723589]
    assert np.allclose(hypeTest, hypeTrue, rtol=1.0e-2), errMsg
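
gpu.optimizeGP amounts to maximizing the GP's log marginal likelihood with several restarts. A hedged sketch using george's API (the jitter scale and optimizer choice below are assumptions, not approxposterior's exact defaults):

import numpy as np
from scipy.optimize import minimize

def optimizeGPSketch(gp, theta, y, seed=None, nGPRestarts=5):
    np.random.seed(seed)
    gp.compute(theta)

    def nll(p):
        # Negative log marginal likelihood of the data under the GP
        gp.set_parameter_vector(p)
        ll = gp.log_likelihood(y, quiet=True)
        return -ll if np.isfinite(ll) else 1.0e25

    p0 = gp.get_parameter_vector()
    bestP, bestVal = p0, nll(p0)
    for _ in range(nGPRestarts):
        guess = p0 + 1.0e-2 * np.random.randn(len(p0))  # jittered restart
        res = minimize(nll, guess, method="Nelder-Mead")
        if res.fun < bestVal:
            bestP, bestVal = res.x, res.fun
    gp.set_parameter_vector(bestP)
    gp.recompute()
    return gp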
Code Example #6
File: example.py  Project: dflemin3/thesis
import numpy as np
from approxposterior import approx, gpUtils, likelihood as lh

# Define algorithm parameters
m0 = 50  # Initial size of training set
m = 20  # Num points to find each iteration
nmax = 2  # Maximum number of iterations
bounds = [(-5, 5), (-5, 5)]  # Prior bounds
algorithm = "bape"  # Use the Kandasamy et al. (2017) formalism
np.random.seed(57)
samplerKwargs = {"nwalkers": 20}  # emcee.EnsembleSampler
mcmcKwargs = {"iterations": int(2.0e4)}  # emcee.EnsembleSampler.run_mcmc

# Evaluate model lnlikelihood + lnprior for each theta sampled from prior
theta = lh.rosenbrockSample(m0)
y = np.zeros(len(theta))
for ii in range(len(theta)):
    y[ii] = lh.rosenbrockLnlike(theta[ii]) + lh.rosenbrockLnprior(theta[ii])

# Use default GP with squared exponential kernel
gp = gpUtils.defaultGP(theta, y, white_noise=-12)

# Initialize object using the Wang & Li (2018) Rosenbrock function example
ap = approx.ApproxPosterior(theta=theta,
                            y=y,
                            gp=gp,
                            lnprior=lh.rosenbrockLnprior,
                            lnlike=lh.rosenbrockLnlike,
                            priorSample=lh.rosenbrockSample,
                            bounds=bounds,
                            algorithm=algorithm)

# Run! (the example was cut off here; the call below follows
# approxposterior's documented run() API, using the parameters set above)
ap.run(m=m, nmax=nmax, samplerKwargs=samplerKwargs, mcmcKwargs=mcmcKwargs)
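
Once run() finishes, the approximate posterior can be pulled from the cached emcee sampler. A usage sketch following the project README; verify the ap.sampler, ap.iburns, and ap.ithins attribute names against your installed version:

# Flatten the final MCMC chain, discarding burn-in and thinning
samples = ap.sampler.get_chain(discard=ap.iburns[-1], flat=True,
                               thin=ap.ithins[-1])
print(samples.shape)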