# Assumed imports for this snippet: import numpy as np;
# from scipy.optimize import minimize; from george import GP, kernels
def __init__(self, mean, covariance, points, lnlikes, quiet=True):
        self.mean = mean
        self.cov = covariance

        #Only interpolate over points whose log-likelihood is within
        #100 * dof of the maximum (i.e. keep only points better than,
        #say, 10-ish sigma)
        dof = len(self.mean)
        inds = np.fabs(np.max(lnlikes) - lnlikes) < 100 * dof
        if not quiet:
            print(inds)
            print(lnlikes)

        self.points = points[inds]
        self.lnlikes_true = lnlikes[inds]
        self.lnlike_max = np.max(lnlikes[inds])
        self.lnlikes = lnlikes[inds] - self.lnlike_max  #max is now at 0
        self.x = self._transform_data(self.points)

        if not quiet:
            print(max(self.lnlikes), min(self.lnlikes))

        _guess = 4.5  #kernel length guess
        kernel = kernels.ExpSquaredKernel(metric=_guess, ndim=dof)
        #Set the GP mean well below the minimum lnlike so the prediction
        #falls off away from the training points
        lnPmin = np.min(self.lnlikes)
        gp = GP(kernel, mean=lnPmin - np.fabs(lnPmin * 3))

        gp.compute(self.x)

        def neg_ln_likelihood(p):
            gp.set_parameter_vector(p)
            return -gp.log_likelihood(self.lnlikes)

        def grad_neg_ln_likelihood(p):
            gp.set_parameter_vector(p)
            return -gp.grad_log_likelihood(self.lnlikes)

        result = minimize(neg_ln_likelihood,
                          gp.get_parameter_vector(),
                          jac=grad_neg_ln_likelihood)
        if not quiet:
            print(result)
        gp.set_parameter_vector(result.x)
        self.gp = gp
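
Once trained, the stored GP can interpolate the log-likelihood at new parameter points. The sketch below is an assumption, not part of the source class: predict_lnlike and new_points are hypothetical names, but the calls rely only on attributes set in __init__ and on george's GP.predict.

    def predict_lnlike(self, new_points):
        # Hypothetical helper (assumed name): interpolate the max-shifted
        # log-likelihood at new points with the trained GP.
        x_new = self._transform_data(np.atleast_2d(new_points))
        mu, var = self.gp.predict(self.lnlikes, x_new, return_var=True)
        # Undo the shift applied in __init__ so the result is on the
        # original log-likelihood scale.
        return mu + self.lnlike_max, var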
Example #2
    def train(self, kernel=None):
        """Train a Gaussian Process to interpolate the log-likelihood
        of the training samples.

        Args:
            kernel (george.kernels.Kernel object): kernel to use, or any
                object accepted by the george.GP constructor

        """
        inds = self.training_inds
        x = self.chain_rotated_regularized[inds]
        lnL = self.lnlikes[inds]
        _guess = 4.5
        if kernel is None:
            kernel = kernels.ExpSquaredKernel(metric=_guess, ndim=len(x[0]))
        #Note: the mean is set below the minimum lnlike
        lnPmin = np.min(self.lnlikes)
        gp = GP(kernel, mean=lnPmin - np.fabs(lnPmin * 3))
        gp.compute(x)

        def neg_ln_likelihood(p):
            gp.set_parameter_vector(p)
            return -gp.log_likelihood(lnL)

        def grad_neg_ln_likelihood(p):
            gp.set_parameter_vector(p)
            return -gp.grad_log_likelihood(lnL)

        result = minimize(neg_ln_likelihood,
                          gp.get_parameter_vector(),
                          jac=grad_neg_ln_likelihood)
        print(result)
        gp.set_parameter_vector(result.x)
        self.gp = gp
        self.lnL_training = lnL
        return
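
A usage sketch (assumed, not from the source): recon stands for an instance of the class that owns train; any george kernel can be passed in place of the default ExpSquaredKernel.

# Hypothetical usage; `recon` is an assumed instance name.
recon.train()  # default ExpSquaredKernel with metric guess 4.5
ndim = recon.chain_rotated_regularized.shape[1]
recon.train(kernel=kernels.Matern52Kernel(metric=4.5, ndim=ndim))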
Example #3
import numpy as np
from george import GP, kernels
from george.solvers import HODLRSolver


def test_gradient(solver, white_noise, seed=123, N=305, ndim=3, eps=1.32e-3):
    np.random.seed(seed)

    # Set up the solver.
    kernel = 1.0 * kernels.ExpSquaredKernel(0.5, ndim=ndim)
    kwargs = dict()
    if white_noise is not None:
        kwargs = dict(white_noise=white_noise, fit_white_noise=True)
    if solver == HODLRSolver:
        kwargs["tol"] = 1e-8
    gp = GP(kernel, solver=solver, **kwargs)

    # Sample some data.
    x = np.random.rand(N, ndim)
    x = x[np.argsort(x[:, 0])]
    y = gp.sample(x)
    gp.compute(x, yerr=0.1)

    # Compute the initial gradient.
    grad0 = gp.grad_log_likelihood(y)
    vector = gp.get_parameter_vector()

    for i, v in enumerate(vector):
        # Compute the centered finite difference approximation to the gradient.
        vector[i] = v + eps
        gp.set_parameter_vector(vector)
        lp = gp.log_likelihood(y)

        vector[i] = v - eps
        gp.set_parameter_vector(vector)
        lm = gp.log_likelihood(y)

        vector[i] = v
        gp.set_parameter_vector(vector)

        grad = 0.5 * (lp - lm) / eps
        assert np.abs(grad - grad0[i]) < 5 * eps, \
            "Gradient computation failed in dimension {0} ({1})\n{2}" \
            .format(i, solver.__name__, np.abs(grad - grad0[i]))
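
One way to drive this test (an assumption, not taken from the source file) is pytest parametrization over george's two solvers and a couple of white-noise settings:

# Hypothetical pytest wrapper (assumed): runs test_gradient for both
# george solvers, with and without a fitted white-noise term.
import pytest
from george import BasicSolver, HODLRSolver

@pytest.mark.parametrize("solver", [BasicSolver, HODLRSolver])
@pytest.mark.parametrize("white_noise", [None, 0.1])
def test_gradient_parametrized(solver, white_noise):
    test_gradient(solver, white_noise)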