Example #1
File: test_metrics.py Project: dfm/george
def _general_metric(metric, N=100, ndim=3):
    kernel = 0.1 * kernels.ExpSquaredKernel(metric, ndim=ndim)

    x = np.random.rand(N, ndim)
    M0 = kernel.get_value(x)

    gp = GP(kernel)
    M1 = gp.get_matrix(x)
    assert np.allclose(M0, M1)

    # Compute the expected matrix.
    M2 = np.empty((N, N))
    for i in range(N):
        for j in range(N):
            r = x[i] - x[j]
            r2 = np.dot(r, np.linalg.solve(metric, r))
            M2[i, j] = 0.1 * np.exp(-0.5*r2)

    if not np.allclose(M0, M2):
        print(M0)
        print()
        print(M2)
        print()
        print(M0 - M2)
        print()
        print(M0 / M2)

        L = np.linalg.cholesky(metric)
        i = N - 1
        j = N - 2
        r = x[j] - x[i]
        print(x[i], x[j])
        print("r = ", r)
        print("L.r = ", np.dot(L, r))
    assert np.allclose(M0, M2)
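Usage note: _general_metric expects a full metric matrix (or any scalar or vector that ExpSquaredKernel accepts). A minimal driver sketch, assuming numpy, george.kernels and GP are imported as in the test module; the random positive-definite metric construction here is our own illustration, not part of the original file:

def _demo_general_metric(seed=1234, ndim=3):
    # Build a random symmetric positive-definite metric M = L L^T.
    np.random.seed(seed)
    L = np.tril(np.random.rand(ndim, ndim)) + np.eye(ndim)
    metric = np.dot(L, L.T)
    _general_metric(metric, N=20, ndim=ndim)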
Example #2
File: test_kernels.py Project: dfm/george
def test_dtype(seed=123):
    np.random.seed(seed)
    kernel = 0.1 * kernels.ExpSquaredKernel(1.5)
    kernel.pars = [1, 2]
    gp = GP(kernel)
    x = np.random.rand(100)
    gp.compute(x, 1e-2)
Example #3
File: QPGP.py Project: farr/PleiadesStars
def plotsample_QP(p, t, y, tsel, ysel):
    kern, sig = kern_QP(p)
    gp = GP(kern)
    yerr = np.ones(len(ysel)) * sig
    gp.compute(tsel, yerr)
    mu = gp.sample_conditional(ysel, t)
    pl.plot(t, mu, color='c', alpha=0.3)
    return
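plotsample_QP relies on a helper kern_QP(p), defined elsewhere in QPGP.py and not shown here, that maps a parameter vector to a (kernel, white-noise sigma) pair. A hedged sketch of what such a helper could look like, assuming numpy as np and from george import kernels; the parameter layout is an assumption for illustration, not the project's actual convention:

def kern_QP_sketch(p):
    # Assumed layout: log amplitude, log evolution timescale,
    # log gamma, log period, log white-noise sigma.
    amp, timescale, gamma, period, sig = np.exp(p)
    kern = amp * kernels.ExpSquaredKernel(timescale ** 2) \
        * kernels.ExpSine2Kernel(gamma=gamma, log_period=np.log(period))
    return kern, sig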
Example #4
File: test_modeling.py Project: dfm/george
def test_gp_mean(N=50, seed=1234):
    np.random.seed(seed)
    x = np.random.uniform(0, 5, N)
    y = 5 + np.sin(x)
    gp = GP(10. * kernels.ExpSquaredKernel(1.3),
            mean=5.0, fit_mean=True)
    gp.compute(x)
    check_gradient(gp, y)
Example #5
File: test_modeling.py Project: dfm/george
def test_gp_callable_mean(N=50, seed=1234):
    np.random.seed(seed)
    x = np.random.uniform(0, 5, N)
    y = 5 + np.sin(x)
    mean = CallableModel(lambda x: 5.0*x)
    gp = GP(10. * kernels.ExpSquaredKernel(1.3), mean=mean)
    gp.compute(x)
    check_gradient(gp, y)
Example #6
File: gp.py Project: Cadair/k2sc
class GeorgeGP(object):
    def __init__(self, kernel):
        self.kernel = kernel
        self._gp = GP(kernel._k)
        self._y     = None       ## Cached values
        self._x     = None       ## Cached inputs
        self._dirty = True       ## Flag telling if the arrays are up to date


    @property
    def is_dirty(self):
        return self._dirty

    def set_dirty(self, is_dirty=True):
        self._dirty = is_dirty

    def set_pv(self, pv=None):
        if pv is not None and not array_equal(pv, self.kernel._pv):
            self.kernel.set_pv(pv)
            self._gp.kernel = self.kernel._k
            self.set_dirty()

    def set_inputs(self, x=None):
        if x is not None and not array_equal(x, self._x):
            self._x = x
            self.set_dirty()

    def _covariance_matrix(self, x1, x2=None, pv=None, separate=False):
        self.set_pv(pv)
        if separate:
            return (self.kernel._k1.value(x1, x2),
                    self.kernel._k2.value(x1, x2))
        else:
            return self.kernel._k.value(x1, x2)
    
    def compute(self, x=None, pv=None):
        self.set_pv(pv)
        self.set_inputs(x)
        if self.is_dirty:
            self._gp.compute(self._x, yerr=self.kernel._pm[-1], sort=False)
            self.set_dirty(False)
    
    def negll(self, pv, y=None):
        y = y if y is not None else self._y
        self.compute(self._x, pv)
        return -self._gp.lnlikelihood(y)

    def predict(self, x, mean_only=True):
        return self._gp.predict(self._y, x, mean_only=mean_only)

    def predict_components(self, pv, y, x1, x2=None):
        self.compute(x1, pv)
        b  = self._gp.solver.apply_inverse(y)
        K1 = self.kernel._k1.value(x1, x2)
        K2 = self.kernel._k2.value(x1, x2)
        mu_time = dot(K1,b)
        mu_pos  = dot(K2,b)
        return mu_time, mu_pos
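The class above exists mainly to avoid needless refactorizations: compute() only runs when the inputs or hyperparameters have actually changed. A self-contained sketch of the same dirty-flag caching pattern using george directly (class and variable names here are ours):

import numpy as np
from george import GP, kernels

class CachedGP:
    def __init__(self, kernel):
        self.gp = GP(kernel)
        self._x = None
        self._dirty = True

    def compute(self, x):
        if self._x is None or not np.array_equal(x, self._x):
            self._x = x
            self._dirty = True
        if self._dirty:
            self.gp.compute(self._x)  # O(N^3) factorization, done lazily
            self._dirty = False

gp = CachedGP(1.0 * kernels.ExpSquaredKernel(1.0))
x = np.linspace(0, 1, 50)
gp.compute(x)
gp.compute(x)  # second call is a no-op: nothing changed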
Example #7
File: gp.py Project: vatsalpanwar/k2sc
class GeorgeGP(object):
    def __init__(self, kernel):
        self.kernel = kernel
        self._gp = GP(kernel._k)
        self._y = None  ## Cached values
        self._x = None  ## Cached inputs
        self._dirty = True  ## Flag telling if the arrays are up to date

    @property
    def is_dirty(self):
        return self._dirty

    def set_dirty(self, is_dirty=True):
        self._dirty = is_dirty

    def set_pv(self, pv=None):
        if pv is not None and not array_equal(pv, self.kernel._pv):
            self.kernel.set_pv(pv)
            self._gp.kernel = self.kernel._k
            self.set_dirty()

    def set_inputs(self, x=None):
        if x is not None and not array_equal(x, self._x):
            self._x = x
            self.set_dirty()

    def _covariance_matrix(self, x1, x2=None, pv=None, separate=False):
        self.set_pv(pv)
        if separate:
            return (self.kernel._k1.value(x1,
                                          x2), self.kernel._k2.value(x1, x2))
        else:
            return self.kernel._k.value(x1, x2)

    def compute(self, x=None, pv=None):
        self.set_pv(pv)
        self.set_inputs(x)
        if self.is_dirty:
            self._gp.compute(self._x, yerr=self.kernel._pm[-1], sort=False)
            self.set_dirty(False)

    def negll(self, pv, y=None):
        y = y if y is not None else self._y
        self.compute(self._x, pv)
        return -self._gp.lnlikelihood(y)

    def predict(self, x, mean_only=True):
        return self._gp.predict(self._y, x, mean_only=mean_only)

    def predict_components(self, pv, y, x1, x2=None):
        self.compute(x1, pv)
        b = self._gp.solver.apply_inverse(y)
        K1 = self.kernel._k1.value(x1, x2)
        K2 = self.kernel._k2.value(x1, x2)
        mu_time = dot(K1, b)
        mu_pos = dot(K2, b)
        return mu_time, mu_pos
Example #8
def create_gp(params):
    # GP parameters
    s2 = np.exp(params["log_s2"])
    taux = np.exp(params["log_taux"])
    tauy = np.exp(params["log_tauy"])

    gpx = GP(s2 * ExpSquaredKernel(taux))
    gpy = GP(s2 * ExpSquaredKernel(tauy))

    return gpx, gpy
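A usage sketch for create_gp, assuming numpy as np plus GP and ExpSquaredKernel imported from george, as the snippet implies:

params = {"log_s2": 0.0, "log_taux": np.log(0.5), "log_tauy": np.log(2.0)}
gpx, gpy = create_gp(params)
t = np.linspace(0, 10, 200)
gpx.compute(t, yerr=0.05)
gpy.compute(t, yerr=0.05)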
Example #9
def test_apply_inverse(solver, seed=1234, N=201, yerr=0.1):
    np.random.seed(seed)

    # Set up the solver.
    kernel = 1.0 * kernels.ExpSquaredKernel(0.5)
    kwargs = dict()
    if solver == HODLRSolver:
        kwargs["tol"] = 1e-10
    gp = GP(kernel, solver=solver, **kwargs)

    # Sample some data.
    x = np.sort(np.random.rand(N))
    y = gp.sample(x)
    gp.compute(x, yerr=yerr)

    K = gp.get_matrix(x)
    K[np.diag_indices_from(K)] += yerr**2

    b1 = np.linalg.solve(K, y)
    b2 = gp.apply_inverse(y)
    assert np.allclose(b1, b2)

    y = gp.sample(x, size=5).T
    b1 = np.linalg.solve(K, y)
    b2 = gp.apply_inverse(y)
    assert np.allclose(b1, b2)
Example #10
File: test_modeling.py Project: dfm/george
def test_gp_callable_white_noise(N=50, seed=1234):
    np.random.seed(seed)
    x = np.random.uniform(0, 5, N)
    y = 5 + np.sin(x)
    gp = GP(10. * kernels.ExpSquaredKernel(1.3), mean=5.0,
            white_noise=LinearWhiteNoise(-6, 0.01),
            fit_white_noise=True)
    gp.compute(x)
    check_gradient(gp, y)

    gp.freeze_parameter("white_noise:m")
    check_gradient(gp, y)
Example #11
    def create(self,
               rseed=0,
               ldcs=None,
               wnsigma=None,
               rnsigma=None,
               rntscale=0.5,
               nights=1):
        ldcs = ldcs if ldcs is not None else self.ldcs
        seed(rseed)

        self.time = linspace(-0.5 * float(self.t_total_d),
                             0.5 * float(self.t_total_d), self.n_exp)
        self.time = (tile(self.time, [nights, 1]) +
                     (self.p * arange(nights))[:, newaxis]).ravel()
        self.npt = self.time.size

        self.transit = zeros([self.npt, 4])
        for i, (ldc, c) in enumerate(zip(ldcs, self.contamination)):
            self.transit[:, i] = MA().evaluate(self.time,
                                               self.k0,
                                               ldc,
                                               0,
                                               self.p,
                                               self.a,
                                               self.i,
                                               c=c)

        # White noise
        # -----------
        if wnsigma is not None:
            self.wnoise = multivariate_normal(
                zeros(atleast_2d(self.transit).shape[1]),
                diag(wnsigma)**2, self.npt)
        else:
            self.wnoise = zeros_like(self.transit)

        # Red noise
        # ---------
        if rnsigma and with_george:
            self.gp = GP(rnsigma**2 * ExpKernel(rntscale))
            self.gp.compute(self.time)
            self.rnoise = self.gp.sample(self.time, self.npb).T
            self.rnoise -= self.rnoise.mean(0)
        else:
            self.rnoise = zeros_like(self.transit)

        # Final light curve
        # -----------------
        self.time_h = Qty(self.time, 'd').rescale('h')
        self.flux = self.transit + self.wnoise + self.rnoise
        return self.time_h, self.flux
Example #12
 def lnprior(self, pars):
     """
     Smoothing prior using a Gaussian process.
     We will learn the hyperparameters and marginalize over them.
     """
     theta = pars[:self.Nbins]
     if np.any(theta < 0):
         return -np.inf
     a, tau, err = np.exp(pars[self.Nbins:-1])
     mean = pars[-1]
     kernel = a * kernels.ExpSquaredKernel(tau)
     gp = GP(kernel, mean=mean)
     gp.compute(self.bin_centers, yerr=err)
     return gp.lnlikelihood(theta) / self.smoothing
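This prior treats the bin values theta as a draw from a GP over the bin centers, so smooth bin-to-bin variation is favored; the hyperparameters (a, tau, err) and the mean ride along in pars and are marginalized by the sampler. A hedged sketch of how such a prior combines with a data likelihood in the log-posterior (lnlike is an assumed method name, not shown in this class):

 def lnpost(self, pars):
     lp = self.lnprior(pars)
     if not np.isfinite(lp):
         return -np.inf
     return lp + self.lnlike(pars)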
Example #13
File: test_metrics.py Project: dfm/george
def test_axis_algined_metric(seed=1234, N=100, ndim=3):
    np.random.seed(seed)

    kernel = 0.1 * kernels.ExpSquaredKernel(np.ones(ndim), ndim=ndim)

    x = np.random.rand(N, ndim)
    M0 = kernel.get_value(x)

    gp = GP(kernel)
    M1 = gp.get_matrix(x)
    assert np.allclose(M0, M1)

    # Compute the expected matrix.
    M2 = 0.1*np.exp(-0.5*np.sum((x[None, :, :] - x[:, None, :])**2, axis=-1))
    assert np.allclose(M0, M2)
Example #14
File: test_modeling.py Project: dfm/george
def test_bounds():
    kernel = 10 * kernels.ExpSquaredKernel(1.0, metric_bounds=[(None, 4.0)])
    kernel += 0.5 * kernels.RationalQuadraticKernel(log_alpha=0.1, metric=5.0)
    gp = GP(kernel, white_noise=LinearWhiteNoise(1.0, 0.1))

    # Test bounds length.
    assert len(gp.get_parameter_bounds()) == len(gp.get_parameter_vector())
    gp.freeze_all_parameters()
    gp.thaw_parameter("white_noise:m")
    assert len(gp.get_parameter_bounds()) == len(gp.get_parameter_vector())

    # Test invalid bounds specification.
    with pytest.raises(ValueError):
        kernels.ExpSine2Kernel(gamma=0.1, log_period=5.0, bounds=[10.0])
Example #15
    def add_spots(self, QP=[5e-5, 0.5, 30., 28.]):
        """
        This method adds stellar variability using a quasi-periodic kernel.
        The activity signal is generated with a george kernel.
        """

        if not hasattr(self, 'flux_spots'):

            A  = QP[0]
            le = QP[1]
            lp = QP[2]
            P  = QP[3]

            from george import kernels, GP
            k = A * kernels.ExpSine2Kernel(gamma=1. / 2 / lp, log_period=np.log(P)) * \
                kernels.ExpSquaredKernel(metric=le)
            gp = GP(k)
            self.flux_spots = 1 + gp.sample(self.time)

        self.flux = self.flux * self.flux_spots

        self.spots = True
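A usage sketch (star is a hypothetical instance with time and flux arrays already set); the QP list is [amplitude, ExpSquared metric, ExpSine2 scale, period in days], as unpacked above:

star.add_spots(QP=[1e-4, 0.5, 30., 25.])  # stronger spots, 25-day period

One caveat visible in the code: the final flux multiplication happens on every call, while flux_spots is only sampled once, so calling add_spots twice compounds the same activity signal into self.flux.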
Example #16
File: QPGP.py Project: farr/PleiadesStars
def plotpred_QP(p, t, y):
    kern, sig = kern_QP(p)
    gp = GP(kern)
    yerr = np.ones(len(y)) * sig
    gp.compute(t, yerr)
    mu, cov = gp.predict(y, t)
    sigma = np.sqrt(np.diag(cov) + yerr**2)  # predictive variance plus noise variance
    pl.fill_between(t, mu + 2 * sigma, mu - 2 * sigma,
                    color='c', alpha=0.3)
    pl.plot(t, mu, color='c', lw=2)
    nper = (len(p) - 1) // 4
    # if nper > 1:
    #     cols = ['c','m','y','k']
    #     for i in range(nper):
    #         p1 = np.append(p[i*4:i*4+4], p[-1])
    #         k1, sig = kern_QP(p1)
    #         b = gp.solver.apply_inverse(y)
    #         X = np.transpose([t])
    #         K1 = k1.value(t, t)
    #         mu1 = np.dot(K1, b)
    #         col = np.roll(cols, -i)[0]
    #         pl.plot(t, mu, color = col, lw = 2)
    return
Example #17
def test_predict_single(solver, seed=1234, N=201, yerr=0.1):
    np.random.seed(seed)

    # Set up the solver.
    kernel = 1.0 * kernels.ExpSquaredKernel(0.5)
    kwargs = dict()
    if solver == HODLRSolver:
        kwargs["tol"] = 1e-8
    gp = GP(kernel, solver=solver, **kwargs)

    x = np.sort(np.random.rand(N))
    y = gp.sample(x)
    gp.compute(x, yerr=yerr)
    mu0, var0 = gp.predict(y, [0.0], return_var=True)
    mu, var = gp.predict(y, [0.0, 1.0], return_var=True)
    _, cov = gp.predict(y, [0.0, 1.0])
    assert np.allclose(mu0, mu[0])
    assert np.allclose(var0, var[0])
    assert np.allclose(var0, cov[0, 0])
Example #18
File: test_pickle.py Project: dfm/george
def test_pickle(solver, success, N=50, seed=123):
    np.random.seed(seed)
    kernel = 0.1 * kernels.ExpSquaredKernel(1.5)
    kernel.pars = [1, 2]
    gp = GP(kernel, solver=solver)
    x = np.random.rand(100)
    gp.compute(x, 1e-2)

    s = pickle.dumps(gp, -1)
    gp = pickle.loads(s)
    if success:
        gp.compute = _fake_compute
    gp.lnlikelihood(np.sin(x))
Example #19
    def __init__(self, mean, covariance, points, lnlikes, quiet=True):
        self.mean = mean
        self.cov = covariance

        # Only interpolate over points whose log-likelihood lies within
        # 100 * dof of the maximum; more distant points are dropped.
        dof = len(self.mean)
        inds = np.fabs(np.max(lnlikes) - lnlikes) < 100 * dof
        print(inds)
        print(lnlikes)

        self.points = points[inds]
        self.lnlikes_true = lnlikes[inds]
        self.lnlike_max = np.max(lnlikes[inds])
        self.lnlikes = lnlikes[inds] - self.lnlike_max  #max is now at 0
        self.x = self._transform_data(self.points)

        print(max(self.lnlikes), min(self.lnlikes))

        _guess = 4.5  #kernel length guess
        kernel = kernels.ExpSquaredKernel(metric=_guess, ndim=dof)
        lnPmin = np.min(self.lnlikes)
        gp = GP(kernel, mean=lnPmin - np.fabs(lnPmin * 3))

        gp.compute(self.x)

        def neg_ln_likelihood(p):
            gp.set_parameter_vector(p)
            return -gp.log_likelihood(self.lnlikes)

        def grad_neg_ln_likelihood(p):
            gp.set_parameter_vector(p)
            return -gp.grad_log_likelihood(self.lnlikes)

        result = minimize(neg_ln_likelihood,
                          gp.get_parameter_vector(),
                          jac=grad_neg_ln_likelihood)
        if not quiet:
            print(result)
        gp.set_parameter_vector(result.x)
        self.gp = gp
Example #20
    def __init__(self, target, datasets, filters, model='pb_independent_k', fit_wn=True, **kwargs):
        super().__init__(target, datasets, filters, model, **kwargs)

        pbl = [LParameter('bl_{:d}'.format(i), 'baseline', '', N(1, 1e-2), bounds=(-inf,inf)) for i in range(self.nlc)]
        self.ps.thaw()
        self.ps.add_lightcurve_block('baseline', 1, self.nlc, pbl)
        self.ps.freeze()

        self._slbl = self.ps.blocks[-1].slice
        self._stbl = self.ps.blocks[-1].start

        self.logwnvar = log(array(self.wn) ** 2)
        self._create_kernel()
        self.covariates = [cv[:, self.covids] for cv in self.covariates]
        self.freeze = []
        self.standardize = []

        self.gps = [GP(self._create_kernel(),
                       mean=0., fit_mean=False,
                       white_noise=0.8*wn, fit_white_noise=fit_wn) for wn in self.logwnvar]

        # Freeze the GP hyperparameters marked as frozen in _create_kernel
        for gp in self.gps:
            pars = gp.get_parameter_names()
            [gp.freeze_parameter(pars[i]) for i in self.freeze]

        # Standardize the covariates marked to be standardized in _create_kernel
        for c in self.covariates:
            for i in self.standardize:
                c[:,i] = (c[:,i] - c[:,i].min()) / c[:,i].ptp()

        self.compute_always = False
        self.compute_gps()
        self.de = None
        self.gphpres = None
        self.gphps = None
Example #21
def test_prediction(solver, seed=42):
    """Basic sanity checks for GP regression."""

    np.random.seed(seed)

    kernel = kernels.ExpSquaredKernel(1.0)
    kwargs = dict()
    if solver == HODLRSolver:
        kwargs["tol"] = 1e-8
    gp = GP(kernel, solver=solver, white_noise=0.0, **kwargs)

    x0 = np.linspace(-10, 10, 500)
    x = np.sort(np.random.uniform(-10, 10, 300))
    gp.compute(x)

    y = np.sin(x)
    mu, cov = gp.predict(y, x0)

    Kstar = gp.get_matrix(x0, x)
    K = gp.get_matrix(x)
    K[np.diag_indices_from(K)] += 1.0
    mu0 = np.dot(Kstar, np.linalg.solve(K, y))
    print(np.abs(mu - mu0).max())
    assert np.allclose(mu, mu0)
Example #22
    def train(self, kernel=None):
        """Train a Gaussian Process to interpolate the log-likelihood
        of the training samples.

        Args:
            kernel (george.kernels.Kernel object): kernel to use, or any
                object accepted by the george.GP constructor

        """
        inds = self.training_inds
        x = self.chain_rotated_regularized[inds]
        lnL = self.lnlikes[inds]
        _guess = 4.5
        if kernel is None:
            kernel = kernels.ExpSquaredKernel(metric=_guess, ndim=len(x[0]))
        #Note: the mean is set slightly lower than the minimum lnlike
        lnPmin = np.min(self.lnlikes)
        gp = GP(kernel, mean=lnPmin - np.fabs(lnPmin * 3))
        gp.compute(x)

        def neg_ln_likelihood(p):
            gp.set_parameter_vector(p)
            return -gp.log_likelihood(lnL)

        def grad_neg_ln_likelihood(p):
            gp.set_parameter_vector(p)
            return -gp.grad_log_likelihood(lnL)

        result = minimize(neg_ln_likelihood,
                          gp.get_parameter_vector(),
                          jac=grad_neg_ln_likelihood)
        print(result)
        gp.set_parameter_vector(result.x)
        self.gp = gp
        self.lnL_training = lnL
        return
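After train() the instance holds a GP conditioned on the training samples. A hedged sketch of evaluating the interpolated log-likelihood at new points in the rotated, regularized space (predict is george's GP.predict; the method name below is our own):

    def predict_lnlike(self, x_new):
        # Predictive mean of the GP conditioned on the training lnlikes.
        return self.gp.predict(self.lnL_training, np.atleast_2d(x_new),
                               return_cov=False)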
Example #23
def test_repeated_prediction_cache():
    kernel = kernels.ExpSquaredKernel(1.0)
    gp = GP(kernel)

    x = np.array((-1, 0, 1))
    gp.compute(x)

    t = np.array((-.5, .3, 1.2))

    y = x / x.std()
    mu0, mu1 = (gp.predict(y, t, return_cov=False) for _ in range(2))
    assert np.array_equal(mu0, mu1), \
        "Identical training data must give identical predictions " \
        "(problem with GP cache)."

    y2 = 2 * y
    mu2 = gp.predict(y2, t, return_cov=False)
    assert not np.array_equal(mu0, mu2), \
        "Different training data must give different predictions " \
        "(problem with GP cache)."

    a0 = gp._alpha
    gp.kernel[0] += 0.1
    gp.recompute()
    gp._compute_alpha(y2, True)
    a1 = gp._alpha
    assert not np.allclose(a0, a1), \
        "Different kernel parameters must give different alphas " \
        "(problem with GP cache)."

    mu, cov = gp.predict(y2, t)
    _, var = gp.predict(y2, t, return_var=True)
    assert np.allclose(np.diag(cov), var), \
        "The predictive variance must be equal to the diagonal of the " \
        "predictive covariance."
Example #24
def test_gradient(solver, white_noise, seed=123, N=305, ndim=3, eps=1.32e-3):
    np.random.seed(seed)

    # Set up the solver.
    kernel = 1.0 * kernels.ExpSquaredKernel(0.5, ndim=ndim)
    kwargs = dict()
    if white_noise is not None:
        kwargs = dict(white_noise=white_noise, fit_white_noise=True)
    if solver == HODLRSolver:
        kwargs["tol"] = 1e-8
    gp = GP(kernel, solver=solver, **kwargs)

    # Sample some data.
    x = np.random.rand(N, ndim)
    x = x[np.argsort(x[:, 0])]
    y = gp.sample(x)
    gp.compute(x, yerr=0.1)

    # Compute the initial gradient.
    grad0 = gp.grad_log_likelihood(y)
    vector = gp.get_parameter_vector()

    for i, v in enumerate(vector):
        # Compute the centered finite difference approximation to the gradient.
        vector[i] = v + eps
        gp.set_parameter_vector(vector)
        lp = gp.lnlikelihood(y)

        vector[i] = v - eps
        gp.set_parameter_vector(vector)
        lm = gp.lnlikelihood(y)

        vector[i] = v
        gp.set_parameter_vector(vector)

        grad = 0.5 * (lp - lm) / eps
        assert np.abs(grad - grad0[i]) < 5 * eps, \
            "Gradient computation failed in dimension {0} ({1})\n{2}" \
            .format(i, solver.__name__, np.abs(grad - grad0[i]))
Example #25
def get_linear_gp(pars):
    amp, sigma = pars
    kernel = (amp * PolynomialKernel(sigma, order=2))
    return GP(kernel)
Example #26
def get_composite_gp(pars):
    amp, sigma, amp2, length = pars
    kernel = (amp * PolynomialKernel(sigma, order=1) +
              amp2 * ExpSquaredKernel(length))
    return GP(kernel)
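A usage sketch for these factory functions, assuming numpy as np plus GP, PolynomialKernel and ExpSquaredKernel imported from george, as the snippets imply:

gp = get_composite_gp([2.0, 0.1, 1.0, 3.0])  # amp, sigma, amp2, length
x = np.linspace(0, 10, 100)
gp.compute(x, yerr=0.1)
print(gp.log_likelihood(np.sin(x)))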
Example #27
File: test_modeling.py Project: dfm/george
def test_parameters():
    kernel = 10 * kernels.ExpSquaredKernel(1.0)
    kernel += 0.5 * kernels.RationalQuadraticKernel(log_alpha=0.1, metric=5.0)
    gp = GP(kernel, white_noise=LinearWhiteNoise(1.0, 0.1))

    n = len(gp.get_parameter_vector())
    assert n == len(gp.get_parameter_names())
    assert n - 2 == len(kernel.get_parameter_names())

    gp.freeze_parameter(gp.get_parameter_names()[0])
    assert n - 1 == len(gp.get_parameter_names())
    assert n - 1 == len(gp.get_parameter_vector())

    gp.freeze_all_parameters()
    assert len(gp.get_parameter_names()) == 0
    assert len(gp.get_parameter_vector()) == 0

    gp.kernel.thaw_all_parameters()
    gp.white_noise.thaw_all_parameters()
    assert n == len(gp.get_parameter_vector())
    assert n == len(gp.get_parameter_names())

    assert np.allclose(kernel[0], np.log(10.))
Example #28
def get_fancy_gp(pars):
    amp, a, b, c, d = pars
    kernel = amp * MyDijetKernelSimp(a, b, c, d)
    return GP(kernel)
Example #29
File: multigp.py Project: jbernhard/mtd
    def run(self):
        gp = GP(self._kernel)
        gp.compute(self._x)
        y = self._y
        pipe = self._out_pipe

        for cmd, args, kwargs in iter(pipe.recv, None):
            if cmd == 'predict':
                result = gp.predict(y, *args, **kwargs)
                if len(result) == 2:
                    # only return the diagonal of the covariance matrix
                    result = result[0], result[1].diagonal()

            elif cmd == 'get_kernel_pars':
                result = gp.kernel.pars

            elif cmd == 'set_kernel_pars':
                gp.kernel.pars = args[0]
                result = None

            elif cmd == 'train':
                prior, nstarts, verbose = args

                # define function for negative log-likelihood and its gradient
                def nll(vector, bad_return=(1e30, np.zeros(len(gp.kernel)))):
                    # prevent exp overflow
                    if np.any(vector > 100.):
                        return bad_return
                    gp.kernel.vector = vector
                    ll = gp.lnlikelihood(y, quiet=True)
                    if not np.isfinite(ll):
                        return bad_return
                    grad = gp.grad_lnlikelihood(y, quiet=True)
                    return -ll, -grad

                if verbose:
                    print(self.name, 'starting training')

                # sample random initial positions from prior
                # run optimization for each
                result = tuple(
                    optimize.minimize(nll, x0, jac=True, **kwargs)
                    for x0 in np.log(prior.rvs(nstarts))
                )

                if verbose:
                    print(self.name, 'training complete')
                    # Print a table of results.
                    # Since results are sure to repeat,
                    # group them and output a row for each group:
                    #   number ll *hyperpars
                    for nll, group in itertools.groupby(
                            sorted(result, key=lambda r: r.fun),
                            key=lambda r: round(r.fun, 2)
                    ):
                        for n, r in enumerate(group, start=1):
                            pass
                        print(' ', n, -nll, _format_number_list(*np.exp(r.x)))

                # set hyperparameters to opt result with best likelihood
                gp.kernel.vector = min(result, key=lambda r: r.fun).x

            else:
                result = ValueError('Unknown command: {}.'.format(cmd))

            pipe.send(result)
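run() implements a small command protocol over a multiprocessing pipe: each message is a (cmd, args, kwargs) tuple, and None shuts the worker down. A hedged driver sketch for the parent side (worker construction is schematic; t_new stands for an array of prediction points):

from multiprocessing import Pipe

parent_end, child_end = Pipe()
# ... start the worker process with child_end as its _out_pipe ...

parent_end.send(('get_kernel_pars', (), {}))
pars = parent_end.recv()

parent_end.send(('predict', (t_new,), {}))
mu, var_diag = parent_end.recv()  # covariance already reduced to its diagonal

parent_end.send(None)  # terminates the iter(pipe.recv, None) loop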
Example #30
class MockLC:
    pb_names = "g' r' i' z'".split()
    pb_centers = 1e-9 * array([470, 640, 780, 900])
    npb = len(pb_names)

    def __init__(self, setup: SimulationSetup, **kwargs):
        self.setup = self.s = s = setup
        self.t_exposure_d = Qty(kwargs.get('exptime', 60), 's').rescale('d')
        self.t_baseline_d = Qty(s.t_baseline, 'h').rescale('d')
        self.ldcs = s.ldcs
        self.tm = QuadraticModel(klims=(0.01, 0.99), nk=512)

        self.filters = "g' r' i' z'".split()
        self.npb = len(self.filters)
        self.k_apparent, self.p, self.a, self.b, self.i = s.orbital_parameters

        self.duration_d = Qty(
            duration_eccentric(self.p, self.k_apparent, self.a, self.i, 0, 0,
                               1), 'd')

        # Contamination
        # -------------
        qe_be = TabulatedFilter('1024B_eXcelon', [
            300, 325, 350, 400, 450, 500, 700, 800, 850, 900, 950, 1050, 1150
        ], [
            0.0, 0.1, 0.25, 0.60, 0.85, 0.92, 0.96, 0.85, 0.70, 0.50, 0.30,
            0.05, 0.0
        ])
        qe_b = TabulatedFilter(
            '2014B', [300, 350, 500, 550, 700, 800, 1000, 1050],
            [0.10, 0.20, 0.90, 0.96, 0.90, 0.75, 0.11, 0.05])
        qes = qe_be, qe_b, qe_be, qe_be

        self.instrument = instrument = Instrument(
            'MuSCAT2', (sdss_g, sdss_r, sdss_i, sdss_z), qes)
        self.contaminator = SMContamination(instrument, "i'")

        self.hteff = setup.hteff
        self.cteff = setup.cteff
        self.i_contamination = setup.c
        self.k_true = setup.k_apparent / sqrt(1 - self.i_contamination)
        self.contamination = self.contaminator.contamination(
            self.i_contamination, self.hteff, self.cteff)

    @property
    def t_total_d(self):
        return self.duration_d + 2 * self.t_baseline_d

    @property
    def duration_h(self):
        return self.duration_d.rescale('h')

    @property
    def n_exp(self):
        return int(self.t_total_d // self.t_exposure_d)

    def __call__(self,
                 rseed=0,
                 ldcs=None,
                 wnsigma=None,
                 rnsigma=None,
                 rntscale=0.5):
        return self.create(rseed, ldcs, wnsigma, rnsigma, rntscale)

    def create(self,
               rseed=0,
               ldcs=None,
               wnsigma=None,
               rnsigma=None,
               rntscale=0.5,
               nights=1):
        ldcs = ldcs if ldcs is not None else self.ldcs
        seed(rseed)

        self.time = linspace(-0.5 * float(self.t_total_d),
                             0.5 * float(self.t_total_d), self.n_exp)
        self.time = (tile(self.time, [nights, 1]) +
                     (self.p * arange(nights))[:, newaxis]).ravel()
        self.npt = self.time.size
        self.tm.set_data(self.time)

        self.transit = zeros([self.npt, 4])
        for i, (ldc, c) in enumerate(zip(ldcs, self.contamination)):
            self.transit[:, i] = self.tm.evaluate_ps(self.k_true, ldc, 0,
                                                     self.p, self.a, self.i)
            self.transit[:, i] = c + (1 - c) * self.transit[:, i]

        # White noise
        # -----------
        if wnsigma is not None:
            self.wnoise = multivariate_normal(
                zeros(atleast_2d(self.transit).shape[1]),
                diag(wnsigma)**2, self.npt)
        else:
            self.wnoise = zeros_like(self.transit)

        # Red noise
        # ---------
        if rnsigma and with_george:
            self.gp = GP(rnsigma**2 * ExpKernel(rntscale))
            self.gp.compute(self.time)
            self.rnoise = self.gp.sample(self.time, self.npb).T
            self.rnoise -= self.rnoise.mean(0)
        else:
            self.rnoise = zeros_like(self.transit)

        # Final light curve
        # -----------------
        self.time_h = Qty(self.time, 'd').rescale('h')
        self.flux = self.transit + self.wnoise + self.rnoise
        return self.lcdataset

    @property
    def lcdataset(self):
        return LCDataSet([
            LCData(self.time, flux, pb)
            for pb, flux in zip(self.pb_names, self.flux.T)
        ], self.instrument)

    def plot(self, figsize=(13, 4), yoffset=0.01):
        fig, axs = pl.subplots(1,
                               3,
                               figsize=figsize,
                               sharex='all',
                               sharey='all')
        yshift = yoffset * arange(4)
        axs[0].plot(self.time_h, self.flux + yshift)
        axs[1].plot(self.time_h, self.transit + yshift)
        axs[2].plot(self.time_h, 1 + self.rnoise + yshift)
        pl.setp(axs, xlabel='Time [h]', xlim=self.time_h[[0, -1]])
        pl.setp(axs[0], ylabel='Normalised flux')
        [
            pl.setp(ax, title=title) for ax, title in zip(
                axs, 'Transit model + noise, Transit model, Red noise'.split(
                    ', '))
        ]
        fig.tight_layout()
        return fig, axs

    def plot_color_difference(self, figsize=(13, 4)):
        fig, axs = pl.subplots(2,
                               3,
                               figsize=figsize,
                               sharex='all',
                               sharey='all')
        [
            ax.plot(self.time_h, 100 * (fl - self.transit[:, -1]))
            for ax, fl in zip(axs[0], self.transit[:, :-1].T)
        ]
        [
            ax.plot(self.time_h, 100 * (fl - self.flux[:, -1]))
            for ax, fl in zip(axs[1], self.flux[:, :-1].T)
        ]
        [
            pl.setp(ax, title='F$_{}$ - F$_z$'.format(pb))
            for ax, pb in zip(axs[0], self.pb_names[:-1])
        ]
        pl.setp(axs[:, 0], ylabel=r'$\Delta F$ [%]')
        pl.setp(axs[1, :], xlabel='Time [h]')
        pl.setp(axs, xlim=self.time_h[[0, -1]])
        fig.tight_layout()
        return fig
Example #31
File: test_tutorial.py Project: dfm/george
 def lnlike(p, t, y, yerr, solver=BasicSolver):
     a, tau = np.exp(p[:2])
     gp = GP(a * kernels.Matern32Kernel(tau) + 0.001, solver=solver)
     gp.compute(t, yerr)
     return gp.lnlikelihood(y - model(p, t))
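lnlike marginalizes the residuals y - model(p, t) under a Matern-3/2 GP; model is defined elsewhere in the tutorial test. In george's hyperparameter tutorial the mean model is a Gaussian feature, so a sketch consistent with the call above might look like this (parameter layout assumed):

def model(p, t):
    # p[0] and p[1] hold the GP hyperparameters; the rest set the mean.
    _, _, amp, loc, sig2 = p
    return amp * np.exp(-0.5 * (t - loc) ** 2 / sig2)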
Example #32
class model_transit_lightcurve(object):

    per = 0
    ldmethod = 'quad'

    def __init__(self,
                 data=None,
                 param={},
                 bounds={},
                 fixed=[],
                 group=None,
                 **kwargs):
        """

        :param data:
        :param param:
        :param bounds:
        :param group:
        """
        self.data = []
        self.param, self.param_name, self.param_bounds = np.array(
            []), np.array([]), []
        self.Ndata = 0
        self.index_param_indiv = []
        self.index_bounds_indiv = []
        self.index_param_common = []
        self.index_bounds_common = []
        self.group_indiv = []
        self.fixed = []
        self.group_common = []
        self.detrendvar = {}
        self._varind = 0
        self.gp = GP(solver=BasicSolver)
        self._mcmc_parinit_optimized = False
        self.add_data(data, param, bounds, fixed, group, **kwargs)

    def add_data(self, data, param, bounds, fixed=[], group=None, **kwargs):
        if data:
            if type(data) == str:
                data = np.loadtxt(data, unpack=True, **kwargs)
            if self.Ndata == 0: self.data = data
            if self.Ndata == 1: self.data = [self.data]
            if self.Ndata > 0: self.data.append(data)
            parlen = len(self.param)
            bounlen = len(self.param_bounds)
            if param:
                if isinstance(param, dict) and type(param) != NamedParam:
                    param = NamedParam(param)
                self.param_name = np.append(self.param_name, param.keys())
                self.param = np.append(self.param,
                                       [param[key] for key in param])
                fixedkeys = [
                    param[fx] if type(fx) == int else fx for fx in fixed
                ]
                self.param_bounds += [
                    bounds[key] for key in bounds if key not in fixedkeys
                ]
                self.group_indiv.append(group)
                self.fixed += [
                    (fx if type(fx) == int else list(param).index(fx)) + parlen
                    for fx in fixed
                ]
            self.index_param_indiv.append(np.arange(len(param)) + parlen)
            self.index_bounds_indiv.append(np.arange(len(bounds)) + bounlen)
            if self.Ndata == 1: self.detrendvar = [self.detrendvar]
            if self.Ndata > 0: self.detrendvar.append({})
            self.Ndata += 1

    def add_indiv_param(self, param, bounds, fixed=[], dataindex=-1):
        if self.Ndata < 1: return
        if param:
            if isinstance(param, dict) and type(param) != NamedParam:
                param = NamedParam(param)
            parlen = len(self.param)
            bounlen = len(self.param_bounds)
            self.param_name = np.append(self.param_name, param.keys())
            self.param = np.append(self.param, [param[key] for key in param])
            fixedkeys = [param[fx] if type(fx) == int else fx for fx in fixed]
            self.param_bounds += [
                bounds[key] for key in bounds if key not in fixedkeys
            ]
            self.index_param_indiv[dataindex] = np.append(
                self.index_param_indiv[dataindex],
                np.arange(len(param)) + parlen)
            self.index_bounds_indiv[dataindex] = np.append(
                self.index_bounds_indiv[dataindex],
                np.arange(len(bounds)) + bounlen)
            self.fixed += [parlen + fx for fx in fixed]

    def add_common_param(
            self,
            param,
            bounds={},
            fixed=[],
            group=None):  # TODO: ignore bounds keys if fixed is not []
        if self.Ndata < 1: return
        if param:
            if isinstance(param, dict) and type(param) != NamedParam:
                param = NamedParam(param)
            parlen = len(self.param)
            bounlen = len(self.param_bounds)
            self.param_name = np.append(self.param_name, param.keys())
            self.param = np.append(self.param, [param[key] for key in param])
            fixedkeys = [param[fx] if type(fx) == int else fx for fx in fixed]
            self.param_bounds += [
                bounds[key] for key in bounds if key not in fixedkeys
            ]
            self.group_common.append(group)
            self.fixed += [
                (fx if type(fx) == int else list(param.keys()).index(fx)) +
                parlen for fx in fixed
            ]
            self.index_param_common.append(np.arange(len(param)) + parlen)
            self.index_bounds_common.append(np.arange(len(bounds)) + bounlen)

    def add_detrend_param(self,
                          variable=0,
                          name='',
                          dataindex=-1,
                          coeff=[],
                          bounds=[],
                          fixed=[]):
        if type(variable) == int:
            varnames = ['t', 'flux', 'err']
            if self.Ndata == 1 and dataindex in [0, -1]:
                var = self.data[variable]
            elif self.Ndata > 1:
                var = self.data[dataindex][variable]
            if not name: name = varnames[variable]
        else:
            var = variable
            if not name:
                name = 'var' + str(self._varind)
                self._varind += 1
        name = 'det_' + name
        if coeff:
            parlen = len(self.param)
            bounlen = len(self.param_bounds)
            self.param = np.append(self.param, coeff)
            self.param_name = np.append(
                self.param_name, [name + f'_{i}' for i in range(len(coeff))])
            self.param_bounds += bounds
            if self.Ndata == 1 and dataindex in [0, -1]:
                self.detrendvar.update({name: (var, len(coeff))})
            elif self.Ndata > 1:
                self.detrendvar[dataindex].update({name: (var, len(coeff))})
            self.index_param_indiv[dataindex] = np.append(
                self.index_param_indiv[dataindex],
                np.arange(len(coeff)) + parlen)
            self.index_bounds_indiv[dataindex] = np.append(
                self.index_bounds_indiv[dataindex],
                np.arange(len(bounds)) + bounlen)
            self.fixed += [parlen + fx for fx in fixed]

    def get_named_param(self, param):
        if type(param) != np.ndarray: param = np.array(param)
        if self.Ndata == 1:
            nparam = NamedParam(
                zip(self.param_name[self.index_param_indiv[0]],
                    param[self.index_param_indiv[0]]))
            for j, icarr in enumerate(self.index_param_common):
                if not self.group_indiv[0] or not self.group_common[
                        j] or self.group_indiv[0] == self.group_common[j]:
                    nparam.update(zip(self.param_name[icarr], param[icarr]))
            return nparam
        paramseg = [NamedParam() for _ in self.index_param_indiv]
        for i, iiarr in enumerate(self.index_param_indiv):
            for j, icarr in enumerate(self.index_param_common):
                if not self.group_indiv[i] or not self.group_common[
                        j] or self.group_indiv[i] == self.group_common[j]:
                    paramseg[i].update(
                        zip(self.param_name[icarr], param[icarr]))
            paramseg[i].update(zip(self.param_name[iiarr], param[iiarr]))
        return paramseg

    def _model_function(self, param, t, named=False, detrend=True):
        if not named: param = self.get_named_param(param)
        if detrend:
            return direct_tfm_with_detrend(param, t, self.per, self.detrendvar,
                                           self.ldmethod)
        return direct_tfm(param, t, self.per, self.ldmethod)

    @staticmethod
    def _contains_gppar(param):
        if isinstance(param, (dict, NamedParam)):
            return 'gpa' in param and 'gptau' in param
        return ['gpa' in par and 'gptau' in par for par in param]

    @staticmethod
    def _contais_detrendvar(detrendvar):
        if isinstance(detrendvar, dict):
            return len(detrendvar) != 0
        return [len(dtv) != 0 for dtv in detrendvar]

    def log_likelihood_gp(self, param):
        if self.Ndata == 0: return
        if not self._calcgp: return
        nparam = self.get_named_param(param)
        if self.Ndata == 1:
            detfac = get_detrend_factor(
                nparam, self.detrendvar) if self._calcdetrend else 1
            self.gp.kernel = kernel(nparam['gpa'], nparam['gptau'])
            if np.any(np.isnan(detfac)): print('detfac nan')
            if np.any(np.isinf(detfac)): print('detfac inf')
            if np.any(np.isnan(nparam['gpa'])): print('gpa nan')
            if np.any(np.isinf(nparam['gpa'])): print('gpa inf')
            if np.any(np.isnan(nparam['gptau'])): print('gptau nan')
            if np.any(np.isinf(nparam['gptau'])): print('gptau inf')
            if np.any(np.isnan(self.data[0])): print('t nan')
            if np.any(np.isinf(self.data[0])): print('t inf')
            if np.any(np.isnan(self.data[2])): print('err nan')
            if np.any(np.isinf(self.data[2])): print('err inf')
            try:
                self.gp.compute(self.data[0], self.data[2] / detfac)
            except:
                print(nparam)
                print(np.any(detfac == 0))
                raise
            return self.gp.log_likelihood(
                self.data[1] / detfac - self._model_function(
                    nparam, self.data[0], named=True, detrend=False))
        detfac = get_detrend_factor(
            nparam, self.detrendvar) if self._calcdetrend else np.ones(
                self.Ndata)
        llhood = []
        for i in range(self.Ndata):
            if self._calcgp[i]:
                self.gp.kernel = kernel(nparam[i]['gpa'], nparam[i]['gptau'])
                self.gp.compute(self.data[i][0], self.data[i][2] / detfac[i])
                llhood.append(
                    self.gp.log_likelihood(
                        self.data[i][1] / detfac[i] -
                        self._model_function(nparam[i],
                                             self.data[i][0],
                                             named=True,
                                             detrend=False)))
            else:
                llhood.append(None)
        return llhood

    def run_mcmc(self, *args, **kwargs):
        data = self.data if self.Ndata == 1 else list(
            map(list, zip(*self.data)))
        param = kwargs.pop('param_init', self.param)
        bounds = kwargs.pop('param_bounds', self.param_bounds)
        priorpdf_type = kwargs.pop('priorpdftype', 'uniform')
        priorpdf_args = kwargs.pop('priorpdfargs', ())
        self._calcgp = self._contains_gppar(self.get_named_param(param))
        if not np.any(self._calcgp): self._calcgp = False
        self._calcdetrend = self._contais_detrendvar(self.detrendvar)
        if not np.any(self._calcdetrend): self._calcdetrend = False
        llhood = self.log_likelihood_gp if self._calcgp else None
        self.mcmc = MCMC(np.array(data),
                         param,
                         bounds,
                         fixpos=self.fixed,
                         modelfunc=self._model_function,
                         loglikelihood=llhood,
                         ignorex_loglikelihood=True,
                         priorpdf_type=priorpdf_type,
                         priorpdf_args=priorpdf_args)
        self.mcmc.mcmc_name = kwargs.pop('mcmc_name', '')
        self.mcmc.param_names = self.param_name_plus_group
        optimize = kwargs.pop('preoptimize', False)
        if optimize:
            self.mcmc.optimized_initpar()
            self._mcmc_parinit_optimized = True
        self.mcmc(*args, **kwargs)
        self.mcmc_accepted = self.mcmc.accepted
        self.mcmc_nwalker = self.mcmc.Nwalker
        self.mcmc_niterate = self.mcmc.Niterate

    @property
    def skeleton(self):
        keys = [
            'Ndata', 'index_param_indiv', 'index_bounds_indiv',
            'index_param_common', 'index_bounds_common', 'group_indiv',
            'group_common', 'fixed', 'detrendvar', 'param_name',
            'param_bounds', '_varind', 'mcmc_accepted', 'mcmc_nwalker',
            'mcmc_niterate'
        ]
        skeleton = dict(input_data=self.data, param_init=self.param)
        for key in keys:
            try:
                skeleton[key] = self.__getattribute__(key)
            except:
                skeleton[key] = None
        skeleton['mcmc_param_init_optimized'] = self.mcmc.param_init if self._mcmc_parinit_optimized else []
        return skeleton

    @property
    def param_name_plus_group(self):
        groupexp = np.full(len(self.param), '', dtype=object)
        for i in range(len(self.group_indiv)):
            if self.group_indiv[i]:
                groupexp[self.index_param_indiv[i]] = '-' + self.group_indiv[i]
        for i in range(len(self.group_common)):
            if self.group_common[i]:
                groupexp[self.index_param_common[i]] = self.group_common[i]
        return list(map(''.join, zip(self.param_name, groupexp)))

    def saveall(self,
                retrieval_skeleton='',
                params_mcmc='',
                mcmc_params_rawsample='',
                results_optimized='',
                **kwargs):
        import pickle as pkl
        if retrieval_skeleton:
            pkl.dump(self.skeleton, open(retrieval_skeleton + '.pkl', 'wb'))
        if params_mcmc:
            if 'header' not in kwargs:
                kwargs['header'] = '\t'.join(self.param_name_plus_group)
            np.savetxt(params_mcmc, self.params_mcmc, **kwargs)

    def load_retrieval_skeleton(self, skeleton='', param_init='init'):
        import pickle as pkl
        if skeleton:
            if type(skeleton) == str and os.path.exists(skeleton + '.pkl'):
                skeleton = pkl.load(open(skeleton + '.pkl', 'rb'))
            self.data = skeleton.pop('input_data')
            parami = skeleton.pop('param_init')
            parammo = skeleton.pop('mcmc_param_init_optimized')
            paramo = skeleton.pop('param_optimized', [])
            if param_init == 'init': self.param = parami
            if param_init == 'mcmc_preoptimized': self.param = parammo
            if param_init == 'mcmc_final':
                self.param = self.median_err_params_mcmc[:, 0]
            if param_init == 'optimized_final': self.param = paramo
            for key in skeleton:
                self.__setattr__(key, skeleton[key])

    def load_params_mcmc(self, source):
        if source:
            if type(source) == str and os.path.exists(source):
                source = np.loadtxt(source)
            self.params_mcmc = source

    def get_flatsamples(self, saveto='', **kwargs):
        self.params_mcmc = self.mcmc.get_flatsamples(**kwargs)

    def load_backend_mcmc(self, source, mcmc_name=''):
        if not hasattr(self, 'mcmc'):
            self.mcmc = MCMC([], [])
            self.mcmc.mcmc_name = mcmc_name
            self.mcmc.param_names = self.param_name_plus_group
        self.mcmc.load_backend(source)
        self.mcmc.Niterate, self.mcmc.Nwalker, self.mcmc.Ndim = self.mcmc.get_samples(
        ).shape

    @staticmethod
    def chooseNflatten_from_sample(samples, burn=0, thin=1, accepted=[]):
        return samples[burn::thin, accepted, :].reshape(
            -1, samples.shape[2]) if len(accepted) != 0 else samples[
                burn::thin, :, :].reshape(-1, samples.shape[2])

    def get_best_fit(self, **kwargs):
        data = self.data if self.Ndata == 1 else list(
            map(list, zip(*self.data)))
        param = kwargs.pop('param_init', self.param)
        bounds = kwargs.pop('param_bounds', self.param_bounds)

        self.mcmc = MCMC(np.array(data),
                         param,
                         bounds,
                         fixpos=self.fixed,
                         modelfunc=self._model_function,
                         loglikelihood=self.log_likelihood_gp,
                         ignorex_loglikelihood=True)

    def get_transit_model(self,
                          param,
                          t,
                          named=False,
                          detrend=True,
                          denoiseGP=True):
        if not named:
            if np.ndim(param) == 1: param = self.get_named_param(param)
            elif np.ndim(param) == 2:
                param = list(param)
                for i in range(len(param)):
                    param[i] = self.get_named_param(param[i])
        modelop = self._model_function(param, t, named=True, detrend=detrend)
        if not np.any(self._contains_gppar(param)) or not denoiseGP:
            return modelop
        if self.Ndata == 1:
            self.gp.kernel = kernel(param['gpa'], param['gptau'])
            self.gp.compute(self.data[0], self.data[2])
            return self.gp.sample_conditional(self.data[1] - modelop,
                                              t) + modelop
        for i in range(self.Ndata):
            if self._contains_gppar(param[i]):
                self.gp.kernel = kernel(param[i]['gpa'], param[i]['gptau'])
                self.gp.compute(self.data[i][0], self.data[i][2])
                modelop[i] += self.gp.sample_conditional(self.data[i][1] -
                                                         modelop[i],
                                                         t,
                                                         size=100).mean(0)
        return modelop

    def get_adjusted_data(self,
                          param,
                          named=False,
                          detrend=True,
                          denoiseGP=True):
        if self.Ndata == 0: return
        if not named:
            if np.ndim(param) == 1: param = self.get_named_param(param)
            elif np.ndim(param) == 2:
                param = list(param)
                for i in range(len(param)):
                    param[i] = self.get_named_param(param[i])
        if detrend and self._calcdetrend:
            detfac = get_detrend_factor(param, self.detrendvar)
        if self.Ndata == 1:
            t, f, e = self.data
            if detrend and self._calcdetrend:
                f /= detfac
                e /= detfac
            if denoiseGP and self._calcgp:
                self.gp.kernel = kernel(param['gpa'], param['gptau'])
                self.gp.compute(t, e)
                f -= self.gp.sample_conditional(
                    f -
                    self._model_function(param, t, named=True, detrend=False),
                    t,
                    size=100).mean(0)
            return t, f, e
        t, f, e = [], [], []
        for i in range(self.Ndata):
            ti, fi, ei = self.data[i]
            if detrend and self._calcdetrend:
                fi /= detfac[i]
                ei /= detfac[i]
            if denoiseGP and self._calcgp:
                self.gp.kernel = kernel(param[i]['gpa'], param[i]['gptau'])
                self.gp.compute(ti, ei)
                fi -= self.gp.sample_conditional(fi - self._model_function(
                    param[i], ti, named=True, detrend=False),
                                                 ti,
                                                 size=100).mean(0)
            t.append(ti)
            f.append(fi)
            e.append(ei)
        return t, f, e

    @property
    def median_err_params_mcmc(self):
        return get_median_error_from_distribution(self.params_mcmc,
                                                  sigma=1,
                                                  method='percentile',
                                                  saveas='')

    def save_median_err_params_mcmc(self,
                                    saveto='',
                                    parnames=[],
                                    display=True):
        parfinal = self.median_err_params_mcmc.T
        print(parfinal.shape)
        if not parnames: parnames = self.mcmc.param_names
        if os.path.exists(saveto): os.remove(saveto)
        for i, parname in enumerate(parnames):
            if display:
                print(
                    parname +
                    f': {parfinal[i, 0]} +{parfinal[i, 1]} -{parfinal[i, 2]}')
            if saveto:
                print(
                    parname +
                    f': {parfinal[i, 0]} +{parfinal[i, 1]} -{parfinal[i, 2]}',
                    file=open(saveto, 'a'))

    def overplot_model_median_err_params_mcmc(self,
                                              multifig=False,
                                              axis='auto',
                                              figsize=(10, 10)):
        import matplotlib.pyplot as plt
        params = self.median_err_params_mcmc.T
        t, f, e = self.get_adjusted_data(params[:, 0])
        if self.Ndata == 1:
            fig, ax = plt.subplots(figsize=figsize)
            midfluxfit = self.get_transit_model(params[:, 0],
                                                t,
                                                detrend=False,
                                                denoiseGP=False)
            ax.errorbar(t, f, e, fmt='.')
            ax.plot(t,
                    midfluxfit,
                    c='r',
                    lw=3,
                    label='Model corr. to median of parameters')
            return fig, ax
        if not multifig:
            if axis == 'auto':
                figure, axes = plt.subplots(self.Ndata, 1, figsize=figsize)
            elif isinstance(axis, (tuple, list)) and len(axis) == 2:
                figure, axes = plt.subplots(axis[0], axis[1], figsize=figsize)
            axes = np.ravel(axes)
        else:
            figure, axes = [], []
        for i in range(self.Ndata):
            if multifig:
                fig, ax = plt.subplots(figsize=figsize)
                figure.append(fig)
                axes.append(ax)
            else:
                ax = axes[i]
            midfluxfit = self.get_transit_model(params[i][:, 0],
                                                t[i],
                                                detrend=False,
                                                denoiseGP=False)
            ax.errorbar(t[i], f[i], e[i], fmt='.')
            ax.plot(t[i],
                    midfluxfit,
                    c='r',
                    lw=3,
                    label='Model corr. to median of parameters')
        return figure, axes
Example #33
 def create_gp(self):
     # Build a GP with the kernel defined above and factorize it on the
     # training inputs; hyperparameter optimization is a separate step
     gp = GP(kernel=self.kk, mean=self.mean)
     gp.compute(self.x_train, self.sigma_n)
     return gp
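compute() only factorizes the covariance for the current hyperparameters; fitting those hyperparameters is a separate step. A hedged sketch reusing the optimization pattern from the train() example above (method and argument names here are ours):

 def optimize_gp(self, y_train):
     from scipy.optimize import minimize
     gp = self.create_gp()

     def nll(p):
         gp.set_parameter_vector(p)
         return -gp.log_likelihood(y_train, quiet=True)

     result = minimize(nll, gp.get_parameter_vector())
     gp.set_parameter_vector(result.x)
     return gp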