Example #1
    def test_load_save(self):
        """Round-trip a trained CSFIC GP through save_trained_model and check
        that the reloaded model reproduces predictions and variances exactly."""
        main_cov = GPCov(wfn_params=[1.0,], dfn_params=[2.5,], wfn_str="compact0", dfn_str="euclidean")
        fic_cov = GPCov(wfn_params=[1.0,], dfn_params=[1.5,], wfn_str="se", dfn_str="euclidean", Xu=self.u)

        original = GP(X=self.X,
                      y=self.y1,
                      noise_var=1.0,
                      cov_main=main_cov,
                      cov_fic=fic_cov,
                      compute_ll=True,
                      sparse_threshold=0,
                      build_tree=False)

        original.save_trained_model("test_csfic.npz")
        restored = GP(fname="test_csfic.npz", build_tree=False)

        # The serialization round-trip should be bit-exact, hence the == compare.
        test_pts = np.linspace(-5, 5, 20).reshape((-1, 1))
        self.assertTrue((original.predict(test_pts) == restored.predict(test_pts)).all())
        self.assertTrue((original.variance(test_pts) == restored.variance(test_pts)).all())
Example #2
    def test_no_fic(self):
        """A CSFIC GP whose FIC component is numerically negligible should be
        equivalent to a plain GP with no FIC component at all."""
        cs_cov = GPCov(wfn_params=[1.0,], dfn_params=[2.5,], wfn_str="compact0", dfn_str="euclidean")
        negligible_fic = GPCov(wfn_params=[1e-14,], dfn_params=[1e-13,], wfn_str="se", dfn_str="euclidean", Xu=self.u)

        # Both models share everything except the (trivial vs. absent) FIC part.
        shared = dict(X=self.X,
                      y=self.y1,
                      noise_var=1.0,
                      cov_main=cs_cov,
                      compute_ll=True,
                      sparse_threshold=0,
                      build_tree=False)
        gp_csfic = GP(cov_fic=negligible_fic, **shared)
        gp_plain = GP(cov_fic=None, **shared)

        query = np.linspace(-6, 6, 20).reshape((-1, 1))
        mean_csfic = gp_csfic.predict(query)
        mean_plain = gp_plain.predict(query)

        self.assertTrue((np.abs(mean_csfic - mean_plain) < 1e-4).all())
        self.assertAlmostEqual(gp_csfic.ll, gp_plain.ll)
Example #3
    def test_predict(self):
        """Check CSFIC GP predictions, variances, and log-likelihood against
        stored reference values."""
        cov_main = GPCov(wfn_params=[1.0,], dfn_params=[2.5,], wfn_str="compact0", dfn_str="euclidean")
        cov_fic = GPCov(wfn_params=[1.0,], dfn_params=[1.5,], wfn_str="se", dfn_str="euclidean", Xu=self.u)

        gp = GP(X=self.X,
                y=self.y1,
                noise_var=1.0,
                cov_main=cov_main,
                cov_fic=cov_fic,
                compute_ll=True,
                sparse_threshold=0,
                build_tree=False)

        query = np.linspace(-6, 6, 20).reshape((-1, 1))

        # Reference predictive means at the query points.
        expected_mean = [-0.408347998350141, -0.562026829764370, -0.495194901764167, -0.221325067983013, -0.108486527530546, -0.382362268970377, -1.048794656436814, -1.603860074325163, -1.642537462422822, -1.301189117082765, -0.590641591880188,  0.013777393473535,  0.078987024190573, -0.060588451482957, -0.779405572439649, -1.566025186126343, -1.839795343563452, -1.918702553019862, -1.422525401522495, -0.783325324315960]
        self.assertTrue((np.abs(gp.predict(query) - expected_mean) < 1e-7).all())

        # Reference predictive variances at the same points.
        expected_var = [1.845225336549096, 1.656486150287482, 1.467352884081546, 1.383325051904399, 1.238510769551563, 0.823748649278482, 0.390452316432541, 0.493252719187497, 0.988398813692138, 1.290702032271763, 1.290702032271763, 0.988398813692137, 0.493252719187498, 0.390452316432541, 0.823748649278483, 1.238510769551562, 1.383325051904399, 1.467352884081546, 1.656486150287482, 1.845225336549096]
        self.assertTrue((np.abs(gp.variance(query) - expected_var) < 1e-7).all())

        self.assertAlmostEqual(-45.986450666568985, gp.ll, places=8)
Example #4
    def test_no_cs(self):
        """A CSFIC GP whose CS component is numerically negligible should match
        a plain FIC GP.

        Reference values come from the GPStuff MATLAB toolbox on the same
        training data; see test/matlab/no_cs.m for the generating script."""
        negligible_cs = GPCov(wfn_params=[1e-15,], dfn_params=[1e-15,], wfn_str="compact0", dfn_str="euclidean")
        cov_fic = GPCov(wfn_params=[1.0,], dfn_params=[1.5,], wfn_str="se", dfn_str="euclidean", Xu=self.u)

        gp = GP(X=self.X,
                y=self.y1,
                noise_var=1.0,
                cov_main=negligible_cs,
                cov_fic=cov_fic,
                compute_ll=True,
                compute_grad=False,
                sparse_threshold=0,
                build_tree=False,
                sparse_invert=False)

        query = np.linspace(-6, 6, 20).reshape((-1, 1))
        mean = gp.predict(query)

        # GPStuff reference predictions; tolerance is loose to absorb toolbox differences.
        expected_mean = [-0.001009578312505, -0.007987369239529, -0.044328133303421, -0.172570712418792, -0.471267001236270, -0.902780793888510, -1.213226123858856, -1.144504376709686, -0.762372014152344, -0.378150072159795, -0.198456066738416, -0.207895507020449, -0.279193923665475, -0.292235035085667, -0.217163186457927, -0.113346671331517, -0.041505175187682, -0.010661391852200, -0.001921047707732, -0.000242814374795]
        self.assertTrue((np.abs(mean - expected_mean) < 0.001).all())

        self.assertAlmostEqual(-52.1826173884063, gp.ll, places=2)
Example #5
    def prediction_error_gp(self, x):
        """Return the total Gaussian log-likelihood of the held-out targets
        under a GP trained at observation locations given by x (reshaped to
        match self.X_obs), summed over all target columns."""
        XX = x.reshape(self.X_obs.shape)
        ntest = self.n - self.ntrain

        gp = GP(X=XX,
                y=self.SY[:, 0:1],
                cov_main=self.cov,
                noise_var=self.noise_var,
                sort_events=False,
                sparse_invert=False)

        # The predictive covariance (including observation noise) depends only
        # on the locations, so it is computed once and shared by every column.
        pred_cov = gp.covariance(self.Xtest, include_obs=True)
        logdet = np.linalg.slogdet(pred_cov)[1]
        pred_prec = np.linalg.inv(pred_cov)

        total = 0
        for y_train, y_held in zip(self.SY.T, self.Ytest.T):
            # Refit only the per-column weights; the covariance is reused.
            gp.y = y_train
            gp.alpha_r = gp.factor(y_train)
            resid = y_held - gp.predict(self.Xtest)

            col_ll = -.5 * np.dot(resid, np.dot(pred_prec, resid))
            col_ll += -.5 * logdet
            col_ll += -.5 * ntest * np.log(2 * np.pi)

            total += col_ll

        return total
Example #6
    def test_load_save(self):
        """Round-trip the fixture GP through save_trained_model and verify the
        reloaded model reproduces predictions and variances exactly."""
        trained = self.gp
        trained.save_trained_model("test_semi.npz")
        reloaded = GP(fname="test_semi.npz", build_tree=False)

        # The round-trip should be bit-exact, hence the == compare.
        test_pts = np.linspace(-5, 5, 20).reshape((-1, 1))
        self.assertTrue((trained.predict(test_pts) == reloaded.predict(test_pts)).all())
        self.assertTrue((trained.variance(test_pts) == reloaded.variance(test_pts)).all())
Example #7
    def prediction_error_gp(self, x):
        """Return the total Gaussian log-likelihood of the held-out targets
        under a GP trained at locations x (reshaped like self.X_obs), summed
        over all target columns."""
        XX = x.reshape(self.X_obs.shape)
        ntest = self.n - self.ntrain

        gp = GP(X=XX, y=self.SY[:, 0:1], cov_main=self.cov, noise_var=self.noise_var,
                sort_events=False, sparse_invert=False)

        # Predictive covariance (with observation noise) is location-only, so
        # its log-determinant and inverse are hoisted out of the column loop.
        pred_cov = gp.covariance(self.Xtest, include_obs=True)
        logdet = np.linalg.slogdet(pred_cov)[1]
        pred_prec = np.linalg.inv(pred_cov)

        def column_ll(y_train, y_held):
            # Refit only this column's weights; covariance terms are reused.
            gp.y = y_train
            gp.alpha_r = gp.factor(y_train)
            rt = y_held - gp.predict(self.Xtest)
            val = -.5 * np.dot(rt, np.dot(pred_prec, rt))
            val += -.5 * logdet
            val += -.5 * ntest * np.log(2 * np.pi)
            return val

        return sum(column_ll(yt, yh) for yt, yh in zip(self.SY.T, self.Ytest.T))