Example #1
    def test_prediction(self):
        """Visual smoke test of BSQ model predictions on a 1D test grid."""
        model = BayesSardModel(1,
                               self.ker_par_1d,
                               multi_ind=2,
                               point_str='gh',
                               point_par={'degree': 5})
        # query grid, training targets at the model's points, and true values
        x_query = np.linspace(-5, 5, 100)[na, :]
        y_train = fcn(model.points)
        f_true = fcn(x_query)
        multi_index = np.array([[0, 1, 2]])
        mean, var = model.predict(x_query, y_train, mulind=multi_index)
        std_dev = np.sqrt(var)

        # plot training data, predictive mean and variance
        fig_title = 'BSQ model predictions'
        fig = plt.figure(fig_title)
        x_query = np.squeeze(x_query)
        band_lo = mean - 2 * std_dev
        band_hi = mean + 2 * std_dev
        plt.fill_between(x_query, band_lo, band_hi, color='0.1', alpha=0.15)
        plt.plot(x_query, mean, color='k', lw=2)
        plt.plot(model.points, y_train, 'ko', ms=8)

        # overlay true function values at the test points if provided
        if f_true is not None:
            plt.plot(x_query, np.squeeze(f_true), lw=2, ls='--', color='tomato')
        plt.show()
Example #2
 def test_weights_ut_5d(self):
     """BQ weights for UT points in 5D: non-negative variances and PD weights."""
     model = BayesSardModel(5,
                            np.array([[1.0, 25, 25, 25, 25, 25]]),
                            point_str='ut')
     # multi-index: constant, linear and pure-quadratic monomials in 5D.
     # NOTE: np.int was deprecated in NumPy 1.20 and removed in 1.24;
     # the builtin int is the documented replacement.
     alpha = np.hstack((np.zeros(
         (5, 1)), np.eye(5), 2 * np.eye(5))).astype(int)
     par = np.array([[1.0, 25, 25, 25, 25, 25]])
     w, wc, wcc, emv, ivar = model.bq_weights(par, alpha)
     # self.assertTrue(np.allclose(w, UnscentedTransform.weights(5, beta=0)[0]))
     # expected model variance and integral variance must be non-negative
     self.assertGreaterEqual(emv, 0)
     self.assertGreaterEqual(ivar, 0)
     # test positive definiteness
     try:
         la.cholesky(wc)
     except la.LinAlgError:
         self.fail("Weights not positive definite. Min eigval: {}".format(
             la.eigvalsh(wc).min()))
Example #3
 def test_weights_ut_2d(self):
     """BQ weights reproduce the unscented-transform weights in 2D."""
     kernel_par = np.array([[1.0, 1.0, 1]])
     # multi-index: constant, linear and pure-quadratic monomials in 2D
     multi_index = np.array([[0, 1, 0, 2, 0], [0, 0, 1, 0, 2]])
     model = BayesSardModel(2,
                            kernel_par,
                            point_str='ut',
                            point_par=self.pt_par_ut)
     w, wc, wcc, emv, ivar = model.bq_weights(kernel_par, multi_index)
     # UT weights reproduced in 2D?
     self.assertTrue(np.allclose(w, UnscentedTransform.weights(2)[0]))
     self.assertGreaterEqual(emv, 0)
     self.assertGreaterEqual(ivar, 0)
     # covariance weights must admit a Cholesky factor (positive definite)
     try:
         la.cholesky(wc)
     except la.LinAlgError:
         self.fail("Weights not positive definite. Min eigval: {}".format(
             la.eigvalsh(wc).min()))
Example #4
 def test_weights_gh5_1d(self):
     """BQ weights reproduce Gauss-Hermite degree-5 weights in 1D."""
     model = BayesSardModel(1,
                            self.ker_par_1d,
                            point_str='gh',
                            point_par={'degree': 5})
     # full 1D polynomial basis up to degree 4
     multi_index = np.array([[0, 1, 2, 3, 4]])
     w, wc, wcc, emv, ivar = model.bq_weights(self.ker_par_1d, multi_index)
     # GH-5 weights in 1D reproduced?
     self.assertTrue(
         np.allclose(w, GaussHermiteTransform.weights(1, degree=5)))
     self.assertGreaterEqual(emv, 0)
     self.assertGreaterEqual(ivar, 0)
     # covariance weights must admit a Cholesky factor (positive definite)
     try:
         la.cholesky(wc)
     except la.LinAlgError:
         self.fail("Weights not positive definite. Min eigval: {}".format(
             la.eigvalsh(wc).min()))
Example #5
 def test_weights_gh3_2d(self):
     """BQ weights reproduce Gauss-Hermite degree-3 weights in 2D."""
     # there are 6 multivariate polynomials in 2D, UT has only 5 points in 2D
     model = BayesSardModel(2,
                            self.ker_par_2d,
                            point_str='gh',
                            point_par={'degree': 3})
     multi_index = np.array([[0, 1, 0, 1, 2, 0, 1, 2, 2],
                             [0, 0, 1, 1, 0, 2, 2, 1, 2]])
     kernel_par = np.array([[1.0, 1, 1]])
     w, wc, wcc, emv, ivar = model.bq_weights(kernel_par, multi_index)
     self.assertTrue(np.allclose(w, GaussHermiteTransform.weights(2, 3)))
     self.assertGreaterEqual(emv, 0)
     self.assertGreaterEqual(ivar, 0)
     # covariance weights must admit a Cholesky factor (positive definite)
     try:
         la.cholesky(wc)
     except la.LinAlgError:
         self.fail("Weights not positive definite. Min eigval: {}".format(
             la.eigvalsh(wc).min()))
Example #6
 def test_weights_sr_1d(self):
     """Non-center BQ weights match the spherical-radial rule in 1D."""
     # SR weights == UT weights for kappa=0 and alpha=1
     model = BayesSardModel(1,
                            self.ker_par_1d,
                            point_str='ut',
                            point_par={'kappa': 0, 'alpha': 1})
     multi_index = np.array([[0, 1, 2]])
     w, wc, wcc, emv, ivar = model.bq_weights(self.ker_par_1d, multi_index)
     # weights past the first (center) point match the SR weights
     self.assertTrue(np.allclose(w[1:],
                                 SphericalRadialTransform.weights(1)))
     self.assertGreaterEqual(emv, 0)
     self.assertGreaterEqual(ivar, 0)
     # covariance weights must admit a Cholesky factor (positive definite)
     try:
         la.cholesky(wc)
     except la.LinAlgError:
         self.fail("Weights not positive definite. Min eigval: {}".format(
             la.eigvalsh(wc).min()))
Example #7
 def test_exp_x_kxpx(self):
     """_exp_x_kxpx matches hand-derived values for a 1D basis and data set."""
     model = BayesSardModel(1,
                            self.ker_par_1d,
                            multi_ind=2,
                            point_str='ut',
                            point_par=self.pt_par_ut)
     mi_1d = np.array([[0, 1, 2]])
     par_1d = np.array([[1.0, 1.0]])
     # NOTE: np.float was deprecated in NumPy 1.20 and removed in 1.24;
     # the builtin float is the documented replacement.
     data = np.array([[0, 1, -1]], dtype=float)
     ke = model._exp_x_kxpx(par_1d, mi_1d, data)
     # hand-derived expected values, one row per data point
     ke_true = np.array([[2**(-0.5), 0, 1 / (2 * (2**0.5))],
                         [
                             2**(-0.5) * np.exp(-0.25),
                             np.exp(-0.25) / (2 * (2**0.5)),
                             3 * np.exp(-0.25) / (4 * 2**0.5)
                         ],
                         [
                             2**(-0.5) * np.exp(-0.25),
                             -np.exp(-0.25) / (2 * (2**0.5)),
                             3 * np.exp(-0.25) / (4 * 2**0.5)
                         ]])
     # result has shape (num data points, num basis functions)
     self.assertTrue(ke.shape == (data.shape[1], mi_1d.shape[1]))
     self.assertTrue(np.allclose(ke, ke_true))
Example #8
    def test_mc_poly_verification(self):
        """Verify the model's analytic expectations against Monte Carlo estimates.

        Compares the values returned by _exp_x_px, _exp_x_xpx, _exp_x_pxpx and
        _exp_x_kxpx (presumably expectations of polynomial/kernel terms under a
        standard Gaussian -- inferred from the sampling below; confirm against
        the model implementation) with cumulative moving-average MC estimates.
        """
        dim = 1
        alpha = np.array([[0, 1, 2]])  # multi-index for the polynomial basis
        par = np.array([[1.0, 1]])
        model = BayesSardModel(1,
                               par,
                               multi_ind=2,
                               point_str='ut',
                               point_par=self.pt_par_ut)
        # analytic expectations to be verified
        px = model._exp_x_px(alpha)
        xpx = model._exp_x_xpx(alpha)
        pxpx = model._exp_x_pxpx(alpha)
        kxpx = model._exp_x_kxpx(par, alpha, self.data_1d)

        # approximate expectations using cumulative moving average MC
        def cma_mc(new_samples, old_avg, old_avg_size, axis=0):
            # fold a fresh batch into the running mean without keeping samples:
            # (sum(new) + n_old * old_mean) / (n_old + n_new)
            b_size = new_samples.shape[axis]
            return (new_samples.sum(axis=axis) +
                    old_avg_size * old_avg) / (old_avg_size + b_size)

        batch_size = 100000
        num_iter = 100
        # running MC averages; 0 is a valid seed since the first batch has
        # old_avg_size == 0
        px_mc, xpx_mc, pxpx_mc, kxpx_mc = 0, 0, 0, 0
        for i in range(num_iter):
            # sample from standard Gaussian
            x_samples = np.random.multivariate_normal(np.zeros((dim, )),
                                                      np.eye(dim),
                                                      size=batch_size).T
            p = vandermonde(alpha, x_samples)  # (N, Q)
            k = model.kernel.eval(par, x_samples, self.data_1d,
                                  scaling=False)  # (N, M)
            # each update broadcasts sample/basis axes so the averaged axis is
            # the sample axis (axis=1 for xpx because x_samples is (dim, N))
            px_mc = cma_mc(p, px_mc, i * batch_size, axis=0)
            xpx_mc = cma_mc(x_samples[..., na] * p[na, ...],
                            xpx_mc,
                            i * batch_size,
                            axis=1)
            pxpx_mc = cma_mc(p[:, na, :] * p[..., na],
                             pxpx_mc,
                             i * batch_size,
                             axis=0)
            kxpx_mc = cma_mc(k[..., na] * p[:, na, :],
                             kxpx_mc,
                             i * batch_size,
                             axis=0)

        # compare MC approximates with analytic expressions
        tol = 5e-3
        print('Maximum absolute difference using {:d} samples.'.format(
            batch_size * num_iter))
        print('px {:.2e}'.format(np.abs(px - px_mc).max()))
        print('xpx {:.2e}'.format(np.abs(xpx - xpx_mc).max()))
        print('pxpx {:.2e}'.format(np.abs(pxpx - pxpx_mc).max()))
        print('kxpx {:.2e}'.format(np.abs(kxpx - kxpx_mc).max()))
        self.assertLessEqual(np.abs(px - px_mc).max(), tol)
        self.assertLessEqual(np.abs(xpx - xpx_mc).max(), tol)
        self.assertLessEqual(np.abs(pxpx - pxpx_mc).max(), tol)
        self.assertLessEqual(np.abs(kxpx - kxpx_mc).max(), tol)
Example #9
    def test_weights_ut_1d(self):
        """BQ weights reproduce UT weights in 1D for default and custom UT parameters."""

        def _check(model, expected_w):
            # shared assertions: weight match, non-negative variances,
            # positive definite covariance weights
            multi_index = np.array([[0, 1, 2]])
            w, wc, wcc, emv, ivar = model.bq_weights(self.ker_par_1d,
                                                     multi_index)
            self.assertTrue(np.allclose(w, expected_w))
            self.assertGreaterEqual(emv, 0)
            self.assertGreaterEqual(ivar, 0)
            try:
                la.cholesky(wc)
            except la.LinAlgError:
                self.fail("Weights not positive definite. Min eigval: {}".format(
                    la.eigvalsh(wc).min()))

        # UT weights in 1D, default point parameters
        _check(
            BayesSardModel(1,
                           self.ker_par_1d,
                           point_str='ut',
                           point_par=self.pt_par_ut),
            UnscentedTransform.weights(1)[0])

        # UT weights in 1D, different kappa and alpha
        _check(
            BayesSardModel(1,
                           self.ker_par_1d,
                           point_str='ut',
                           point_par={'kappa': 2, 'alpha': 1}),
            UnscentedTransform.weights(1, kappa=2, alpha=1)[0])
Example #10
    def test_exp_x_xpx(self):
        """_exp_x_xpx matches hand-computed values in 1D and 2D."""
        # 1D: basis exponents {0, 1, 2}; expected result is [[0, 1, 0]]
        model_1d = BayesSardModel(1,
                                  self.ker_par_1d,
                                  multi_ind=2,
                                  point_str='ut',
                                  point_par=self.pt_par_ut)
        mi_1d = np.array([[0, 1, 2]])
        out_1d = model_1d._exp_x_xpx(mi_1d)
        self.assertTrue(out_1d.shape == mi_1d.shape)
        self.assertTrue(np.array_equal(out_1d, np.array([[0, 1, 0]])))

        # 2D: result keeps the block's shape with hand-computed entries
        model_2d = BayesSardModel(2,
                                  self.ker_par_2d,
                                  multi_ind=2,
                                  point_str='ut',
                                  point_par=self.pt_par_ut)
        mi_2d = np.array([[0, 1, 0, 1, 0, 2], [0, 0, 1, 1, 2, 0]])
        expected_2d = np.array([[0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0]])
        out_2d = model_2d._exp_x_xpx(mi_2d)
        self.assertTrue(out_2d.shape == mi_2d.shape)
        self.assertTrue(np.array_equal(out_2d, expected_2d))
Example #11
 def test_init(self):
     """Constructor accepts 1D kernel parameters with UT points."""
     point_options = dict(multi_ind=2,
                          point_str='ut',
                          point_par=self.pt_par_ut)
     BayesSardModel(1, self.ker_par_1d, **point_options)