# --- Example 1 ---
    def test_mc_poly_verification(self):
        """Cross-check the analytic polynomial/kernel expectations against
        Monte Carlo estimates under a standard Gaussian input density."""
        dim = 1
        alpha = np.array([[0, 1, 2]])
        par = np.array([[1.0, 1]])
        model = BayesSardModel(1,
                               par,
                               multi_ind=2,
                               point_str='ut',
                               point_par=self.pt_par_ut)
        # closed-form expectations to be verified
        px = model._exp_x_px(alpha)
        xpx = model._exp_x_xpx(alpha)
        pxpx = model._exp_x_pxpx(alpha)
        kxpx = model._exp_x_kxpx(par, alpha, self.data_1d)

        # cumulative moving average: fold one fresh batch into the running mean
        def cma_mc(batch, running_mean, num_seen, axis=0):
            n_new = batch.shape[axis]
            total = batch.sum(axis=axis) + num_seen * running_mean
            return total / (num_seen + n_new)

        batch_size = 100000
        num_iter = 100
        px_mc = xpx_mc = pxpx_mc = kxpx_mc = 0
        for it in range(num_iter):
            # draw a batch from the standard Gaussian; transposed to (dim, batch)
            x_samples = np.random.multivariate_normal(np.zeros((dim, )),
                                                      np.eye(dim),
                                                      size=batch_size).T
            p = vandermonde(alpha, x_samples)  # (N, Q)
            k = model.kernel.eval(par, x_samples, self.data_1d,
                                  scaling=False)  # (N, M)
            num_seen = it * batch_size
            px_mc = cma_mc(p, px_mc, num_seen, axis=0)
            xpx_mc = cma_mc(x_samples[..., na] * p[na, ...],
                            xpx_mc,
                            num_seen,
                            axis=1)
            pxpx_mc = cma_mc(p[:, na, :] * p[..., na],
                             pxpx_mc,
                             num_seen,
                             axis=0)
            kxpx_mc = cma_mc(k[..., na] * p[:, na, :],
                             kxpx_mc,
                             num_seen,
                             axis=0)

        # analytic expressions must agree with the MC estimates within tol
        tol = 5e-3
        print('Maximum absolute difference using {:d} samples.'.format(
            batch_size * num_iter))
        print('px {:.2e}'.format(np.abs(px - px_mc).max()))
        print('xpx {:.2e}'.format(np.abs(xpx - xpx_mc).max()))
        print('pxpx {:.2e}'.format(np.abs(pxpx - pxpx_mc).max()))
        print('kxpx {:.2e}'.format(np.abs(kxpx - kxpx_mc).max()))
        self.assertLessEqual(np.abs(px - px_mc).max(), tol)
        self.assertLessEqual(np.abs(xpx - xpx_mc).max(), tol)
        self.assertLessEqual(np.abs(pxpx - pxpx_mc).max(), tol)
        self.assertLessEqual(np.abs(kxpx - kxpx_mc).max(), tol)
# --- Example 2 ---
 def test_exp_x_kxpx(self):
     """Check _exp_x_kxpx against hand-derived closed-form values for a
     1-D model with multi-indices [0, 1, 2] and unit kernel parameters."""
     model = BayesSardModel(1,
                            self.ker_par_1d,
                            multi_ind=2,
                            point_str='ut',
                            point_par=self.pt_par_ut)
     mi_1d = np.array([[0, 1, 2]])
     par_1d = np.array([[1.0, 1.0]])
     # NOTE: was `dtype=np.float`, an alias removed in NumPy 1.24;
     # the builtin `float` is semantically identical (float64).
     data = np.array([[0, 1, -1]], dtype=float)
     ke = model._exp_x_kxpx(par_1d, mi_1d, data)
     # analytically derived expectations, one row per data point,
     # one column per multi-index
     ke_true = np.array([[2**(-0.5), 0, 1 / (2 * (2**0.5))],
                         [
                             2**(-0.5) * np.exp(-0.25),
                             np.exp(-0.25) / (2 * (2**0.5)),
                             3 * np.exp(-0.25) / (4 * 2**0.5)
                         ],
                         [
                             2**(-0.5) * np.exp(-0.25),
                             -np.exp(-0.25) / (2 * (2**0.5)),
                             3 * np.exp(-0.25) / (4 * 2**0.5)
                         ]])
     # result must be (num_data, num_indices) and match the closed form
     self.assertTrue(ke.shape == (data.shape[1], mi_1d.shape[1]))
     self.assertTrue(np.allclose(ke, ke_true))