Example #1
    def test_mc_poly_verification(self):
        dim = 1
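        # 1-D multi-index: exponents of the monomial basis {1, x, x^2} (assumed interpretation, consistent with the vandermonde call below)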
        alpha = np.array([[0, 1, 2]])
        par = np.array([[1.0, 1]])
        model = BayesSardModel(dim,
                               par,
                               multi_ind=2,
                               point_str='ut',
                               point_par=self.pt_par_ut)
        px = model._exp_x_px(alpha)
        xpx = model._exp_x_xpx(alpha)
        pxpx = model._exp_x_pxpx(alpha)
        kxpx = model._exp_x_kxpx(par, alpha, self.data_1d)

        # approximate expectations using cumulative moving average MC
        def cma_mc(new_samples, old_avg, old_avg_size, axis=0):
            b_size = new_samples.shape[axis]
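            # weight the old running average and the new batch by their sample counts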
            return (new_samples.sum(axis=axis) +
                    old_avg_size * old_avg) / (old_avg_size + b_size)

        batch_size = 100000
        num_iter = 100
        px_mc, xpx_mc, pxpx_mc, kxpx_mc = 0, 0, 0, 0
        for i in range(num_iter):
            # sample from standard Gaussian
            x_samples = np.random.multivariate_normal(np.zeros((dim, )),
                                                      np.eye(dim),
                                                      size=batch_size).T
            p = vandermonde(alpha, x_samples)  # (N, Q)
            k = model.kernel.eval(par, x_samples, self.data_1d,
                                  scaling=False)  # (N, M)
            px_mc = cma_mc(p, px_mc, i * batch_size, axis=0)
            xpx_mc = cma_mc(x_samples[..., na] * p[na, ...],
                            xpx_mc,
                            i * batch_size,
                            axis=1)
            pxpx_mc = cma_mc(p[:, na, :] * p[..., na],
                             pxpx_mc,
                             i * batch_size,
                             axis=0)
            kxpx_mc = cma_mc(k[..., na] * p[:, na, :],
                             kxpx_mc,
                             i * batch_size,
                             axis=0)

        # compare MC approximates with analytic expressions
        tol = 5e-3
        print('Maximum absolute difference using {:d} samples.'.format(
            batch_size * num_iter))
        print('px {:.2e}'.format(np.abs(px - px_mc).max()))
        print('xpx {:.2e}'.format(np.abs(xpx - xpx_mc).max()))
        print('pxpx {:.2e}'.format(np.abs(pxpx - pxpx_mc).max()))
        print('kxpx {:.2e}'.format(np.abs(kxpx - kxpx_mc).max()))
        self.assertLessEqual(np.abs(px - px_mc).max(), tol)
        self.assertLessEqual(np.abs(xpx - xpx_mc).max(), tol)
        self.assertLessEqual(np.abs(pxpx - pxpx_mc).max(), tol)
        self.assertLessEqual(np.abs(kxpx - kxpx_mc).max(), tol)
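The cumulative moving average lets the test aggregate 10 million samples in fixed memory: each batch contributes only its sum, weighted against the number of samples already absorbed. Below is a minimal self-contained sketch of the same update rule; the target quantity E[x^2] = 1 under a standard Gaussian is chosen purely for illustration and is not part of the test above.

import numpy as np

def cma_mc(new_samples, old_avg, old_avg_size, axis=0):
    # fold a new batch into the running average, weighting by sample counts
    b_size = new_samples.shape[axis]
    return (new_samples.sum(axis=axis) + old_avg_size * old_avg) / (old_avg_size + b_size)

batch_size, num_iter = 10_000, 50
avg = 0.0
for i in range(num_iter):
    x = np.random.standard_normal(batch_size)
    avg = cma_mc(x ** 2, avg, i * batch_size)  # running MC estimate of E[x^2]

print('E[x^2] ~ {:.4f} (exact value is 1)'.format(avg))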
Example #2
    def test_exp_x_xpx(self):
        model = BayesSardModel(1,
                               self.ker_par_1d,
                               multi_ind=2,
                               point_str='ut',
                               point_par=self.pt_par_ut)
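        # 1-D multi-index for the basis {1, x, x^2}; under a standard Gaussian
        # E[x * x^a] is 1 for a = 1 and 0 for a = 0, 2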
        mi_1d = np.array([[0, 1, 2]])
        ke = model._exp_x_xpx(mi_1d)
        self.assertTrue(ke.shape == mi_1d.shape)
        self.assertTrue(np.array_equal(ke, np.array([[0, 1, 0]])))

        model = BayesSardModel(2,
                               self.ker_par_2d,
                               multi_ind=2,
                               point_str='ut',
                               point_par=self.pt_par_ut)
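        # columns of mi_2d are the 2-D multi-indices (0,0), (1,0), (0,1), (1,1), (0,2), (2,0)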
        mi_2d = np.array([[0, 1, 0, 1, 0, 2], [0, 0, 1, 1, 2, 0]])
        ke_true = np.array([[0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0]])
        ke = model._exp_x_xpx(mi_2d)
        self.assertTrue(ke.shape == mi_2d.shape)
        self.assertTrue(np.array_equal(ke, ke_true))
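For reference, the expected arrays in this test follow from the moments of a standard Gaussian, the same weight used for the Monte Carlo check in Example #1: odd moments vanish, even moments equal (n-1)!!, and because the dimensions are independent, E[x_i * x^alpha] factorizes across dimensions. The short sketch below, independent of BayesSardModel and written under that Gaussian-weight assumption, reproduces ke_true.

import numpy as np

def gauss_moment(n):
    # n-th raw moment of a standard normal: 0 for odd n, (n - 1)!! for even n
    if n % 2:
        return 0.0
    return float(np.prod(np.arange(n - 1, 0, -2)))

mi_2d = np.array([[0, 1, 0, 1, 0, 2],
                  [0, 0, 1, 1, 2, 0]])
dim, num_basis = mi_2d.shape
ke = np.empty((dim, num_basis))
for i in range(dim):
    for q in range(num_basis):
        # E[x_i * prod_d x_d^alpha_d] factorizes over independent dimensions
        exponents = mi_2d[:, q].copy()
        exponents[i] += 1
        ke[i, q] = np.prod([gauss_moment(e) for e in exponents])

print(ke)  # [[0. 1. 0. 0. 0. 0.], [0. 0. 1. 0. 0. 0.]] -- matches ke_true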