Example #1
    def test_compute_f_divergence(self):
        # KL divergence
        from scipy.stats import multivariate_normal
        nvars = 1
        mean = np.random.uniform(-0.1, 0.1, nvars)
        cov = np.diag(np.random.uniform(.5, 1, nvars))
        rv1 = multivariate_normal(mean, cov)
        rv2 = multivariate_normal(np.zeros(nvars), np.eye(nvars))

        def density1(x):
            return rv1.pdf(x.T)

        def density2(x):
            return rv2.pdf(x.T)

        # Integrate on [-radius,radius]
        # Note this induces small error by truncating domain
        radius = 10
        from pyapprox import get_tensor_product_quadrature_rule
        x, w = get_tensor_product_quadrature_rule(
            400,
            nvars,
            np.polynomial.legendre.leggauss,
            transform_samples=lambda x: x * radius,
            density_function=lambda x: radius * np.ones(x.shape[1]))
        quad_rule = x, w
        div = compute_f_divergence(density1,
                                   density2,
                                   quad_rule,
                                   'KL',
                                   normalize=False)
        true_div = 0.5 * (np.diag(cov) + mean**2 - np.log(np.diag(cov)) -
                          1).sum()
        assert np.allclose(div, true_div, rtol=1e-12)

        # Hellinger divergence
        from scipy.stats import beta
        a1, b1, a2, b2 = 1, 1, 2, 3
        rv1, rv2 = beta(a1, b1), beta(a2, b2)
        true_div = 2 * (1 - beta_fn(
            (a1 + a2) / 2,
            (b1 + b2) / 2) / np.sqrt(beta_fn(a1, b1) * beta_fn(a2, b2)))

        x, w = get_tensor_product_quadrature_rule(
            500,
            nvars,
            np.polynomial.legendre.leggauss,
            transform_samples=lambda x: (x + 1) / 2,
            density_function=lambda x: 0.5 * np.ones(x.shape[1]))
        quad_rule = x, w
        div = compute_f_divergence(rv1.pdf,
                                   rv2.pdf,
                                   quad_rule,
                                   'hellinger',
                                   normalize=False)
        assert np.allclose(div, true_div, rtol=1e-10)
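
For reference, the exact values compared against above are the closed-form KL divergence between Gaussians, D_KL(N(mu, sigma^2) || N(0, 1)) = (sigma^2 + mu^2 - ln(sigma^2) - 1)/2 (summed over the independent dimensions), and, for the 'hellinger' case, 2*(1 - BC), where BC = B((a1+a2)/2, (b1+b2)/2) / sqrt(B(a1, b1)*B(a2, b2)) is the Bhattacharyya coefficient of the two Beta densities.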
Example #2
    def test_tensor_product_quadrature(self):
        num_vars = 2
        alpha_poly = 1
        beta_poly = 2

        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, alpha_poly, beta_poly)
            x = (x+1)/2.
            return x, w

        x, w = get_tensor_product_quadrature_rule(
            100, num_vars, univariate_quadrature_rule)
        function = lambda x: np.sum(x**2, axis=0)
        assert np.allclose(np.dot(function(x), w), 0.8)
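
The asserted value 0.8 is consistent with gauss_jacobi_pts_wts_1D returning weights for the corresponding Beta probability measure: after the map x -> (x+1)/2, the alpha_poly=1, beta_poly=2 weight is a Beta(3, 2) density on [0, 1], for which E[Y^2] = a(a+1)/((a+b)(a+b+1)) = 12/30 = 0.4; summing x_1^2 + x_2^2 over the two independent dimensions gives 0.8.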
Example #3
    def get_quadrature_rule(self):
        nvars = self.variable.num_vars()
        degrees = [10]*nvars
        var_trans = pya.AffineRandomVariableTransformation(self.variable)
        gauss_legendre = partial(
            pya.gauss_jacobi_pts_wts_1D, alpha_poly=0, beta_poly=0)
        univariate_quadrature_rules = [
            gauss_legendre, gauss_legendre, pya.gauss_hermite_pts_wts_1D,
            pya.gauss_hermite_pts_wts_1D, pya.gauss_hermite_pts_wts_1D]
        x, w = pya.get_tensor_product_quadrature_rule(
            degrees, self.variable.num_vars(), univariate_quadrature_rules,
            var_trans.map_from_canonical_space)
        return x, w
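
A minimal usage sketch (fun is a hypothetical model, assumed to return values of shape (x.shape[1], 1)): from another method of the same class, the model mean under the joint variable could be approximated with

        x, w = self.get_quadrature_rule()
        approx_mean = fun(x)[:, 0].dot(w)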
Example #4
#Let's plot the posterior distribution and the MCMC samples. First we must compute the evidence (the normalizing constant of the posterior).
def unnormalized_posterior(x):
    vals = np.exp(loglike.loglike(x))
    rvs = variables.all_variables()
    for ii in range(variables.num_vars()):
        vals[:, 0] *= rvs[ii].pdf(x[ii, :])
    return vals


def univariate_quadrature_rule(n):
    x, w = pya.gauss_jacobi_pts_wts_1D(n, 0, 0)
    x *= 2
    return x, w


x, w = pya.get_tensor_product_quadrature_rule(100, variables.num_vars(),
                                              univariate_quadrature_rule)
evidence = unnormalized_posterior(x)[:, 0].dot(w)
print('evidence', evidence)

plt.figure()
X, Y, Z = pya.get_meshgrid_function_data(
    lambda x: unnormalized_posterior(x) / evidence, plot_range, 50)
plt.contourf(X,
             Y,
             Z,
             levels=np.linspace(Z.min(), Z.max(), 30),
             cmap=matplotlib.cm.coolwarm)
plt.plot(samples[0, :], samples[1, :], 'ko')
plt.show()
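
#%%
#As a further illustration (a minimal sketch using the quantities computed
#above), the same tensor-product rule can be reused to estimate posterior
#moments, e.g. the posterior mean of each variable.
post_weights = unnormalized_posterior(x)[:, 0]*w/evidence
print('posterior mean', x.dot(post_weights))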

#%%
Example #5
#   &\le N^{-1}\var{Q_\alpha}+C_{d,r} N_{\bi}^{-s/d}
#
#Because a surrogate is inexpensive to evaluate, the first term can be driven to zero so that only the bias remains. Thus the error in the Monte Carlo estimate of the mean using the surrogate is dominated by the error in the surrogate. If this error can be reduced more quickly than :math:`N^{-1}` (as is the case for low-dimensional tensor-product interpolation) then using surrogates for computing moments is very effective.
#
#Note that moments can be estimated without using Monte Carlo sampling by leveraging properties of the univariate interpolation rules used to build the multivariate interpolant. Specifically, the expectation of a tensor product interpolant can be computed without explicitly forming the interpolant and is given by
#
#.. math::
#
#   \mu_{\bi}=\int_{\rvdom} \sum_{\V{j}\le\bi}f_\ai(\rv^{(\V{j})})\prod_{i=1}^d\phi_{i,j_i}(\rv_i) w(\rv)\,d\rv=\sum_{\V{j}\le\bi} f_\ai(\rv^{(\V{j})}) v_{\V{j}}.
#
#The expectation is thus simply a weighted sum of the function values, where the weights are the Cartesian product of the univariate quadrature weights
#
#.. math:: v_{\V{j}}=\prod_{i=1}^d\int_{\rvdom_i}{\phi_{i,j_i}(\rv_i)}\,dw(\rv_i),
#
#which can be computed analytically.
x, w = pya.get_tensor_product_quadrature_rule(level, 2,
                                              pya.clenshaw_curtis_pts_wts_1D)
surrogate_mean = f(x)[:, 0].dot(w)
print('Quadrature mean', surrogate_mean)
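
#%%
#A quick sanity check (illustrative sketch): the multivariate weights are the
#Cartesian product of the univariate Clenshaw-Curtis weights, so their sum
#factorizes into the product of the univariate sums and the number of points
#is the square of the number of 1D points.
x_1d, w_1d = pya.clenshaw_curtis_pts_wts_1D(level)  # same level as above
assert np.allclose(w.sum(), w_1d.sum()**2)
assert x.shape[1] == x_1d.shape[0]**2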
#%%
#Here we have recomputed the values of :math:`f` at the interpolation samples, but in practice we should just re-use the values collected when building the interpolant.
#
#Now let us compare the quadrature mean with the MC mean computed using the surrogate
num_samples = int(1e6)
samples = np.random.uniform(-1, 1, (2, num_samples))
values = interp(samples)
mc_mean = values.mean()
print('Monte Carlo surrogate mean', mc_mean)