Example #1
def plot_discrete_distribution_surface_2d(rv1, rv2, ax=None):
    """
    Only works if rv1 and rv2 are defined on consecutive integers
    """
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib import cm
    from pyapprox.utilities import cartesian_product, outer_product
    from pyapprox.variables import get_probability_masses

    if ax is None:
        fig = plt.figure(figsize=(8, 6))
        ax = fig.add_subplot(111, projection='3d')
    x_1d = [get_probability_masses(rv)[0] for rv in [rv1, rv2]]
    w_1d = [get_probability_masses(rv)[1] for rv in [rv1, rv2]]
    samples = cartesian_product(x_1d)
    weights = outer_product(w_1d)

    dz = weights
    cmap = cm.get_cmap('jet')  # get desired colormap - you can change this!
    max_height = np.max(dz)    # get range of weights so we can normalize
    min_height = np.min(dz)
    # scale each z to [0,1], and get their rgb values
    rgba = [cmap((k-min_height)/(max_height-min_height)) for k in dz]
    # Only works if rv1 and rv2 are defined on consecutive integers
    dx, dy = 1, 1
    ax.bar3d(samples[0, :], samples[1, :], 0, dx, dy, dz, color=rgba,
             zsort='average')

    angle = 45
    ax.view_init(10, angle)
    ax.set_axis_off()
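
A minimal usage sketch (hedged: it assumes pyapprox's get_probability_masses supports scipy's frozen binom variables, whose support is a set of consecutive integers as the docstring requires):

import matplotlib.pyplot as plt
from scipy import stats
# two hypothetical discrete variables defined on consecutive integers
plot_discrete_distribution_surface_2d(stats.binom(10, 0.5), stats.binom(10, 0.3))
plt.show()
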
Example #2
def convert_multivariate_lagrange_polys_to_orthonormal_polys(
        subspace_index, subspace_values, coeffs_1d, poly_indices,
        config_variables_idx):
    import numpy as np
    from pyapprox.utilities import outer_product

    if config_variables_idx is None:
        config_variables_idx = subspace_index.shape[0]

    active_sample_vars = np.where(subspace_index[:config_variables_idx] > 0)[0]
    num_active_sample_vars = active_sample_vars.shape[0]

    # with no active variables the interpolant is constant, so the values
    # are already the coefficients
    if num_active_sample_vars == 0:
        coeffs = subspace_values
        return coeffs

    num_indices = poly_indices.shape[1]
    num_qoi = subspace_values.shape[1]
    coeffs = np.zeros((num_indices, num_qoi), dtype=float)
    for ii in range(num_indices):
        # tensor product of the 1D orthonormal expansions of the Lagrange
        # basis function associated with the ii-th grid point
        poly_coeffs_1d = [
            coeffs_1d[dd][subspace_index[dd]][:, poly_indices[dd, ii]]
            for dd in active_sample_vars]
        poly_coeffs = outer_product(poly_coeffs_1d)
        coeffs += subspace_values[ii, :]*poly_coeffs[:, np.newaxis]

    return coeffs
Example #3
def product_of_independent_random_variables_pdf(
        pdf, gauss_quadrature_rules, zz):
    """
    Compute the PDF of Z = X_1*X_2*...*X_d

    Parameters
    ----------
    pdf : callable
        The PDF of X_1

    gauss_quadrature_rules : list of [x, w] pairs
        List of Gaussian quadrature rules that integrate with respect to
        the PDFs of X_2, ..., X_d

    zz : np.ndarray (num_samples)
        The locations at which to evaluate the PDF of Z
    """
    num_vars = len(gauss_quadrature_rules)+1
    xx = cartesian_product(
        [gauss_quadrature_rules[ii][0] for ii in range(num_vars-1)])
    ww = outer_product(
        [gauss_quadrature_rules[ii][1] for ii in range(num_vars-1)])
    
    vals = np.zeros_like(zz)
    for ii in range(vals.shape[0]):
        vals[ii] = np.dot(
            ww, pdf(zz[ii]/xx.prod(axis=0))/np.absolute(xx.prod(axis=0)))
    return vals
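
A quick sanity check of this routine (a sketch, assuming pyapprox's cartesian_product and outer_product are in scope): for X_1, X_2 ~ U(0, 1) the PDF of Z = X_1*X_2 is -log(z), so the computed values should match that up to quadrature error.

import numpy as np
# map a 1000-point Gauss-Legendre rule from [-1, 1] to [0, 1]; the scaled
# weights sum to 1 so the rule integrates against the U(0, 1) density
x, w = np.polynomial.legendre.leggauss(1000)
x, w = (x+1)/2, w/2

def uniform_pdf(y):
    return ((y >= 0) & (y <= 1)).astype(float)

zz = np.linspace(0.2, 0.9, 8)
vals = product_of_independent_random_variables_pdf(uniform_pdf, [[x, w]], zz)
print(np.absolute(vals+np.log(zz)).max())  # should be near zero
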
Example #4

    def test_inner_products_on_active_subspace(self):
        num_vars = 4
        num_active_vars = 2
        degree = 3
        A = np.random.normal(0, 1, (num_vars, num_vars))
        Q, R = np.linalg.qr(A)
        W1 = Q[:, :num_active_vars]

        as_poly_indices = np.asarray([
            [0, 0], [1, 0], [0, 1], [2, 0], [1, 1], [0, 2]
        ]).T

        x1d, w1d = np.polynomial.legendre.leggauss(10)
        w1d /= 2
        gl_samples = cartesian_product([x1d]*num_vars)
        gl_weights = outer_product([w1d]*num_vars)
        as_gl_samples = np.dot(W1.T, gl_samples)

        inner_product_indices = np.empty(
            (num_active_vars, as_poly_indices.shape[1]**2), dtype=int)
        for ii in range(as_poly_indices.shape[1]):
            for jj in range(as_poly_indices.shape[1]):
                inner_product_indices[:, ii*as_poly_indices.shape[1]+jj] = \
                    as_poly_indices[:, ii]+as_poly_indices[:, jj]

        vandermonde = monomial_basis_matrix(
            inner_product_indices, as_gl_samples)

        inner_products = inner_products_on_active_subspace(
            W1.T, as_poly_indices, monomial_mean_uniform_variables)

        for ii in range(as_poly_indices.shape[1]):
            for jj in range(as_poly_indices.shape[1]):
                assert np.allclose(
                    inner_products[ii, jj],
                    np.dot(vandermonde[:, ii*as_poly_indices.shape[1]+jj],
                           gl_weights))
Example #5

def predictor_corrector_function_of_independent_variables(
        nterms, univariate_quad_rules, fun):
    """
    Use predictor corrector method to compute the recursion coefficients
    of a univariate orthonormal polynomial orthogonal to the density
    associated with a scalar function of a set of independent 1D
    variables

    Parameters
    ----------
    nterms : integer
        The number of coefficients requested

    univariate_quad_rules : callable
        The univariate quadrature rules which include weights of
        each indendent variable

    fun : callable
        The function mapping indendent variables into a scalar variable
    """

    ab = np.zeros((nterms, 2))
    x_1d = [rule[0] for rule in univariate_quad_rules]
    w_1d = [rule[1] for rule in univariate_quad_rules]
    quad_samples = cartesian_product(x_1d, 1)
    quad_weights = outer_product(w_1d)

    # for probability measures the following will always be one, but
    # this is not true for other measures
    ab[0, 1] = np.sqrt(quad_weights.sum())

    for ii in range(1, nterms):
        # predict
        ab[ii, 1] = ab[ii - 1, 1]
        if ii > 1:
            ab[ii - 1, 0] = ab[ii - 2, 0]
        else:
            ab[ii - 1, 0] = 0

        def integrand(x):
            y = fun(x).squeeze()
            pvals = evaluate_orthonormal_polynomial_1d(y, ii, ab)
            # measure not included in integral because it is assumed to
            # be in the quadrature rules
            return pvals[:, ii] * pvals[:, ii - 1]

        G_ii_iim1 = integrand(quad_samples).dot(quad_weights)
        ab[ii - 1, 0] += ab[ii - 1, 1] * G_ii_iim1

        def integrand(x):
            y = fun(x).squeeze()
            pvals = evaluate_orthonormal_polynomial_1d(y, ii, ab)
            # measure not included in integral because it is assumed to
            # be in the quadrature rules
            return pvals[:, ii]**2

        G_ii_ii = integrand(quad_samples).dot(quad_weights)
        ab[ii, 1] *= np.sqrt(G_ii_ii)

    return ab
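
A hedged usage sketch (assuming the helpers the routine relies on, e.g. evaluate_orthonormal_polynomial_1d, cartesian_product and outer_product, are importable from pyapprox): compute the first five recursion coefficients for the density of Y = X_1*X_2 with X_i ~ U(0, 1).

import numpy as np
# Gauss-Legendre rule mapped to [0, 1]; weights sum to 1
x, w = np.polynomial.legendre.leggauss(100)
x, w = (x+1)/2, w/2
ab = predictor_corrector_function_of_independent_variables(
    5, [(x, w), (x, w)], lambda s: s.prod(axis=0))
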
Example #6
def compute_multivariate_orthonormal_basis_product(
        product_coefs_1d, poly_index_ii, poly_index_jj, max_degrees1,
        max_degrees2, tol=2*np.finfo(float).eps):
    """
    Compute the product of two multivariate orthonormal basis functions and
    re-express it as an expansion in the orthonormal basis, using the
    precomputed 1D expansions p_i(x)p_j(x) = sum_k c_k p_k(x) stored in
    product_coefs_1d.
    """
    num_vars = poly_index_ii.shape[0]
    poly_index = poly_index_ii+poly_index_jj
    active_vars = np.where(poly_index > 0)[0]
    if active_vars.shape[0] > 0:
        coefs_1d = []
        for dd in active_vars:
            pii, pjj = poly_index_ii[dd], poly_index_jj[dd]
            if pii < pjj:
                pii, pjj = pjj, pii
            kk = flattened_rectangular_lower_triangular_matrix_index(
                pii, pjj, max_degrees1[dd]+1, max_degrees2[dd]+1)
            coefs_1d.append(product_coefs_1d[dd][kk][:, 0])
        indices_1d = [np.arange(poly_index[dd]+1)
                      for dd in active_vars]
        product_coefs = outer_product(coefs_1d)[:, np.newaxis]
        active_product_indices = cartesian_product(indices_1d)
        II = np.where(np.absolute(product_coefs) > tol)[0]
        active_product_indices = active_product_indices[:, II]
        product_coefs = product_coefs[II]
        product_indices = np.zeros(
            (num_vars, active_product_indices.shape[1]), dtype=int)
        product_indices[active_vars] = active_product_indices
    else:
        product_coefs = np.ones((1, 1))
        product_indices = np.zeros([num_vars, 1], dtype=int)

    return product_indices, product_coefs
Example #7
def get_subspace_weights(subspace_index,
                         weights_1d,
                         config_variables_idx=None):
    """
    Get the quadrature weights of a tensor-product nodal subspace.

    Parameters
    ----------
    subspace_index : np.ndarray (num_vars)
        The subspace index [l_1,...,l_d]

    weights_1d : [[np.ndarray]*num_vars]
        List of quadrature weights for each level and each variable.
        Each element of the inner list is an np.ndarray with ndim=1, which
        means only homogeneous sparse grids are supported, i.e. grids with
        the same quadrature rule used in each dimension (the level can
        differ per dimension though).

    Returns
    -------
    subspace_weights : np.ndarray (num_subspace_samples)
        The quadrature weights of the tensor-product quadrature rule of the
        subspace.
    """
    assert subspace_index.ndim == 1
    num_vars = subspace_index.shape[0]
    if config_variables_idx is None:
        config_variables_idx = num_vars
    assert len(weights_1d) == config_variables_idx

    constant_term = 1.
    II = np.where(subspace_index[:config_variables_idx] > 0)[0]
    subspace_weights_1d = [weights_1d[ii][subspace_index[ii]] for ii in II]

    # For all cases tested so far the level-0 quadrature weights are always
    # 1, so the list comprehension above skips those dimensions. The loop
    # below, which does not make that assumption, takes twice as long.

    # for dd in range(config_variables_idx):
    #     # integrate only over random variables. i.e. do not compute
    #     # tensor product over config variables.

    #     # only compute outer product over variables with non-zero index
    #     if subspace_index[dd]>0:
    #         # assumes level zero weight is constant
    #         subspace_weights_1d.append(weights_1d[dd][subspace_index[dd]])
    #     else:
    #         assert len(weights_1d[dd][subspace_index[dd]])==1
    #         constant_term *= weights_1d[dd][subspace_index[dd]][0]
    if len(subspace_weights_1d) > 0:
        subspace_weights = outer_product(subspace_weights_1d) * constant_term
    else:
        subspace_weights = np.ones(1) * constant_term
    return subspace_weights
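
A small sketch of the expected data layout (the weights below are hypothetical, purely for illustration, and outer_product is assumed to be in scope): weights_1d[dd][ll] holds the level-ll weights of dimension dd.

import numpy as np
weights_1d = [[np.ones(1), np.full(3, 1./3.)] for _ in range(2)]
subspace_index = np.array([1, 0])  # level 1 in dim 0, level 0 in dim 1
# only dimension 0 is active, so the result is its level-1 weights
print(get_subspace_weights(subspace_index, weights_1d))  # [1/3 1/3 1/3]
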
Example #8
    def test_inner_products_on_active_subspace_using_samples(self):

        def generate_samples(num_samples):
            from pyapprox.low_discrepancy_sequences import \
                transformed_halton_sequence
            samples = transformed_halton_sequence(None, num_vars, num_samples)
            samples = samples*2.-1.
            return samples

        num_vars = 4
        num_active_vars = 2
        degree = 3
        A = np.random.normal(0, 1, (num_vars, num_vars))
        Q, R = np.linalg.qr(A)
        W1 = Q[:, :num_active_vars]

        as_poly_indices = np.asarray([
            [0, 0], [1, 0], [0, 1], [2, 0], [1, 1], [0, 2]
        ]).T

        x1d, w1d = np.polynomial.legendre.leggauss(10)
        w1d /= 2
        gl_samples = cartesian_product([x1d]*num_vars)
        gl_weights = outer_product([w1d]*num_vars)
        as_gl_samples = np.dot(W1.T, gl_samples)

        inner_product_indices = np.empty(
            (num_active_vars, as_poly_indices.shape[1]**2), dtype=int)
        for ii in range(as_poly_indices.shape[1]):
            for jj in range(as_poly_indices.shape[1]):
                inner_product_indices[:, ii*as_poly_indices.shape[1]+jj] =\
                    as_poly_indices[:, ii]+as_poly_indices[:, jj]

        vandermonde = monomial_basis_matrix(
            inner_product_indices, as_gl_samples)

        num_sobol_samples = 100000
        inner_products = sample_based_inner_products_on_active_subspace(
            W1, monomial_basis_matrix, as_poly_indices, num_sobol_samples,
            generate_samples)

        for ii in range(as_poly_indices.shape[1]):
            for jj in range(as_poly_indices.shape[1]):
                assert np.allclose(
                    inner_products[ii, jj],
                    np.dot(vandermonde[:, ii*as_poly_indices.shape[1]+jj],
                           gl_weights), atol=1e-4)
Example #9
    def integrate(self, mesh_values, order=None):
        if order is None:
            order = self.order
        # Get Gauss-Legendre rule
        gl_pts, gl_wts = gauss_jacobi_pts_wts_1D(order, 0, 0)
        pts_1d, wts_1d = [], []
        lims = self.xlim+self.ylim
        for ii in range(2):
            # Scale points from [-1,1] to the physical domain
            x_range = lims[2*ii+1]-lims[2*ii]
            # Remove factor of 0.5 from weights and shift to [a,b]
            wts_1d.append(gl_wts*x_range)
            pts_1d.append(x_range*(gl_pts+1.)/2.+lims[2*ii])
        # Interpolate mesh values onto quadrature nodes
        pts = cartesian_product(pts_1d)
        wts = outer_product(wts_1d)
        gl_vals = self.interpolate(mesh_values, pts)
        # Compute and return integral
        return np.dot(gl_vals[:, 0], wts)
Example #10
    def test_moments_of_active_subspace_II(self):
        num_vars = 4
        num_active_vars = 2
        degree = 12
        A = np.random.normal(0, 1, (num_vars, num_vars))
        Q, R = np.linalg.qr(A)
        W1 = Q[:, :num_active_vars]

        as_poly_indices = compute_hyperbolic_indices(
            num_active_vars, degree, 1.0)
        moments = moments_of_active_subspace(
            W1.T, as_poly_indices, monomial_mean_uniform_variables)

        x1d, w1d = np.polynomial.legendre.leggauss(10)
        w1d /= 2
        gl_samples = cartesian_product([x1d]*num_vars)
        gl_weights = outer_product([w1d]*num_vars)
        as_gl_samples = np.dot(W1.T, gl_samples)

        vandermonde = monomial_basis_matrix(as_poly_indices, as_gl_samples)
        quad_poly_moments = np.empty(vandermonde.shape[1])
        for ii in range(vandermonde.shape[1]):
            quad_poly_moments[ii] = np.dot(vandermonde[:, ii], gl_weights)
        assert np.allclose(moments, quad_poly_moments)
Example #11
def plot_discrete_distribution_heatmap_2d(rv1, rv2, ax=None, zero_tol=1e-4):
    """
    Only works if rv1 and rv2 are defined on consecutive integers
    """
    import copy
    import numpy as np
    import matplotlib.pyplot as plt
    from pyapprox.utilities import outer_product
    from pyapprox.variables import get_probability_masses

    if ax is None:
        fig = plt.figure(figsize=(8, 6))
        ax = fig.add_subplot(111)
    x_1d = [get_probability_masses(rv)[0] for rv in [rv1, rv2]]
    w_1d = [get_probability_masses(rv)[1] for rv in [rv1, rv2]]
    weights = outer_product(w_1d)

    Z = np.reshape(weights, (len(x_1d[0]), len(x_1d[1])), order='F')
    Z[Z < zero_tol] = np.inf
    cmap = copy.copy(plt.cm.viridis)
    cmap.set_bad('gray', 1)
    xx = np.hstack((x_1d[0], x_1d[0].max()+1))-0.5
    yy = np.hstack((x_1d[1], x_1d[1].max()+1))-0.5
    p = ax.pcolormesh(xx, yy, Z.T, cmap=cmap)
    plt.colorbar(p, ax=ax)
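
A minimal usage sketch (again assuming pyapprox's get_probability_masses supports scipy's frozen binom variables, which live on consecutive integers as required):

import matplotlib.pyplot as plt
from scipy import stats
plot_discrete_distribution_heatmap_2d(stats.binom(10, 0.5), stats.binom(10, 0.3))
plt.show()
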
Example #12
    def test_discrete_induced_sampling(self):
        degree = 3

        nmasses1 = 10
        mass_locations1 = np.geomspace(1.0, 512.0, num=nmasses1)
        #mass_locations1 = np.arange(0,nmasses1)
        masses1 = np.ones(nmasses1, dtype=float) / nmasses1
        var1 = float_rv_discrete(name='float_rv_discrete',
                                 values=(mass_locations1, masses1))()

        nmasses2 = 10
        mass_locations2 = np.arange(0, nmasses2)
        # if increased beyond 16 the unmodified masses become ill conditioned
        masses2 = np.geomspace(1.0, 16.0, num=nmasses2)
        #masses2  = np.ones(nmasses2,dtype=float)/nmasses2

        masses2 /= masses2.sum()
        var2 = float_rv_discrete(name='float_rv_discrete',
                                 values=(mass_locations2, masses2))()

        var_trans = AffineRandomVariableTransformation([var1, var2])
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(pce.num_vars(), degree, 1.0)
        pce.set_indices(indices)

        num_samples = int(1e4)
        np.random.seed(1)
        canonical_samples = generate_induced_samples(pce, num_samples)
        samples = var_trans.map_from_canonical_space(canonical_samples)

        np.random.seed(1)
        canonical_xk = [
            2 * get_distribution_info(var1)[2]['xk'] - 1,
            2 * get_distribution_info(var2)[2]['xk'] - 1
        ]
        basis_matrix_generator = partial(basis_matrix_generator_1d, pce,
                                         degree)
        canonical_samples1 = discrete_induced_sampling(
            basis_matrix_generator, pce.indices, canonical_xk,
            [var1.dist.pk, var2.dist.pk], num_samples)
        samples1 = var_trans.map_from_canonical_space(canonical_samples1)

        def density(x):
            return var1.pdf(x[0, :]) * var2.pdf(x[1, :])

        envelope_factor = 30

        def generate_proposal_samples(n):
            samples = np.vstack([var1.rvs(n), var2.rvs(n)])
            return samples

        proposal_density = density

        # unlike Fekete and Leja sampling, we can and should use
        # pce.basis_matrix here. If canonical_basis_matrix were used, the
        # densities would also have to be mapped to that space, which can
        # be difficult
        samples2 = random_induced_measure_sampling(num_samples, pce.num_vars(),
                                                   pce.basis_matrix, density,
                                                   proposal_density,
                                                   generate_proposal_samples,
                                                   envelope_factor)

        def induced_density(x):
            vals = density(x) * christoffel_function(x, pce.basis_matrix, True)
            return vals

        from pyapprox.utilities import cartesian_product, outer_product
        from pyapprox.polynomial_sampling import christoffel_function
        quad_samples = cartesian_product([var1.dist.xk, var2.dist.xk])
        quad_weights = outer_product([var1.dist.pk, var2.dist.pk])

        #print(canonical_samples.min(axis=1),canonical_samples.max(axis=1))
        #print(samples.min(axis=1),samples.max(axis=1))
        #print(canonical_samples1.min(axis=1),canonical_samples1.max(axis=1))
        #print(samples1.min(axis=1),samples1.max(axis=1))
        # import matplotlib.pyplot as plt
        # plt.plot(quad_samples[0,:],quad_samples[1,:],'s')
        # plt.plot(samples[0,:],samples[1,:],'o')
        # plt.plot(samples1[0,:],samples1[1,:],'*')
        # plt.show()

        rtol = 1e-2
        assert np.allclose(quad_weights, density(quad_samples))
        assert np.allclose(density(quad_samples).sum(), 1)
        assert np.allclose(
            christoffel_function(quad_samples, pce.basis_matrix,
                                 True).dot(quad_weights), 1.0)
        true_induced_mean = quad_samples.dot(induced_density(quad_samples))
        print(true_induced_mean)
        print(samples.mean(axis=1))
        print(samples1.mean(axis=1))
        print(samples2.mean(axis=1))
        print(
            samples1.mean(axis=1) - true_induced_mean,
            true_induced_mean * rtol)
        #print(samples2.mean(axis=1))
        assert np.allclose(samples.mean(axis=1), true_induced_mean, rtol=rtol)
        assert np.allclose(samples1.mean(axis=1), true_induced_mean, rtol=rtol)
        assert np.allclose(samples2.mean(axis=1), true_induced_mean, rtol=rtol)
Example #13
    def help_discrete_induced_sampling(self, var1, var2, envelope_factor):
        degree = 3

        var_trans = AffineRandomVariableTransformation([var1, var2])
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(pce.num_vars(), degree, 1.0)
        pce.set_indices(indices)

        num_samples = int(3e4)
        np.random.seed(1)
        canonical_samples = generate_induced_samples(pce, num_samples)
        samples = var_trans.map_from_canonical_space(canonical_samples)

        np.random.seed(1)
        #canonical_xk = [2*get_distribution_info(var1)[2]['xk']-1,
        #                2*get_distribution_info(var2)[2]['xk']-1]
        xk = np.array([
            get_probability_masses(var)[0]
            for var in var_trans.variable.all_variables()
        ])
        pk = np.array([
            get_probability_masses(var)[1]
            for var in var_trans.variable.all_variables()
        ])
        canonical_xk = var_trans.map_to_canonical_space(xk)
        basis_matrix_generator = partial(basis_matrix_generator_1d, pce,
                                         degree)
        canonical_samples1 = discrete_induced_sampling(basis_matrix_generator,
                                                       pce.indices,
                                                       canonical_xk, pk,
                                                       num_samples)
        samples1 = var_trans.map_from_canonical_space(canonical_samples1)

        def univariate_pdf(var, x):
            if hasattr(var.dist, 'pdf'):
                return var.pdf(x)
            # discrete variables only expose a probability mass function
            return var.pmf(x)

        def density(x):
            # some issue with native scipy.pmf
            #assert np.allclose(var1.pdf(x[0, :]),var1.pmf(x[0, :]))
            return univariate_pdf(var1, x[0, :]) * univariate_pdf(
                var2, x[1, :])

        def generate_proposal_samples(n):
            samples = np.vstack([var1.rvs(n), var2.rvs(n)])
            return samples

        proposal_density = density

        # unlike Fekete and Leja sampling, we can and should use
        # pce.basis_matrix here. If canonical_basis_matrix were used, the
        # densities would also have to be mapped to that space, which can
        # be difficult
        samples2 = random_induced_measure_sampling(num_samples, pce.num_vars(),
                                                   pce.basis_matrix, density,
                                                   proposal_density,
                                                   generate_proposal_samples,
                                                   envelope_factor)

        def induced_density(x):
            vals = density(x) * christoffel_function(x, pce.basis_matrix, True)
            return vals

        from pyapprox.utilities import cartesian_product, outer_product
        from pyapprox.polynomial_sampling import christoffel_function
        quad_samples = cartesian_product([xk[0], xk[1]])
        quad_weights = outer_product([pk[0], pk[1]])

        # print(canonical_samples.min(axis=1),canonical_samples.max(axis=1))
        # print(samples.min(axis=1),samples.max(axis=1))
        # print(canonical_samples1.min(axis=1),canonical_samples1.max(axis=1))
        # print(samples1.min(axis=1),samples1.max(axis=1))
        # import matplotlib.pyplot as plt
        # plt.plot(quad_samples[0,:],quad_samples[1,:],'s')
        # plt.plot(samples[0,:],samples[1,:],'o')
        # plt.plot(samples1[0,:],samples1[1,:],'*')
        # plt.show()

        rtol = 1e-2
        assert np.allclose(quad_weights, density(quad_samples))
        assert np.allclose(density(quad_samples).sum(), 1)
        assert np.allclose(
            christoffel_function(quad_samples, pce.basis_matrix,
                                 True).dot(quad_weights), 1.0)
        true_induced_mean = quad_samples.dot(induced_density(quad_samples))
        # print(true_induced_mean)
        # print(samples.mean(axis=1))
        # print(samples1.mean(axis=1))
        # print(samples2.mean(axis=1))
        # print(samples1.mean(axis=1)-true_induced_mean, true_induced_mean*rtol)
        # print(samples2.mean(axis=1))
        assert np.allclose(samples.mean(axis=1), true_induced_mean, rtol=rtol)
        assert np.allclose(samples1.mean(axis=1), true_induced_mean, rtol=rtol)
        assert np.allclose(samples2.mean(axis=1), true_induced_mean, rtol=rtol)
Example #14
    def help_compare_prediction_based_oed(self, deviation_fun,
                                          gauss_deviation_fun,
                                          use_gauss_quadrature,
                                          ninner_loop_samples, ndesign_vars,
                                          tol):
        ncandidates_1d = 5
        design_candidates = cartesian_product(
            [np.linspace(-1, 1, ncandidates_1d)] * ndesign_vars)
        ncandidates = design_candidates.shape[1]

        # Define model used to predict likely observable data
        indices = compute_hyperbolic_indices(ndesign_vars, 1)[:, 1:]
        Amat = monomial_basis_matrix(indices, design_candidates)
        obs_fun = partial(linear_obs_fun, Amat)

        # Define model used to predict unobservable QoI
        qoi_fun = exponential_qoi_fun

        # Define the prior PDF of the unknown variables
        nrandom_vars = indices.shape[1]
        prior_variable = IndependentMultivariateRandomVariable(
            [stats.norm(0, 0.5)] * nrandom_vars)

        # Define the independent observational noise
        noise_std = 1

        # Define initial design
        init_design_indices = np.array([ncandidates // 2])

        # Define OED options
        nouter_loop_samples = 100
        if use_gauss_quadrature:
            # 301 needed for cvar deviation
            # only 31 needed for variance deviation
            ninner_loop_samples_1d = ninner_loop_samples
            var_trans = AffineRandomVariableTransformation(prior_variable)
            x_quad, w_quad = gauss_hermite_pts_wts_1D(ninner_loop_samples_1d)
            x_quad = cartesian_product([x_quad] * nrandom_vars)
            w_quad = outer_product([w_quad] * nrandom_vars)
            x_quad = var_trans.map_from_canonical_space(x_quad)
            ninner_loop_samples = x_quad.shape[1]

            def generate_inner_prior_samples(nsamples):
                assert nsamples == x_quad.shape[1], (nsamples, x_quad.shape)
                return x_quad, w_quad
        else:
            # use default Monte Carlo sampling
            generate_inner_prior_samples = None

        # Setup OED problem
        oed = BayesianBatchDeviationOED(design_candidates,
                                        obs_fun,
                                        noise_std,
                                        prior_variable,
                                        qoi_fun,
                                        nouter_loop_samples,
                                        ninner_loop_samples,
                                        generate_inner_prior_samples,
                                        deviation_fun=deviation_fun)
        oed.populate()
        oed.set_collected_design_indices(init_design_indices)

        prior_mean = oed.prior_variable.get_statistics('mean')
        prior_cov = np.diag(prior_variable.get_statistics('var')[:, 0])
        prior_cov_inv = np.linalg.inv(prior_cov)
        selected_indices = init_design_indices

        # Generate experimental design
        nexperiments = 3
        for step in range(len(init_design_indices), nexperiments):
            # Copy current state of OED before new data is determined
            # This copy will be used to compute Laplace based utility and
            # evidence values for testing
            oed_copy = copy.deepcopy(oed)

            # Update the design
            utility_vals, selected_indices = oed.update_design()

            utility, deviations, evidences, weights = \
                oed_copy.compute_expected_utility(
                    oed_copy.collected_design_indices, selected_indices, True)

            exact_deviations = np.empty(nouter_loop_samples)
            for jj in range(nouter_loop_samples):
                # only test intermediate quantities associated with design
                # chosen by the OED step
                idx = oed.collected_design_indices
                obs_jj = oed_copy.outer_loop_obs[jj:jj + 1, idx]

                noise_cov_inv_jj = np.eye(idx.shape[0]) / noise_std**2
                exact_post_mean_jj, exact_post_cov_jj = \
                    laplace_posterior_approximation_for_linear_models(
                        Amat[idx, :],
                        prior_mean, prior_cov_inv, noise_cov_inv_jj, obs_jj.T)

                exact_deviations[jj] = gauss_deviation_fun(
                    exact_post_mean_jj, exact_post_cov_jj)
            print('d',
                  np.absolute(exact_deviations - deviations[:, 0]).max(), tol)
            # print(exact_deviations, deviations[:, 0])
            assert np.allclose(exact_deviations, deviations[:, 0], atol=tol)
            assert np.allclose(utility_vals[selected_indices],
                               -np.mean(exact_deviations),
                               atol=tol)

    def test_pce_product_of_beta_variables(self):
        def fun(x):
            return np.sqrt(x.prod(axis=0))[:, None]

        dist_alpha1, dist_beta1 = 1, 1
        dist_alpha2, dist_beta2 = dist_alpha1 + 0.5, dist_beta1
        nvars = 2

        x_1d, w_1d = [], []
        nquad_samples_1d = 100
        x, w = gauss_jacobi_pts_wts_1D(nquad_samples_1d, dist_beta1 - 1,
                                       dist_alpha1 - 1)
        x = (x + 1) / 2
        x_1d.append(x)
        w_1d.append(w)
        x, w = gauss_jacobi_pts_wts_1D(nquad_samples_1d, dist_beta2 - 1,
                                       dist_alpha2 - 1)
        x = (x + 1) / 2
        x_1d.append(x)
        w_1d.append(w)

        quad_samples = cartesian_product(x_1d)
        quad_weights = outer_product(w_1d)

        mean = fun(quad_samples)[:, 0].dot(quad_weights)
        variance = (fun(quad_samples)[:, 0]**2).dot(quad_weights) - mean**2
        assert np.allclose(mean,
                           stats.beta(dist_alpha1 * 2, dist_beta1 * 2).mean())
        assert np.allclose(variance,
                           stats.beta(dist_alpha1 * 2, dist_beta1 * 2).var())

        degree = 10
        poly = PolynomialChaosExpansion()
        # the distributions and ranges of the univariate variables are
        # ignored when var_trans.set_identity_maps([0]) is used
        initial_variables = [stats.uniform(0, 1)]
        # TODO get quad rules from initial variables
        quad_rules = [(x, w) for x, w in zip(x_1d, w_1d)]
        univariate_variables = [
            rv_function_indpndt_vars(fun, initial_variables, quad_rules)
        ]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)
        poly.set_indices(tensor_product_indices([degree]))

        train_samples = (np.linspace(0, np.pi, 101)[None, :] + 1) / 2
        train_vals = train_samples.T
        coef = np.linalg.lstsq(poly.basis_matrix(train_samples),
                               train_vals,
                               rcond=None)[0]
        poly.set_coefficients(coef)
        assert np.allclose(poly.mean(),
                           stats.beta(dist_alpha1 * 2, dist_beta1 * 2).mean())
        assert np.allclose(poly.variance(),
                           stats.beta(dist_alpha1 * 2, dist_beta1 * 2).var())

        poly = PolynomialChaosExpansion()
        initial_variables = [stats.uniform(0, 1)]
        funs = [lambda x: np.sqrt(x)] * nvars
        quad_rules = [(x, w) for x, w in zip(x_1d, w_1d)]
        # TODO get quad rules from initial variables
        univariate_variables = [
            rv_product_indpndt_vars(funs, initial_variables, quad_rules)
        ]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)
        poly.set_indices(tensor_product_indices([degree]))

        train_samples = (np.linspace(0, np.pi, 101)[None, :] + 1) / 2
        train_vals = train_samples.T
        coef = np.linalg.lstsq(poly.basis_matrix(train_samples),
                               train_vals,
                               rcond=None)[0]
        poly.set_coefficients(coef)
        assert np.allclose(poly.mean(),
                           stats.beta(dist_alpha1 * 2, dist_beta1 * 2).mean())
        assert np.allclose(poly.variance(),
                           stats.beta(dist_alpha1 * 2, dist_beta1 * 2).var())