Example #1
    def setup(self, num_vars, alpha_stat, beta_stat):
        def univariate_weight_function(x):
            return beta_pdf_on_ab(alpha_stat, beta_stat, -1, 1, x)

        def univariate_weight_function_deriv(x):
            return beta_pdf_derivative(alpha_stat, beta_stat, (x + 1) / 2) / 4

        weight_function = partial(evaluate_tensor_product_function,
                                  [univariate_weight_function] * num_vars)

        weight_function_deriv = partial(
            gradient_of_tensor_product_function,
            [univariate_weight_function] * num_vars,
            [univariate_weight_function_deriv] * num_vars)

        assert np.allclose((univariate_weight_function(0.5 + 1e-6) -
                            univariate_weight_function(0.5)) / 1e-6,
                           univariate_weight_function_deriv(0.5),
                           atol=1e-6)

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(-2, 1), num_vars)
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        return weight_function, weight_function_deriv, poly
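
The tensor-product weight used in this setup is just the product of the univariate weights, and its gradient follows from the product rule. A minimal self-contained sketch of the quantities that evaluate_tensor_product_function and gradient_of_tensor_product_function compute (the helper names and the (num_vars, num_samples) sample layout are assumptions for illustration, not the pyapprox implementation):

import numpy as np

def tensor_product_weight(univariate_funs, samples):
    # samples has shape (num_vars, num_samples); the weight at each sample
    # is the product of the univariate weights over the dimensions
    vals = np.ones(samples.shape[1])
    for dd, fun in enumerate(univariate_funs):
        vals *= fun(samples[dd, :])
    return vals

def tensor_product_weight_gradient(univariate_funs, univariate_derivs,
                                   samples):
    # product rule: the dd-th gradient component replaces the dd-th factor
    # with its derivative and keeps all the other factors
    num_vars, num_samples = samples.shape
    grad = np.empty((num_vars, num_samples))
    for dd in range(num_vars):
        vals = univariate_derivs[dd](samples[dd, :])
        for kk in range(num_vars):
            if kk != dd:
                vals = vals * univariate_funs[kk](samples[kk, :])
        grad[dd, :] = vals
    return grad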
Example #2
    def set_polynomial_chaos_expansion(self, pce=None):
        if pce is None:
            poly_opts = define_poly_options_from_variable_transformation(
                self.variable_transformation)
            self.pce = PolynomialChaosExpansion()
            self.pce.configure(poly_opts)
        else:
            self.pce = pce
Example #3
    def test_fekete_rosenblatt_interpolation(self):
        np.random.seed(2)
        degree=3

        __,__,joint_density,limits = rosenblatt_example_2d(num_samples=1)
        num_vars=len(limits)//2

        rosenblatt_opts = {'limits':limits,'num_quad_samples_1d':20}
        var_trans_1 = RosenblattTransformation(
            joint_density,num_vars,rosenblatt_opts)
        # The Rosenblatt transformation maps to [0, 1], but polynomials of
        # bounded variables are defined on [-1, 1], so add a second
        # transformation for that mapping
        var_trans_2 = define_iid_random_variable_transformation(
            uniform(),num_vars)
        var_trans = TransformationComposition([var_trans_1, var_trans_2])

        poly = PolynomialChaosExpansion()
        poly.configure({'poly_type':'jacobi','alpha_poly':0.,
                        'beta_poly':0.,'var_trans':var_trans})
        indices = compute_hyperbolic_indices(num_vars,degree,1.0)
        poly.set_indices(indices)
        
        num_candidate_samples = 10000
        generate_candidate_samples=lambda n: np.cos(
            np.random.uniform(0.,np.pi,(num_vars,n)))

        precond_func = lambda matrix, samples: christoffel_weights(matrix)
        canonical_samples, data_structures = get_fekete_samples(
            poly.canonical_basis_matrix,generate_candidate_samples,
            num_candidate_samples,preconditioning_function=precond_func)
        samples = var_trans.map_from_canonical_space(canonical_samples)
        assert np.allclose(
            canonical_samples,var_trans.map_to_canonical_space(samples))

        assert samples.max()<=1 and samples.min()>=0.

        c = np.random.uniform(0.,1.,num_vars)
        c*=20/c.sum()
        w = np.zeros_like(c); w[0] = np.random.uniform(0.,1.,1)
        genz_function = GenzFunction('oscillatory',num_vars,c=c,w=w)
        values = genz_function(samples)
        # function = lambda x: np.sum(x**2,axis=0)[:,np.newaxis]
        # values = function(samples)
        
        # Ensure the coefficients produce an interpolant
        coef = interpolate_fekete_samples(
            canonical_samples,values,data_structures)
        poly.set_coefficients(coef)
        
        assert np.allclose(poly(samples),values)

        # Compare the mean computed using quadrature with the mean computed
        # from the first expansion coefficient. This does not test that the
        # mean is correct, because the Rosenblatt transformation introduces
        # large errors which make it hard to compute an accurate mean from
        # either the PCE or quadrature
        quad_w = get_quadrature_weights_from_fekete_samples(
            canonical_samples,data_structures)
        values_at_quad_x = values[:,0]
        assert np.allclose(
            np.dot(values_at_quad_x,quad_w),poly.mean())
Example #4
    def test_lu_leja_interpolation(self):
        num_vars = 2
        degree = 15

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        # candidates must be generated in canonical PCE space
        num_candidate_samples = 10000
        def generate_candidate_samples(n): return np.cos(
            np.random.uniform(0., np.pi, (num_vars, n)))

        # must use canonical_basis_matrix to generate basis matrix
        num_leja_samples = indices.shape[1]-1
        def precond_func(matrix, samples): return christoffel_weights(matrix)
        samples, data_structures = get_lu_leja_samples(
            poly.canonical_basis_matrix, generate_candidate_samples,
            num_candidate_samples, num_leja_samples,
            preconditioning_function=precond_func)
        samples = var_trans.map_from_canonical_space(samples)

        assert samples.max() <= 1 and samples.min() >= 0.

        c = np.random.uniform(0., 1., num_vars)
        c *= 20/c.sum()
        w = np.zeros_like(c)
        w[0] = np.random.uniform(0., 1., 1)
        genz_function = GenzFunction('oscillatory', num_vars, c=c, w=w)
        values = genz_function(samples)

        # Ensure the coefficients produce an interpolant
        coef = interpolate_lu_leja_samples(samples, values, data_structures)

        # Ignore basis functions (columns) that were not considered during the
        # incomplete LU factorization
        poly.set_indices(poly.indices[:, :num_leja_samples])
        poly.set_coefficients(coef)

        assert np.allclose(poly(samples), values)

        quad_w = get_quadrature_weights_from_lu_leja_samples(
            samples, data_structures)
        values_at_quad_x = values[:, 0]

        # will get closer if degree is increased
        # print (np.dot(values_at_quad_x,quad_w),genz_function.integrate())
        assert np.allclose(
            np.dot(values_at_quad_x, quad_w), genz_function.integrate(),
            atol=1e-4)
Example #5
    def test_christoffel_function(self):
        num_vars=1
        degree=2
        alpha_poly= 0
        beta_poly=0
        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(-1,2),num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars,degree,1.0)
        poly.set_indices(indices)

        num_samples = 11
        samples = np.linspace(-1.,1.,num_samples)[np.newaxis,:]
        basis_matrix = poly.basis_matrix(samples)
        true_weights=1./np.linalg.norm(basis_matrix,axis=1)**2
        weights = 1./christoffel_function(samples,poly.basis_matrix)
        assert weights.shape[0]==num_samples
        assert np.allclose(true_weights,weights)

        # For a Gaussian quadrature rule of degree p, which exactly
        # integrates all polynomials up to and including degree 2p-1,
        # the quadrature weights are the Christoffel function
        # evaluated at the quadrature samples
        quad_samples,quad_weights = gauss_jacobi_pts_wts_1D(
            degree,alpha_poly,beta_poly)
        quad_samples = quad_samples[np.newaxis,:]
        basis_matrix = poly.basis_matrix(quad_samples)
        weights = 1./christoffel_function(quad_samples,poly.basis_matrix)
        assert np.allclose(weights,quad_weights)
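
The fact checked by the last assertion can be stated compactly: for orthonormal basis functions phi_0, ..., phi_{n-1}, the Christoffel function is

\[ \lambda_n(x) = \Big(\sum_{j=0}^{n-1} \phi_j(x)^2\Big)^{-1} \]

and, for an n-point Gauss rule, which integrates all polynomials up to and including degree 2n-1 exactly, the quadrature weights satisfy w_i = \lambda_n(x_i) at the quadrature nodes x_i.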
Example #6
    def setup_sd_opt_problem(self, SDOptProblem):
        from pyapprox.multivariate_polynomials import PolynomialChaosExpansion
        from pyapprox.variable_transformations import \
            define_iid_random_variable_transformation
        from pyapprox.indexing import compute_hyperbolic_indices

        num_vars = 1
        mu, sigma = 0, 1
        f, f_cdf, f_pdf, VaR, CVaR, ssd, ssd_disutil = \
            get_lognormal_example_exact_quantities(mu,sigma)

        nsamples = 4
        degree = 2
        samples = np.random.normal(0, 1, (1, nsamples))
        values = f(samples[0, :])[:, np.newaxis]

        pce = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            normal_rv(mu, sigma), num_vars)
        pce.configure({'poly_type': 'hermite', 'var_trans': var_trans})
        indices = compute_hyperbolic_indices(1, degree, 1.)
        pce.set_indices(indices)

        basis_matrix = pce.basis_matrix(samples)
        probabilities = np.ones((nsamples)) / nsamples

        sd_opt_problem = SDOptProblem(basis_matrix, values[:, 0], values[:, 0],
                                      probabilities)
        return sd_opt_problem
Example #7
    def test_fekete_gauss_lobatto(self):
        num_vars=1
        degree=3
        num_candidate_samples = 10000
        generate_candidate_samples=lambda n: np.linspace(-1.,1.,n)[np.newaxis,:]

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(-1,2),num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars,degree,1.0)
        poly.set_indices(indices)

        precond_func = lambda matrix, samples: 0.25*np.ones(matrix.shape[0])
        samples,_ = get_fekete_samples(
            poly.basis_matrix,generate_candidate_samples,
            num_candidate_samples,preconditioning_function=precond_func)
        assert samples.shape[1]==degree+1

        # The samples should be close to the Gauss-Lobatto samples
        gauss_lobatto_samples = np.asarray(
            [-1.0, -0.447213595499957939281834733746,
             0.447213595499957939281834733746, 1.0])
        assert np.allclose(np.sort(samples),gauss_lobatto_samples,atol=1e-1)
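
The hard-coded reference values are the four-point Gauss-Lobatto nodes on [-1, 1]: the endpoints together with the roots of the derivative of the degree-3 Legendre polynomial,

\[ P_3(x) = \tfrac{1}{2}(5x^3 - 3x), \qquad P_3'(x) = \tfrac{3}{2}(5x^2 - 1) = 0 \;\Rightarrow\; x = \pm 1/\sqrt{5} \approx \pm 0.4472135955. \]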
Example #8
    def test_compute_grammian_of_mixture_models_using_sparse_grid_quadrature(
            self):
        num_vars = 2
        degree = 3
        # rv_params = [[6,2],[2,6]]
        rv_params = [[1, 1]]
        leja_basename = None
        mixtures, mixture_univariate_quadrature_rules = \
            get_leja_univariate_quadrature_rules_of_beta_mixture(
                rv_params, leja_growth_rule, leja_basename)

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(-1, 2), num_vars)
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.configure(poly_opts)
        poly.set_indices(indices)

        num_mixtures = len(rv_params)
        mixture_univariate_growth_rules = [leja_growth_rule] * num_mixtures
        grammian_matrix = \
            compute_grammian_of_mixture_models_using_sparse_grid_quadrature(
                poly.basis_matrix, indices,
                mixture_univariate_quadrature_rules,
                mixture_univariate_growth_rules, num_vars)

        assert (np.all(np.isfinite(grammian_matrix)))

        if num_mixtures == 1:
            II = np.where(abs(grammian_matrix) > 1e-8)
            # check that the only non-zero inner products are on the
            # diagonal, i.e. integrals of basis terms multiplied by themselves
            assert np.allclose(II, np.tile(np.arange(indices.shape[1]),
                                           (2, 1)))
Example #9
    def test_multivariate_sampling_jacobi(self):

        num_vars = 2
        degree = 2
        alph = 1
        bet = 1.
        univ_inv = partial(idistinv_jacobi, alph=alph, bet=bet)
        num_samples = 10
        indices = np.ones((2, num_samples), dtype=int) * degree
        indices[1, :] = degree - 1
        xx = np.tile(
            np.linspace(0.01, 0.99, (num_samples))[np.newaxis, :],
            (num_vars, 1))
        samples = univ_inv(xx, indices)

        var_trans = AffineRandomVariableTransformation(
            [beta(bet + 1, alph + 1, -1, 2),
             beta(bet + 1, alph + 1, -1, 2)])
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        pce.set_indices(indices)

        reference_samples = inverse_transform_sampling_1d(
            pce.var_trans.variable.unique_variables[0],
            pce.recursion_coeffs[0], degree, xx[0, :])
        # differences are just caused by different tolerances in the
        # optimizers used to find the roots of the CDF
        assert np.allclose(reference_samples, samples[0, :], atol=1e-7)
        reference_samples = inverse_transform_sampling_1d(
            pce.var_trans.variable.unique_variables[0],
            pce.recursion_coeffs[0], degree - 1, xx[0, :])
        assert np.allclose(reference_samples, samples[1, :], atol=1e-7)
Example #10
def preconditioned_barycentric_weights():
    nmasses = 20
    xk = np.array(range(nmasses), dtype='float')
    pk = np.ones(nmasses) / nmasses
    var1 = float_rv_discrete(name='float_rv_discrete', values=(xk, pk))()
    univariate_variables = [var1]
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    var_trans = AffineRandomVariableTransformation(variable)
    growth_rule = partial(constant_increment_growth_rule, 2)
    quad_rule = get_univariate_leja_quadrature_rule(var1, growth_rule)
    samples = quad_rule(3)[0]
    num_samples = samples.shape[0]
    poly = PolynomialChaosExpansion()
    poly_opts = define_poly_options_from_variable_transformation(var_trans)
    poly_opts['numerically_generated_poly_accuracy_tolerance'] = 1e-5
    poly.configure(poly_opts)
    poly.set_indices(np.arange(num_samples))

    # precond_weights = np.sqrt(
    #    (poly.basis_matrix(samples[np.newaxis,:])**2).mean(axis=1))
    precond_weights = np.ones(num_samples)

    bary_weights = compute_barycentric_weights_1d(
        samples, interval_length=samples.max() - samples.min())

    def barysum(x, y, w, f):
        x = x[:, np.newaxis]
        y = y[np.newaxis, :]
        temp = w * f / (x - y)
        return np.sum(temp, axis=1)

    def function(x):
        return np.cos(2 * np.pi * x)

    y = samples
    print(samples)
    w = precond_weights * bary_weights
    # x = np.linspace(-3,3,301)
    x = np.linspace(-1, 1, 301)
    f = function(y) / precond_weights

    # the barycentric formula cannot be evaluated at the interpolation
    # nodes themselves, so remove them from the evaluation grid
    II = []
    for ii, xx in enumerate(x):
        if xx in samples:
            II.append(ii)
    x = np.delete(x, II)

    r1 = barysum(x, y, w, f)
    r2 = barysum(x, y, w, 1 / precond_weights)
    interp_vals = r1 / r2
    # import matplotlib.pyplot as plt
    # plt.plot(x, interp_vals, 'k')
    # plt.plot(samples, function(samples), 'ro')
    # plt.plot(x, function(x), 'r--')
    # plt.plot(samples,function(samples),'ro')
    # print(num_samples)
    # print(precond_weights)
    print(np.linalg.norm(interp_vals - function(x)))
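
The two calls to barysum evaluate the numerator and denominator of the barycentric interpolation formula of the second kind. With nodes y_j, barycentric weights w_j and data f(y_j), the interpolant returned as r1/r2 is

\[ p(x) = \frac{\sum_j \frac{w_j}{x - y_j} f(y_j)}{\sum_j \frac{w_j}{x - y_j}}. \]

The preconditioner values s_j enter as w_j -> s_j w_j and f(y_j) -> f(y_j)/s_j, so they cancel in exact arithmetic and only affect numerical conditioning.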
Example #11
    def test_fekete_interpolation(self):
        num_vars=2
        degree=15

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(),num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars,degree,1.0)
        poly.set_indices(indices)


        # candidates must be generated in canonical PCE space
        num_candidate_samples = 10000
        generate_candidate_samples=lambda n: np.cos(
            np.random.uniform(0.,np.pi,(num_vars,n)))

        # must use canonical_basis_matrix to generate basis matrix
        precond_func = lambda matrix, samples: christoffel_weights(matrix)
        samples, data_structures = get_fekete_samples(
            poly.canonical_basis_matrix,generate_candidate_samples,
            num_candidate_samples,preconditioning_function=precond_func)
        samples = var_trans.map_from_canonical_space(samples)

        assert samples.max()<=1 and samples.min()>=0.

        c = np.random.uniform(0.,1.,num_vars)
        c*=20/c.sum()
        w = np.zeros_like(c); w[0] = np.random.uniform(0.,1.,1)
        genz_function = GenzFunction('oscillatory',num_vars,c=c,w=w)
        values = genz_function(samples)
        
        # Ensure the coefficients produce an interpolant
        coef = interpolate_fekete_samples(samples,values,data_structures)
        poly.set_coefficients(coef)
        assert np.allclose(poly(samples),values)

        quad_w = get_quadrature_weights_from_fekete_samples(
            samples,data_structures)
        values_at_quad_x = values[:,0]
        # increase degree if a smaller atol is desired
        assert np.allclose(
            np.dot(values_at_quad_x,quad_w),genz_function.integrate(),
            atol=1e-4)
Example #12
    def test_random_christoffel_sampling(self):
        num_vars = 2
        degree = 10

        alpha_poly = 1
        beta_poly = 1

        alpha_stat = beta_poly + 1
        beta_stat = alpha_poly + 1

        num_samples = int(1e4)
        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        univariate_pdf = partial(stats.beta.pdf, a=alpha_stat, b=beta_stat)
        probability_density = partial(tensor_product_pdf,
                                      univariate_pdfs=univariate_pdf)

        envelope_factor = 10

        def generate_proposal_samples(n):
            return np.random.uniform(0., 1., size=(num_vars, n))

        def proposal_density(x):
            return np.ones(x.shape[1])

        # Unlike Fekete and Leja sampling, we can and should use
        # pce.basis_matrix here. If canonical_basis_matrix were used, the
        # densities would also have to be mapped to canonical space, which
        # can be difficult
        samples = random_induced_measure_sampling(
            num_samples, num_vars, poly.basis_matrix, probability_density,
            proposal_density, generate_proposal_samples, envelope_factor)

        def univariate_quadrature_rule(x):
            x, w = gauss_jacobi_pts_wts_1D(x, alpha_poly, beta_poly)
            x = (x + 1) / 2
            return x, w

        x, w = get_tensor_product_quadrature_rule(degree * 2 + 1, num_vars,
                                                  univariate_quadrature_rule)
        # print(samples.mean(axis=1),x.dot(w))
        assert np.allclose(
            christoffel_function(x, poly.basis_matrix, True).dot(w), 1.0)
        assert np.allclose(x.dot(w), samples.mean(axis=1), atol=1e-2)
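
The normalization assertion reflects the density that random_induced_measure_sampling targets: if rho is the probability density of the variables and phi_0, ..., phi_{N-1} are the orthonormal basis terms, the induced density is

\[ \rho_N(x) = \rho(x)\,\frac{1}{N}\sum_{j=0}^{N-1}\phi_j(x)^2, \]

which integrates to one because each basis function satisfies E[phi_j^2] = 1 (this assumes the normalize flag of christoffel_function divides the sum by the number of basis terms).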
Example #13
    def test_adaptive_multivariate_sampling_jacobi(self):

        num_vars = 2
        degree = 6
        alph = 5
        bet = 5.

        var_trans = AffineRandomVariableTransformation(
            IndependentMultivariateRandomVariable([beta(alph, bet, -1, 3)],
                                                  [np.arange(num_vars)]))
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, 1, 1.0)
        pce.set_indices(indices)
        cond_tol = 1e2
        samples = generate_induced_samples_migliorati_tolerance(pce, cond_tol)

        for dd in range(2, degree):
            num_prev_samples = samples.shape[1]
            new_indices = compute_hyperbolic_level_indices(num_vars, dd, 1.)
            samples = increment_induced_samples_migliorati(
                pce, cond_tol, samples, indices, new_indices)
            indices = np.hstack((indices, new_indices))
            pce.set_indices(indices)
            new_samples = samples[:, num_prev_samples:]
            prev_samples = samples[:, :num_prev_samples]
            #fig,axs = plt.subplots(1,2,figsize=(2*8,6))
            #from pyapprox.visualization import plot_2d_indices
            #axs[0].plot(prev_samples[0,:],prev_samples[1,:],'ko');
            #axs[0].plot(new_samples[0,:],new_samples[1,:],'ro');
            #plot_2d_indices(indices,other_indices=new_indices,ax=axs[1]);
            #plt.show()

        samples = var_trans.map_from_canonical_space(samples)
        cond = compute_preconditioned_basis_matrix_condition_number(
            pce.basis_matrix, samples)
        assert cond < cond_tol
Example #14
def get_total_degree_polynomials(univariate_variables, degrees):
    assert type(univariate_variables[0]) == list
    assert len(univariate_variables) == len(degrees)
    polys, nparams = [], []
    for ii in range(len(degrees)):
        poly = PolynomialChaosExpansion()
        var_trans = AffineRandomVariableTransformation(
            univariate_variables[ii])
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)
        indices = compute_hyperbolic_indices(var_trans.num_vars(), degrees[ii],
                                             1.0)
        poly.set_indices(indices)
        polys.append(poly)
        nparams.append(indices.shape[1])
    return polys, np.array(nparams)
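
A hypothetical call illustrating the expected argument shapes; the variable list below is an example, not taken from the source, and the pyapprox imports used throughout these examples are assumed to be in scope:

from scipy import stats
# each entry of the outer list defines one PCE; here a single 2-variable PCE
polys, nparams = get_total_degree_polynomials(
    [[stats.uniform(0, 1), stats.norm(0, 1)]], [2])
# 6 == C(2+2, 2), the number of terms of total degree <= 2 in 2 variables
assert nparams[0] == 6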
Example #15
    def test_oli_leja_interpolation(self):
        num_vars = 2
        degree = 5

        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        num_candidate_samples = 10000

        # oli_leja requires candidates in user space
        def generate_candidate_samples(n): return (np.cos(
            np.random.uniform(0., np.pi, (num_vars, n)))+1)/2.

        # must use canonical_basis_matrix to generate basis matrix
        num_leja_samples = indices.shape[1]-3
        def precond_func(samples): return 1./christoffel_function(
            samples, poly.basis_matrix)
        samples, data_structures = get_oli_leja_samples(
            poly, generate_candidate_samples,
            num_candidate_samples, num_leja_samples,
            preconditioning_function=precond_func)

        assert samples.max() <= 1 and samples.min() >= 0.

        # c = np.random.uniform(0., 1., num_vars)
        # c *= 20/c.sum()
        # w = np.zeros_like(c)
        # w[0] = np.random.uniform(0., 1., 1)
        # genz_function = GenzFunction('oscillatory', num_vars, c=c, w=w)
        # values = genz_function(samples)
        # exact_integral = genz_function.integrate()

        values = np.sum(samples**2, axis=0)[:, None]
        # exact_integral = num_vars/3

        # Ensure we have produced an interpolant
        oli_solver = data_structures[0]
        poly = oli_solver.get_current_interpolant(samples, values)
        assert np.allclose(poly(samples), values)
Example #16
    def test_oli_leja_interpolation(self):
        num_vars=2
        degree=5
        
        poly = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            uniform(),num_vars)
        opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(opts)
        indices = compute_hyperbolic_indices(num_vars,degree,1.0)
        poly.set_indices(indices)

        # oli_leja requires candidates in user space
        num_candidate_samples = 10000
        generate_candidate_samples=lambda n: (np.cos(
            np.random.uniform(0.,np.pi,(num_vars,n)))+1)/2.

        # must use canonical_basis_matrix to generate basis matrix
        num_leja_samples = indices.shape[1]-1
        precond_func = lambda samples: 1./christoffel_function(
            samples,poly.basis_matrix)
        samples, data_structures = get_oli_leja_samples(
            poly,generate_candidate_samples,
            num_candidate_samples,num_leja_samples,
            preconditioning_function=precond_func)
        #samples = var_trans.map_from_canonical_space(samples)

        assert samples.max()<=1 and samples.min()>=0.

        c = np.random.uniform(0.,1.,num_vars)
        c*=20/c.sum()
        w = np.zeros_like(c); w[0] = np.random.uniform(0.,1.,1)
        genz_function = GenzFunction('oscillatory',num_vars,c=c,w=w)
        values = genz_function(samples)
        
        # Ensure we have produced an interpolant
        oli_solver = data_structures[0]
        poly = oli_solver.get_current_interpolant(samples,values)
        assert np.allclose(poly(samples),values)
Example #17
    def test_solve_linear_system_method(self):
        num_vars = 1
        alpha_stat = 2
        beta_stat = 2
        degree = 2

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)

        def univariate_quadrature_rule(n):
            x, w = gauss_jacobi_pts_wts_1D(n, beta_stat - 1, alpha_stat - 1)
            x = (x + 1) / 2.
            return x, w

        poly_moments = \
            compute_polynomial_moments_using_tensor_product_quadrature(
                pce.basis_matrix, 2*degree, num_vars,
                univariate_quadrature_rule)

        R_inv = compute_rotation_from_moments_linear_system(poly_moments)

        R_inv_gs = compute_rotation_from_moments_gram_schmidt(poly_moments)
        assert np.allclose(R_inv, R_inv_gs)

        compute_moment_matrix_function = partial(
            compute_moment_matrix_using_tensor_product_quadrature,
            num_samples=10 * degree,
            num_vars=num_vars,
            univariate_quadrature_rule=univariate_quadrature_rule)

        apc = APC(compute_moment_matrix_function)
        apc.configure(pce_opts)
        apc.set_indices(indices)
        assert np.allclose(R_inv, apc.R_inv)
Example #18
    def test_multivariate_migliorati_sampling_jacobi(self):

        num_vars = 1
        degree = 20
        alph = 5
        bet = 5.
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        var_trans = AffineRandomVariableTransformation(
            IndependentMultivariateRandomVariable([beta(alph, bet, -1, 2)],
                                                  [np.arange(num_vars)]))
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        pce.set_indices(indices)

        cond_tol = 1e1
        samples = generate_induced_samples_migliorati_tolerance(pce, cond_tol)
        cond = compute_preconditioned_basis_matrix_condition_number(
            pce.canonical_basis_matrix, samples)
        assert cond < cond_tol
Example #19
    def test_least_interpolation_lu_equivalence_in_1d(self):
        num_vars = 1
        alpha_stat = 2; beta_stat  = 5
        max_num_pts = 100
        
        var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat,beta_stat),num_vars)
        pce_opts = {'alpha_poly':beta_stat-1,'beta_poly':alpha_stat-1,
                    'var_trans':var_trans,'poly_type':'jacobi',}

        # Set oli options
        oli_opts = {'verbosity':0,
                    'assume_non_degeneracy':False}

        basis_generator = \
          lambda num_vars,degree: (degree+1,compute_hyperbolic_level_indices(
              num_vars,degree,1.0))

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)

        oli_solver = LeastInterpolationSolver()
        oli_solver.configure(oli_opts)
        oli_solver.set_pce(pce)

        # univariate_beta_pdf = partial(beta.pdf,a=alpha_stat,b=beta_stat)
        # univariate_pdf = lambda x: univariate_beta_pdf(x)
        # preconditioning_function = partial(
        #     tensor_product_pdf,univariate_pdfs=univariate_pdf)
        from pyapprox.indexing import get_total_degree
        max_degree = get_total_degree(num_vars,max_num_pts)
        indices = compute_hyperbolic_indices(num_vars, max_degree, 1.)
        pce.set_indices(indices)
        
        from pyapprox.polynomial_sampling import christoffel_function
        preconditioning_function = lambda samples: 1./christoffel_function(
            samples,pce.basis_matrix)
    
        oli_solver.set_preconditioning_function(preconditioning_function)
        oli_solver.set_basis_generator(basis_generator)
        
        initial_pts = None
        candidate_samples = np.linspace(0.,1.,1000)[np.newaxis,:]

        oli_solver.factorize(
            candidate_samples, initial_pts,
            num_selected_pts = max_num_pts)

        oli_samples = oli_solver.get_current_points()

        from pyapprox.utilities import truncated_pivoted_lu_factorization
        pce.set_indices(oli_solver.selected_basis_indices)
        basis_matrix = pce.basis_matrix(candidate_samples)
        weights = np.sqrt(preconditioning_function(candidate_samples))
        basis_matrix = np.dot(np.diag(weights),basis_matrix)
        L,U,p = truncated_pivoted_lu_factorization(
            basis_matrix,max_num_pts)
        assert p.shape[0]==max_num_pts
        lu_samples = candidate_samples[:,p]

        assert np.allclose(lu_samples,oli_samples)

        L1,U1,H1 = oli_solver.get_current_LUH_factors()
        
        true_permuted_matrix = (pce.basis_matrix(lu_samples).T*weights[p]).T
        assert np.allclose(np.dot(L,U),true_permuted_matrix)
        assert np.allclose(np.dot(L1,np.dot(U1,H1)),true_permuted_matrix)
Example #20
    def test_compute_moment_matrix_combination_sparse_grid(self):
        """
        Test use of density_function in
        compute_grammian_matrix_using_combination_sparse_grid()
        """
        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        degree = 2

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)

        random_var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat), num_vars)

        def univariate_pdf(x):
            return stats.beta.pdf(x, a=alpha_stat, b=beta_stat)

        density_function = partial(tensor_product_pdf,
                                   univariate_pdfs=univariate_pdf)

        true_univariate_quadrature_rule = partial(gauss_jacobi_pts_wts_1D,
                                                  alpha_poly=beta_stat - 1,
                                                  beta_poly=alpha_stat - 1)

        from pyapprox.univariate_quadrature import \
            clenshaw_curtis_in_polynomial_order, clenshaw_curtis_rule_growth
        quad_rule_opts = {
            'quad_rules': clenshaw_curtis_in_polynomial_order,
            'growth_rules': clenshaw_curtis_rule_growth,
            'unique_quadrule_indices': None
        }

        compute_grammian_function = partial(
            compute_grammian_matrix_using_combination_sparse_grid,
            var_trans=pce_var_trans,
            max_num_samples=100,
            density_function=density_function,
            quad_rule_opts=quad_rule_opts)

        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1,
            num_vars,
            true_univariate_quadrature_rule,
            transform_samples=random_var_trans.map_from_canonical_space)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        pce.set_indices(indices)
        basis_matrix = pce.basis_matrix(samples)
        assert np.allclose(np.dot(basis_matrix.T * weights, basis_matrix),
                           compute_grammian_function(pce.basis_matrix, None))

        apc = APC(compute_grammian_function=compute_grammian_function)
        apc.configure(pce_opts)
        apc.set_indices(indices)

        apc_basis_matrix = apc.basis_matrix(samples)

        # print(np.dot(apc_basis_matrix.T*weights,apc_basis_matrix))
        assert np.allclose(
            np.dot(apc_basis_matrix.T * weights, apc_basis_matrix),
            np.eye(apc_basis_matrix.shape[1]))
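
The final assertion is the defining property of the arbitrary polynomial chaos (APC) basis. A sketch of why a triangular rotation achieves it, using the Cholesky convention (the Gram-Schmidt routine used above computes an equivalent triangular factor): with Gram matrix G,

\[ G = \mathbb{E}\big[\phi(x)\phi(x)^T\big] = LL^T, \qquad \psi(x) = L^{-1}\phi(x) \;\Rightarrow\; \mathbb{E}\big[\psi(x)\psi(x)^T\big] = L^{-1}GL^{-T} = I. \]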
Example #21
def gaussian_leja_quadrature_rule(level,
                                  growth_rule=leja_growth_rule,
                                  samples_filename=None,
                                  return_weights_for_all_levels=True,
                                  initial_points=None):
    """
    Return the samples and weights of the Leja quadrature rule for the
    standard Gaussian probability measure.

    By construction these rules have polynomial ordering.

    Parameters
    ----------
    level : integer
        The level of the isotropic sparse grid.

    samples_filename : string
         Name of file to save leja samples and weights to

    Returns
    -------
    ordered_samples_1d : np.ndarray (num_samples_1d)
        The reordered samples.

    ordered_weights_1d : np.ndarray (num_samples_1d)
        The reordered weights.
    """
    from pyapprox.multivariate_polynomials import PolynomialChaosExpansion
    from pyapprox.leja_sequences import get_leja_sequence_1d,\
        get_quadrature_weights_from_samples
    num_vars = 1
    num_leja_samples = growth_rule(level)

    # freezing a scipy Gaussian rv like below has huge overhead:
    # it creates a docstring each time, which adds up to many seconds
    # for repeated calls to pdf
    from pyapprox.utilities import gaussian_pdf, gaussian_pdf_derivative
    univariate_weight_function = partial(gaussian_pdf, 0, 1)
    univariate_weight_function_deriv = partial(gaussian_pdf_derivative, 0, 1)

    weight_function = partial(evaluate_tensor_product_function,
                              [univariate_weight_function] * num_vars)

    weight_function_deriv = partial(gradient_of_tensor_product_function,
                                    [univariate_weight_function] * num_vars,
                                    [univariate_weight_function_deriv] *
                                    num_vars)

    assert np.allclose((univariate_weight_function(0.5 + 1e-8) -
                        univariate_weight_function(0.5)) / 1e-8,
                       univariate_weight_function_deriv(0.5),
                       atol=1e-6)

    poly = PolynomialChaosExpansion()
    # must be imported locally otherwise I have a circular dependency
    from pyapprox.variable_transformations import \
        define_iid_random_variable_transformation
    from scipy.stats import norm
    var_trans = define_iid_random_variable_transformation(norm(), num_vars)
    poly_opts = {'poly_type': 'hermite', 'var_trans': var_trans}
    poly.configure(poly_opts)

    if samples_filename is None or not os.path.exists(samples_filename):
        ranges = [None, None]
        if initial_points is None:
            initial_points = np.asarray([[0.0]]).T
        leja_sequence = get_leja_sequence_1d(num_leja_samples, initial_points,
                                             poly, weight_function,
                                             weight_function_deriv, ranges)
        if samples_filename is not None:
            np.savez(samples_filename, samples=leja_sequence)
    else:
        leja_sequence = np.load(samples_filename)['samples']
        assert leja_sequence.shape[1] >= growth_rule(level)
        leja_sequence = leja_sequence[:, :growth_rule(level)]

    indices = np.arange(growth_rule(level))[np.newaxis, :]
    poly.set_indices(indices)
    ordered_weights_1d = get_leja_sequence_quadrature_weights(
        leja_sequence, growth_rule, poly.basis_matrix, weight_function, level,
        return_weights_for_all_levels)
    return leja_sequence[0, :], ordered_weights_1d
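
A minimal usage sketch. The flag semantics here are an assumption based on the parameter name: with return_weights_for_all_levels=False a single weight vector should be returned.

# hypothetical call; assumes the imports above are in scope
x, w = gaussian_leja_quadrature_rule(2, return_weights_for_all_levels=False)
# the rule integrates constants exactly, so the weights should sum to ~1
print(x.shape, w.sum())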
Example #22
def beta_leja_quadrature_rule(alpha_stat,
                              beta_stat,
                              level,
                              growth_rule=leja_growth_rule,
                              samples_filename=None,
                              return_weights_for_all_levels=True,
                              initial_points=None):
    """
    Return the samples and weights of the Leja quadrature rule for the beta
    probability measure. 

    By construction these rules have polynomial ordering.

    Parameters
    ----------
    level : integer
        The level of the isotropic sparse grid.

    alpha_stat : integer
        The alpha shape parameter of the Beta distribution

    beta_stat : integer
        The beta shape parameter of the Beta distribution

    samples_filename : string
         Name of file to save leja samples and weights to

    Returns
    -------
    ordered_samples_1d : np.ndarray (num_samples_1d)
        The reordered samples.

    ordered_weights_1d : np.ndarray (num_samples_1d)
        The reordered weights.
    """
    from pyapprox.multivariate_polynomials import PolynomialChaosExpansion
    from pyapprox.leja_sequences import get_leja_sequence_1d,\
        get_quadrature_weights_from_samples
    num_vars = 1
    num_leja_samples = growth_rule(level)
    #print(('num_leja_samples',num_leja_samples))

    # freezing a scipy beta rv like below has huge overhead:
    # it creates a docstring each time, which adds up to many seconds
    # for repeated calls to pdf
    #univariate_weight_function=lambda x: beta_rv(
    #    alpha_stat,beta_stat).pdf((x+1)/2)/2
    #univariate_weight_function = lambda x: beta_rv.pdf(
    #    (x+1)/2,alpha_stat,beta_stat)/2
    univariate_weight_function = lambda x: beta_pdf(alpha_stat, beta_stat,
                                                    (x + 1) / 2) / 2
    univariate_weight_function_deriv = lambda x: beta_pdf_derivative(
        alpha_stat, beta_stat, (x + 1) / 2) / 4

    weight_function = partial(evaluate_tensor_product_function,
                              [univariate_weight_function] * num_vars)

    weight_function_deriv = partial(gradient_of_tensor_product_function,
                                    [univariate_weight_function] * num_vars,
                                    [univariate_weight_function_deriv] *
                                    num_vars)

    # assert np.allclose(
    #     (univariate_weight_function(0.5+1e-8)-
    #          univariate_weight_function(0.5))/1e-8,
    #     univariate_weight_function_deriv(0.5),atol=1e-6)

    poly = PolynomialChaosExpansion()
    # must be imported locally otherwise I have a circular dependency
    from pyapprox.variable_transformations import \
        define_iid_random_variable_transformation
    from scipy.stats import uniform
    var_trans = define_iid_random_variable_transformation(
        uniform(-1, 2), num_vars)

    poly_opts = {
        'poly_type': 'jacobi',
        'alpha_poly': beta_stat - 1,
        'beta_poly': alpha_stat - 1,
        'var_trans': var_trans
    }
    poly.configure(poly_opts)

    if samples_filename is None or not os.path.exists(samples_filename):
        ranges = [-1, 1]
        from scipy.stats import beta as beta_rv
        if initial_points is None:
            initial_points = np.asarray(
                [[2 * beta_rv(alpha_stat, beta_stat).ppf(0.5) - 1]]).T
        leja_sequence = get_leja_sequence_1d(num_leja_samples, initial_points,
                                             poly, weight_function,
                                             weight_function_deriv, ranges)
        if samples_filename is not None:
            np.savez(samples_filename, samples=leja_sequence)
    else:
        leja_sequence = np.load(samples_filename)['samples']
        #print (leja_sequence.shape[1],growth_rule(level),level)
        assert leja_sequence.shape[1] >= growth_rule(level)
        leja_sequence = leja_sequence[:, :growth_rule(level)]

    indices = np.arange(growth_rule(level))[np.newaxis, :]
    poly.set_indices(indices)
    ordered_weights_1d = get_leja_sequence_quadrature_weights(
        leja_sequence, growth_rule, poly.basis_matrix, weight_function, level,
        return_weights_for_all_levels)
    return leja_sequence[0, :], ordered_weights_1d
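
A similar usage sketch for the beta rule. The samples are returned on the canonical interval [-1, 1] (the var_trans above is uniform on [-1, 1]), so map them to the [0, 1] support of the Beta variable before using them in user space:

# hypothetical call with alpha_stat=2, beta_stat=2 at level 2
x, w = beta_leja_quadrature_rule(2, 2, 2, return_weights_for_all_levels=False)
x01 = (x + 1) / 2  # canonical [-1, 1] -> user [0, 1]
# E[X] = 1/2 for Beta(2, 2); the rule should reproduce it closely
print(x01.dot(w))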
Example #23
    def test_discrete_induced_sampling(self):
        degree = 3

        nmasses1 = 10
        mass_locations1 = np.geomspace(1.0, 512.0, num=nmasses1)
        #mass_locations1 = np.arange(0,nmasses1)
        masses1 = np.ones(nmasses1, dtype=float) / nmasses1
        var1 = float_rv_discrete(name='float_rv_discrete',
                                 values=(mass_locations1, masses1))()

        nmasses2 = 10
        mass_locations2 = np.arange(0, nmasses2)
        # if increased beyond 16 the unmodified masses become ill conditioned
        masses2 = np.geomspace(1.0, 16.0, num=nmasses2)
        #masses2  = np.ones(nmasses2,dtype=float)/nmasses2

        masses2 /= masses2.sum()
        var2 = float_rv_discrete(name='float_rv_discrete',
                                 values=(mass_locations2, masses2))()

        var_trans = AffineRandomVariableTransformation([var1, var2])
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(pce.num_vars(), degree, 1.0)
        pce.set_indices(indices)

        num_samples = int(1e4)
        np.random.seed(1)
        canonical_samples = generate_induced_samples(pce, num_samples)
        samples = var_trans.map_from_canonical_space(canonical_samples)

        np.random.seed(1)
        canonical_xk = [
            2 * get_distribution_info(var1)[2]['xk'] - 1,
            2 * get_distribution_info(var2)[2]['xk'] - 1
        ]
        basis_matrix_generator = partial(basis_matrix_generator_1d, pce,
                                         degree)
        canonical_samples1 = discrete_induced_sampling(
            basis_matrix_generator, pce.indices, canonical_xk,
            [var1.dist.pk, var2.dist.pk], num_samples)
        samples1 = var_trans.map_from_canonical_space(canonical_samples1)

        def density(x):
            return var1.pdf(x[0, :]) * var2.pdf(x[1, :])

        envelope_factor = 30

        def generate_proposal_samples(n):
            samples = np.vstack([var1.rvs(n), var2.rvs(n)])
            return samples

        proposal_density = density

        # Unlike Fekete and Leja sampling, we can and should use
        # pce.basis_matrix here. If canonical_basis_matrix were used, the
        # densities would also have to be mapped to canonical space, which
        # can be difficult
        samples2 = random_induced_measure_sampling(num_samples, pce.num_vars(),
                                                   pce.basis_matrix, density,
                                                   proposal_density,
                                                   generate_proposal_samples,
                                                   envelope_factor)

        def induced_density(x):
            vals = density(x) * christoffel_function(x, pce.basis_matrix, True)
            return vals

        from pyapprox.utilities import cartesian_product, outer_product
        from pyapprox.polynomial_sampling import christoffel_function
        quad_samples = cartesian_product([var1.dist.xk, var2.dist.xk])
        quad_weights = outer_product([var1.dist.pk, var2.dist.pk])

        #print(canonical_samples.min(axis=1),canonical_samples.max(axis=1))
        #print(samples.min(axis=1),samples.max(axis=1))
        #print(canonical_samples1.min(axis=1),canonical_samples1.max(axis=1))
        #print(samples1.min(axis=1),samples1.max(axis=1))
        # import matplotlib.pyplot as plt
        # plt.plot(quad_samples[0,:],quad_samples[1,:],'s')
        # plt.plot(samples[0,:],samples[1,:],'o')
        # plt.plot(samples1[0,:],samples1[1,:],'*')
        # plt.show()

        rtol = 1e-2
        assert np.allclose(quad_weights, density(quad_samples))
        assert np.allclose(density(quad_samples).sum(), 1)
        assert np.allclose(
            christoffel_function(quad_samples, pce.basis_matrix,
                                 True).dot(quad_weights), 1.0)
        true_induced_mean = quad_samples.dot(induced_density(quad_samples))
        print(true_induced_mean)
        print(samples.mean(axis=1))
        print(samples1.mean(axis=1))
        print(samples2.mean(axis=1))
        print(
            samples1.mean(axis=1) - true_induced_mean,
            true_induced_mean * rtol)
        #print(samples2.mean(axis=1))
        assert np.allclose(samples.mean(axis=1), true_induced_mean, rtol=rtol)
        assert np.allclose(samples1.mean(axis=1), true_induced_mean, rtol=rtol)
        assert np.allclose(samples2.mean(axis=1), true_induced_mean, rtol=rtol)
Example #24
    def help_test_stochastic_dominance(self,
                                       solver,
                                       nsamples,
                                       degree,
                                       disutility=None,
                                       plot=False):
        """
        disutilty is none plot emprical CDF
        disutility is True plot disutility SSD
        disutility is False plot standard SSD
        """
        from pyapprox.multivariate_polynomials import PolynomialChaosExpansion
        from pyapprox.variable_transformations import \
            define_iid_random_variable_transformation
        from pyapprox.indexing import compute_hyperbolic_indices
        num_vars = 1
        mu, sigma = 0, 1
        f, f_cdf, f_pdf, VaR, CVaR, ssd, ssd_disutil = \
            get_lognormal_example_exact_quantities(mu,sigma)

        samples = np.random.normal(0, 1, (1, nsamples))
        samples = np.sort(samples)
        values = f(samples[0, :])[:, np.newaxis]

        pce = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            normal_rv(mu, sigma), num_vars)
        pce.configure({'poly_type': 'hermite', 'var_trans': var_trans})
        indices = compute_hyperbolic_indices(1, degree, 1.)
        pce.set_indices(indices)

        eta_indices = None
        #eta_indices=np.argsort(values[:,0])[nsamples//2:]
        coef, sd_opt_problem = solver(samples,
                                      values,
                                      pce.basis_matrix,
                                      eta_indices=eta_indices)

        pce.set_coefficients(coef[:, np.newaxis])
        pce_values = pce(samples)[:, 0]

        ygrid = pce_values.copy()
        if disutility is not None:
            if disutility:
                ygrid = -ygrid[::-1]
            stat_function = partial(compute_conditional_expectations,
                                    ygrid,
                                    disutility_formulation=disutility)
            if disutility:
                # Disutility SSD
                eps = 1e-14
                assert np.all(
                    stat_function(values[:, 0]) <= stat_function(pce_values) +
                    eps)
            else:
                # SSD
                assert np.all(
                    stat_function(pce_values) <= stat_function(values[:, 0]))

        else:
            # FSD
            from pyapprox.density import EmpiricalCDF
            stat_function = lambda x: EmpiricalCDF(x)(ygrid)
            assert np.all(
                stat_function(pce_values) <= stat_function(values[:, 0]))

        if plot:
            lstsq_pce = PolynomialChaosExpansion()
            lstsq_pce.configure({
                'poly_type': 'hermite',
                'var_trans': var_trans
            })
            lstsq_pce.set_indices(indices)

            lstsq_coef = solve_least_squares_regression(
                samples, values, lstsq_pce.basis_matrix)
            lstsq_pce.set_coefficients(lstsq_coef)

            #axs[1].plot(ygrid,stat_function(values[:,0]),'ko',ms=12)
            #axs[1].plot(ygrid,stat_function(pce_values),'rs')
            #axs[1].plot(ygrid,stat_function(lstsq_pce(samples)[:,0]),'b*')

            ylb, yub = (values.min() - abs(values.max()) * .1,
                        values.max() + abs(values.max()) * .1)

            ygrid = np.linspace(ylb, yub, 101)
            ygrid = np.sort(np.concatenate([ygrid, pce_values]))
            if disutility is not None:
                if disutility:
                    ygrid = -ygrid[::-1]
                stat_function = partial(compute_conditional_expectations,
                                        ygrid,
                                        disutility_formulation=disutility)
            else:
                print('here')
                print(ygrid)

                def stat_function(x):
                    assert x.ndim == 1
                    #vals = sd_opt_problem.smoother1(
                    #x[np.newaxis,:]-ygrid[:,np.newaxis]).mean(axis=1)
                    vals = EmpiricalCDF(x)(ygrid)
                    return vals

            fig, axs = plot_1d_functions_and_statistics(
                [f, pce, lstsq_pce], ['Exact', 'SSD', 'Lstsq'], samples,
                values, stat_function, ygrid)

            plt.show()
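
For reference, the dominance conditions asserted above, written for all thresholds eta: the PCE approximation Y dominates the data Z in second order when

\[ \mathbb{E}\big[(\eta - Y)_+\big] \le \mathbb{E}\big[(\eta - Z)_+\big] \quad \text{for all } \eta, \]

while the disutility formulation uses E[(Y - eta)_+] with the inequality reversed; these are the two inequalities the branches of the test check empirically on the sample values.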
Example #25
    def help_discrete_induced_sampling(self, var1, var2, envelope_factor):
        degree = 3

        var_trans = AffineRandomVariableTransformation([var1, var2])
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(pce.num_vars(), degree, 1.0)
        pce.set_indices(indices)

        num_samples = int(3e4)
        np.random.seed(1)
        canonical_samples = generate_induced_samples(pce, num_samples)
        samples = var_trans.map_from_canonical_space(canonical_samples)

        np.random.seed(1)
        #canonical_xk = [2*get_distribution_info(var1)[2]['xk']-1,
        #                2*get_distribution_info(var2)[2]['xk']-1]
        xk = np.array([
            get_probability_masses(var)[0]
            for var in var_trans.variable.all_variables()
        ])
        pk = np.array([
            get_probability_masses(var)[1]
            for var in var_trans.variable.all_variables()
        ])
        canonical_xk = var_trans.map_to_canonical_space(xk)
        basis_matrix_generator = partial(basis_matrix_generator_1d, pce,
                                         degree)
        canonical_samples1 = discrete_induced_sampling(basis_matrix_generator,
                                                       pce.indices,
                                                       canonical_xk, pk,
                                                       num_samples)
        samples1 = var_trans.map_from_canonical_space(canonical_samples1)

        def univariate_pdf(var, x):
            if hasattr(var.dist, 'pdf'):
                return var.pdf(x)
            else:
                return var.pmf(x)

        def density(x):
            # some issue with native scipy.pmf
            #assert np.allclose(var1.pdf(x[0, :]),var1.pmf(x[0, :]))
            return univariate_pdf(var1, x[0, :]) * univariate_pdf(
                var2, x[1, :])

        def generate_proposal_samples(n):
            samples = np.vstack([var1.rvs(n), var2.rvs(n)])
            return samples

        proposal_density = density

        # Unlike Fekete and Leja sampling, we can and should use
        # pce.basis_matrix here. If canonical_basis_matrix were used, the
        # densities would also have to be mapped to canonical space, which
        # can be difficult
        samples2 = random_induced_measure_sampling(num_samples, pce.num_vars(),
                                                   pce.basis_matrix, density,
                                                   proposal_density,
                                                   generate_proposal_samples,
                                                   envelope_factor)

        def induced_density(x):
            vals = density(x) * christoffel_function(x, pce.basis_matrix, True)
            return vals

        from pyapprox.utilities import cartesian_product, outer_product
        from pyapprox.polynomial_sampling import christoffel_function
        quad_samples = cartesian_product([xk[0], xk[1]])
        quad_weights = outer_product([pk[0], pk[1]])

        # print(canonical_samples.min(axis=1),canonical_samples.max(axis=1))
        # print(samples.min(axis=1),samples.max(axis=1))
        # print(canonical_samples1.min(axis=1),canonical_samples1.max(axis=1))
        # print(samples1.min(axis=1),samples1.max(axis=1))
        # import matplotlib.pyplot as plt
        # plt.plot(quad_samples[0,:],quad_samples[1,:],'s')
        # plt.plot(samples[0,:],samples[1,:],'o')
        # plt.plot(samples1[0,:],samples1[1,:],'*')
        # plt.show()

        rtol = 1e-2
        assert np.allclose(quad_weights, density(quad_samples))
        assert np.allclose(density(quad_samples).sum(), 1)
        assert np.allclose(
            christoffel_function(quad_samples, pce.basis_matrix,
                                 True).dot(quad_weights), 1.0)
        true_induced_mean = quad_samples.dot(induced_density(quad_samples))
        # print(true_induced_mean)
        # print(samples.mean(axis=1))
        # print(samples1.mean(axis=1))
        # print(samples2.mean(axis=1))
        # print(samples1.mean(axis=1)-true_induced_mean, true_induced_mean*rtol)
        # print(samples2.mean(axis=1))
        assert np.allclose(samples.mean(axis=1), true_induced_mean, rtol=rtol)
        assert np.allclose(samples1.mean(axis=1), true_induced_mean, rtol=rtol)
        assert np.allclose(samples2.mean(axis=1), true_induced_mean, rtol=rtol)
Example #26
def helper_least_factorization(pts, model, var_trans, pce_opts, oli_opts,
                               basis_generator,
                               max_num_pts=None, initial_pts=None,
                               pce_degree=None,
                               preconditioning_function=None,
                               verbose=False,
                               points_non_degenerate=False,
                               exact_mean=None):

    num_vars = pts.shape[0]

    pce = PolynomialChaosExpansion()
    pce.configure(pce_opts)

    oli_solver = LeastInterpolationSolver()
    oli_solver.configure(oli_opts)
    oli_solver.set_pce(pce)
    
    if preconditioning_function is not None:
        oli_solver.set_preconditioning_function(preconditioning_function)
        
    oli_solver.set_basis_generator(basis_generator)

    if max_num_pts is None:
        max_num_pts = pts.shape[1]

    if initial_pts is not None:
        # find unique set of points and separate initial pts from pts
        # this allows for cases when
        # (1) pts intersect initial_pts = empty
        # (2) pts intersect initial_pts = initial pts
        # (3) 0 < #(pts intersect initial_pts) < #initial_pts
        pts = remove_common_rows([pts.T,initial_pts.T]).T

    oli_solver.factorize(
        pts, initial_pts,
        num_selected_pts = max_num_pts)

    permuted_pts = oli_solver.get_current_points()

    permuted_vals = model(permuted_pts)
    pce = oli_solver.get_current_interpolant(
        permuted_pts, permuted_vals)

    assert permuted_pts.shape[1] == max_num_pts

    # Ensure pce interpolates the training data
    pce_vals = pce.value(permuted_pts)
    assert np.allclose(permuted_vals, pce_vals)

    # Ensure pce exactly approximates the polynomial test function (model)
    test_pts = generate_independent_random_samples(
        var_trans.variable, num_samples=10)
    test_vals = model(test_pts)
    # print('p', test_pts.T)
    pce_vals = pce.value(test_pts)
    L, U, H = oli_solver.get_current_LUH_factors()
    # print(L)
    # print(U)
    # print(test_vals)
    # print(pce_vals)
    # print('coeff', pce.get_coefficients())
    # print(oli_solver.selected_basis_indices)
    assert np.allclose(test_vals, pce_vals)
    
    if initial_pts is not None:
        temp = remove_common_rows([permuted_pts.T, initial_pts.T]).T
        assert temp.shape[1] == max_num_pts-initial_pts.shape[1]
        if oli_solver.enforce_ordering_of_initial_points:
            # compare the leading columns (one column per initial point)
            assert np.allclose(
                initial_pts, permuted_pts[:, :initial_pts.shape[1]])
        elif not oli_solver.get_initial_points_degenerate():
            assert allclose_unsorted_matrix_rows(
                initial_pts.T, permuted_pts[:, :initial_pts.shape[1]].T)
        else:
            # make sure that oli tried again to add missing initial
            # points after they were found to be degenerate;
            # often adding one new point will remove degeneracy
            assert oli_solver.get_num_initial_points_selected() == \
                initial_pts.shape[1]
            P = oli_solver.get_current_permutation()
            I = np.where(P < initial_pts.shape[1])[0]
            assert_allclose_unsorted_matrix_cols(
                initial_pts, permuted_pts[:, I])

    basis_generator = oli_solver.get_basis_generator()
    max_degree = oli_solver.get_current_degree()
    basis_cardinality = oli_solver.get_basis_cardinality()
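    # check that the cumulative number of basis terms per degree matches the
    # solver's recorded basis cardinality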
    num_terms = 0
    for degree in range(max_degree):
        __,indices = basis_generator(num_vars,degree)
        num_terms += indices.shape[1]
        assert num_terms == basis_cardinality[degree]

    if points_non_degenerate:
        degree_list = oli_solver.get_points_to_degree_map()
        num_terms = 1
        degree = 0
        num_pts = permuted_pts.shape[1]
        for i in range(num_pts):
            # test assumes non-degeneracy
            if i>=num_terms:
                degree+=1
                indices = PolyIndexVector()
                basis_generator.get_degree_basis_indices(
                    num_vars,degree,indices)
                num_terms += indices.size()
            assert degree_list[i] == degree

    if exact_mean is not None:
        mean = pce.get_coefficients()[0,0]
        assert np.allclose(mean,exact_mean)
Example #27
def approximate_polynomial_chaos(train_samples,
                                 train_vals,
                                 verbosity=0,
                                 basis_type='expanding_basis',
                                 variable=None,
                                 options=None):
    r"""
    Compute a Polynomial Chaos Expansion of a function from a fixed data set.

    Parameters
    ----------
    train_samples : np.ndarray (nvars,nsamples)
        The inputs of the function used to train the approximation

    train_vals : np.ndarray (nsamples, nqoi)
        The values of the function at ``train_samples``

    basis_type : string
        Type of approximation. Should be one of

        - 'expanding_basis' see :func:`pyapprox.approximate.expanding_basis_omp_pce`
        - 'hyperbolic_cross' see :func:`pyapprox.approximate.cross_validate_pce_degree`

    variable : pya.IndependentMultivariateRandomVariable
        Object containing information of the joint density of the inputs z.
        This is used to generate random samples from this joint density

    verbosity : integer
        Controls the amount of information printed to screen

    options : dict
        Options passed to the function used to compute the PCE coefficients

    Returns
    -------
    pce : :class:`pyapprox.multivariate_polynomials.PolynomialChaosExpansion`
        The PCE approximation
    """
    funcs = {
        'expanding_basis': expanding_basis_omp_pce,
        'hyperbolic_cross': cross_validate_pce_degree
    }
    if variable is None:
        msg = 'pce requires that variable be defined'
        raise Exception(msg)
    if basis_type not in funcs:
        msg = f'Basis type {basis_type} not found.\n Available types are:\n'
        for key in funcs.keys():
            msg += f"\t{key}\n"
        raise Exception(msg)

    from pyapprox.multivariate_polynomials import PolynomialChaosExpansion, \
        define_poly_options_from_variable_transformation
    var_trans = AffineRandomVariableTransformation(variable)
    poly = PolynomialChaosExpansion()
    poly_opts = define_poly_options_from_variable_transformation(var_trans)
    poly.configure(poly_opts)

    if options is None:
        options = {}

    res = funcs[basis_type](poly, train_samples, train_vals, **options)[0]
    return res
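
# A minimal usage sketch (illustrative, not part of the original source): fit
# a PCE to random samples of a smooth function of two uniform variables. The
# variable object and sampling helper follow the pyapprox conventions
# referenced in the docstring above.
from scipy import stats
import numpy as np
import pyapprox as pya

univariate_variables = [stats.uniform(-1, 2), stats.uniform(0, 1)]
variable = pya.IndependentMultivariateRandomVariable(univariate_variables)
train_samples = pya.generate_independent_random_samples(variable, 100)
train_vals = np.sum(train_samples**2, axis=0)[:, np.newaxis]
pce = approximate_polynomial_chaos(
    train_samples, train_vals, basis_type='expanding_basis', variable=variable)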
Example #28
    def test_factorization_using_exact_algebra(self):

        num_vars = 2
        alpha_stat, beta_stat = 2, 5
        var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat, beta_stat, -2, 1), num_vars)
        pce_opts = {'alpha_poly': beta_stat-1, 'beta_poly': alpha_stat-1,
                    'var_trans': var_trans, 'poly_type': 'jacobi'}

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)

        oli_opts = {'verbosity': 0,
                    'assume_non_degeneracy': False}

        basis_generator = \
            lambda num_vars, degree: (degree+1, compute_hyperbolic_level_indices(
                num_vars, degree, 1.0))
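        # the generator returns a (number of new terms, multi-indices) pair;
        # for two variables the total-degree level `degree` contains degree+1
        # indices, hence the first entry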
            
        oli_solver = LeastInterpolationSolver()
        oli_solver.configure(oli_opts)
        oli_solver.set_pce(pce)
        oli_solver.set_basis_generator(basis_generator)
        

        # Define 4 candidate points so no pivoting is necessary
        from numpy import sqrt, dot, sum, array, zeros
        from numpy.linalg import norm
        candidate_pts = array([[-1.,1./sqrt(2.),-1./sqrt(2.),0.],
                                   [-1.,-1./sqrt(2.),0.,0.]] )

        U = np.zeros((4,4))

        factor_history = []

        # Build vandermonde matrix for all degrees ahead of time
        degree = 2
        indices = compute_hyperbolic_indices(num_vars,degree,1.)
        pce.set_indices(indices)
        V = pce.basis_matrix(candidate_pts)
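        # with degree-graded index ordering, column 0 of V is the constant
        # basis, columns 1-2 the linear terms and columns 3-5 the quadratic
        # terms; the S=1..3 blocks below index into these columns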

        ##--------------------- ##
        ## S=1                  ##
        ##--------------------- ##

        U1 = array([[V[0,1],V[0,2]],
                    [V[1,1]-V[0,1],V[1,2]-V[0,2]],
                    [V[2,1]-V[0,1],V[2,2]-V[0,2]],
                    [V[3,1]-V[0,1],V[3,2]-V[0,2]]])

        norms = [sqrt((V[1,1]-V[0,1])**2+(V[1,2]-V[0,2])**2),
                 sqrt((V[2,1]-V[0,1])**2+(V[2,2]-V[0,2])**2),
                 sqrt((V[3,1]-V[0,1])**2+(V[3,2]-V[0,2])**2)]
        U1[1,:] /= norms[0]

        magic_row = array([[(V[1,1]-V[0,1])/norms[0],(V[1,2]-V[0,2])/norms[0]]])

        inner_products = array([(V[1,1]-V[0,1])*(V[1,1]-V[0,1])/norms[0]+
                                (V[1,2]-V[0,2])*(V[1,2]-V[0,2])/norms[0],
                                (V[2,1]-V[0,1])*(V[1,1]-V[0,1])/norms[0]+
                                (V[2,2]-V[0,2])*(V[1,2]-V[0,2])/norms[0],
                                (V[3,1]-V[0,1])*(V[1,1]-V[0,1])/norms[0]+
                                (V[3,2]-V[0,2])*(V[1,2]-V[0,2])/norms[0]])

        v1 = inner_products
        L = np.array([[1,0,0,0],[0,v1[0],v1[1],v1[2]]]).T

        Z = array([[V[0,1]*(V[1,1]-V[0,1])/norms[0]+
                    V[0,2]*(V[1,2]-V[0,2])/norms[0]]])

        U = array([[1,Z[0,0]],[0,1]])

        factor_history.append((L,U))

        ##--------------------- ##
        ## S=2                  ##
        ##--------------------- ##

        U2 = array([[V[0,1],V[0,2]],
                    [(V[1,1]-V[0,1])/L[1,1],(V[1,2]-V[0,2])/L[1,1]],
                    [(V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1],
                     (V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1]],
                    [(V[3,1]-V[0,1])-L[3,1]*(V[1,1]-V[0,1])/L[1,1],
                     (V[3,2]-V[0,2])-L[3,1]*(V[1,2]-V[0,2])/L[1,1]]])

        norms = [sqrt(((V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1])**2+
                      ((V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1])**2),
                 sqrt(((V[3,1]-V[0,1])-L[3,1]*(V[1,1]-V[0,1])/L[1,1])**2+
                      ((V[3,2]-V[0,2])-L[3,1]*(V[1,2]-V[0,2])/L[1,1])**2)]
        U2[2,:] /= norms[0]

        magic_row = array([(V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1],
                           (V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1]])/norms[0]

        inner_products = [
            norms[0],
            ((V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1])*
            ((V[3,1]-V[0,1])-L[3,1]*(V[1,1]-V[0,1])/L[1,1])/norms[0]+
            ((V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1])*
            ((V[3,2]-V[0,2])-L[3,1]*(V[1,2]-V[0,2])/L[1,1])/norms[0]]

        v2 = inner_products
        L = np.array([[1,0,0,0],[0,v1[0],v1[1],v1[2]],[0,0,v2[0],v2[1]]]).T

        Z = [V[0,1]/norms[0]*((V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1])+
             V[0,2]/norms[0]*((V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1]),
             (V[1,1]-V[0,1])/(L[1,1]*norms[0])*
             ((V[2,1]-V[0,1])-L[2,1]*(V[1,1]-V[0,1])/L[1,1])+
             (V[1,2]-V[0,2])/(L[1,1]*norms[0])*
             ((V[2,2]-V[0,2])-L[2,1]*(V[1,2]-V[0,2])/L[1,1])]

        U_prev = U.copy()
        U = zeros((3,3))
        U[:2,:2] = U_prev
        U[:2,2] = Z
        U[2,2] = 1

        factor_history.append((L,U))

        ##--------------------- ##
        ## S=3                  ##
        ##--------------------- ##

        U3 = array([
            [V[0,3],V[0,4],V[0,5]],
            [(V[1,3]-V[0,3])/L[1,1],(V[1,4]-V[0,4])/L[1,1],
             (V[1,5]-V[0,5])/L[1,1]],
            [((V[2,3]-V[0,3])-L[2,1]*(V[1,3]-V[0,3])/L[1,1])/L[2,2],
             ((V[2,4]-V[0,4])-L[2,1]*(V[1,4]-V[0,4])/L[1,1])/L[2,2],
             ((V[2,5]-V[0,5])-L[2,1]*(V[1,5]-V[0,5])/L[1,1])/L[2,2]],
            [(V[3,3]-V[0,3])-L[3,1]*(V[1,3]-V[0,3])/L[1,1]-
             L[3,2]/L[2,2]*(V[2,3]-V[0,3]-L[2,1]/L[1,1]*(V[1,3]-V[0,3])),
             (V[3,4]-V[0,4])-L[3,1]*(V[1,4]-V[0,4])/L[1,1]-
             L[3,2]/L[2,2]*(V[2,4]-V[0,4]-L[2,1]/L[1,1]*(V[1,4]-V[0,4])),
             (V[3,5]-V[0,5])-L[3,1]*(V[1,5]-V[0,5])/L[1,1]-
             L[3,2]/L[2,2]*(V[2,5]-V[0,5]-L[2,1]/L[1,1]*(V[1,5]-V[0,5]))]])

        norms = [norm(U3[3,:])]
        U3[3,:] /= norms[0]

        magic_row = array([U3[3,:]])

        inner_products = [norms[0]]

        L_prev = L.copy()
        L = zeros((4,4))
        L[:,:3] = L_prev
        L[3,3] = inner_products[0]

        Z = dot(U3[:3,:3], magic_row.T)

        U_prev = U.copy()
        U = zeros((4,4))
        U[:3,:3] = U_prev
        U[:3,3] = Z.squeeze()
        U[3,3] = 1

        factor_history.append((L,U))


        candidate_pts = array([[-1., 1./sqrt(2.), -1./sqrt(2.), 0.],
                               [-1., -1./sqrt(2.), 0., 0.]])

        # define target function
        model = lambda x: np.asarray([x[0]**2 + x[1]**2 + x[0]*x[1]]).T
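        # the target is a degree-2 polynomial, so the interpolant built from
        # the degree-2 basis should reproduce it exactly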

        num_starting_pts = 1
        initial_pts = None
        oli_solver.factorize(
            candidate_pts, initial_pts, num_selected_pts=num_starting_pts)


        L,U,H = oli_solver.get_current_LUH_factors()
        it = 0
        assert np.allclose(L[:1,:1], factor_history[it][0][:1,:1])
        assert np.allclose(U[:1,:1], factor_history[it][1][:1,:1])

        current_pts = oli_solver.get_current_points()
        current_vals = model(current_pts)

        num_pts = current_pts.shape[1]
        num_pts_prev = current_pts.shape[1]
        max_num_pts = candidate_pts.shape[1]
        finalize = False
        while not finalize:
            if ((num_pts == max_num_pts-1) or
                    (num_pts == candidate_pts.shape[1])):
                finalize = True

            oli_solver.update_factorization(1)


            L,U,H = oli_solver.get_current_LUH_factors()
            assert np.allclose(
                L, factor_history[it][0][:L.shape[0],:L.shape[1]])
            assert np.allclose(
                U, factor_history[it][1][:U.shape[0],:U.shape[1]])
            it += 1

            num_pts_prev = num_pts
            num_pts = oli_solver.num_points_added()
            if num_pts > num_pts_prev:
                current_pt = oli_solver.get_last_point_added()
                current_val = model(current_pt)
                current_pts = np.hstack(
                    (current_pts, current_pt.reshape(current_pt.shape[0], 1)))
                current_vals = np.vstack((current_vals, current_val))
                pce = oli_solver.get_current_interpolant(
                    current_pts, current_vals)
                current_pce_vals = pce.value(current_pts)
                assert np.allclose(current_pce_vals, current_vals)
Example #29
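# note: `samples` and `index_product` are assumed to be defined earlier in
# the script from which this fragment was extracted (not shown here)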
samples_adjust = np.zeros((3, samples.shape[1]))
samples_adjust[0, :] = np.prod(samples[index_product[0], :], axis=0)
samples_adjust[1, :] = samples[2, :]
samples_adjust[2, :] = np.prod(samples[index_product[1], :], axis=0)


# the following uses the product of uniforms to define the basis
from pyapprox.variables import get_distribution_info
from pyapprox.univariate_quadrature import gauss_jacobi_pts_wts_1D
from pyapprox.utilities import total_degree_space_dimension

def identity_fun(x):
    return x

degree = 3
poly = PolynomialChaosExpansion()
basis_opts = dict()
identity_map_indices = []
cnt = 0
for ii in range(re_variable.nunique_vars):
    rv = re_variable.unique_variables[ii]
    name, scales, shapes = get_distribution_info(rv)
    if ii not in [0, 2]:
        opts = {'rv_type': name, 'shapes': shapes,
                'var_nums': re_variable.unique_variable_indices[ii]}
        basis_opts['basis%d' % ii] = opts
        continue

    # unique_variable_indices[ii] is an array; convert it to a list so the
    # concatenation below behaves as intended
    identity_map_indices += list(re_variable.unique_variable_indices[ii])
    
class AdaptiveInducedPCE(SubSpaceRefinementManager):
    def __init__(self, num_vars, cond_tol=1e2):
        super(AdaptiveInducedPCE, self).__init__(num_vars)
        self.cond_tol = cond_tol
        self.fit_opts = {'omp_tol': 0}
        self.set_preconditioning_function(chistoffel_preconditioning_function)
        self.fit_function = self._fit
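        # cond_tol < 1 acts as a flag: disable induced (Christoffel) sampling
        # and fall back to iid sampling with a fixed oversampling ratio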
        if cond_tol < 1:
            self.induced_sampling = False
            self.set_preconditioning_function(
                precond_func=lambda m, x: np.ones(x.shape[1]))
            self.sample_ratio = 5
        else:
            self.induced_sampling = True
            self.sample_ratio = None

    def set_function(self, function, var_trans=None, pce=None):
        super(AdaptiveInducedPCE, self).set_function(function, var_trans)
        self.set_polynomial_chaos_expansion(pce)

    def set_polynomial_chaos_expansion(self, pce=None):
        if pce is None:
            poly_opts = define_poly_options_from_variable_transformation(
                self.variable_transformation)
            self.pce = PolynomialChaosExpansion()
            self.pce.configure(poly_opts)
        else:
            self.pce = pce

    def increment_samples(self, current_poly_indices, unique_poly_indices):
        if self.induced_sampling:
            samples = increment_induced_samples_migliorati(
                self.pce, self.cond_tol, self.samples, current_poly_indices,
                unique_poly_indices)
        else:
            samples = generate_independent_random_samples(
                self.pce.var_trans.variable,
                self.sample_ratio * unique_poly_indices.shape[1])
            samples = self.pce.var_trans.map_to_canonical_space(samples)
            samples = np.hstack([self.samples, samples])
        return samples

    def allocate_initial_samples(self):
        if self.induced_sampling:
            return generate_induced_samples_migliorati_tolerance(
                self.pce, self.cond_tol)
        else:
            return generate_independent_random_samples(
                self.pce.var_trans.variable,
                self.sample_ratio * self.pce.num_terms())

    def create_new_subspaces_data(self, new_subspace_indices):
        num_current_subspaces = self.subspace_indices.shape[1]
        self.initialize_subspaces(new_subspace_indices)

        self.pce.set_indices(self.poly_indices)
        if self.samples.shape[1] == 0:
            unique_subspace_samples = self.allocate_initial_samples()
            return unique_subspace_samples, np.array(
                [unique_subspace_samples.shape[1]])

        num_vars, num_new_subspaces = new_subspace_indices.shape
        unique_poly_indices = np.zeros((num_vars, 0), dtype=int)
        for ii in range(num_new_subspaces):
            I = get_subspace_active_poly_array_indices(
                self, num_current_subspaces + ii)
            unique_poly_indices = np.hstack(
                [unique_poly_indices, self.poly_indices[:, I]])

        # current_poly_indices will include active indices not added
        # during this call, i.e. in new_subspace_indices.
        # thus cannot use
        # I = get_active_poly_array_indices(self)
        # unique_poly_indices = self.poly_indices[:,I]
        # to replace above loop
        current_poly_indices = self.poly_indices[:, :self.
                                                 unique_poly_indices_idx[
                                                     num_current_subspaces]]
        num_samples = self.samples.shape[1]
        samples = self.increment_samples(current_poly_indices,
                                         unique_poly_indices)
        unique_subspace_samples = samples[:, num_samples:]

        # warning num_new_subspace_samples does not really make sense for
        # induced sampling as new samples are not directly tied to newly
        # added basis
        num_new_subspace_samples = unique_subspace_samples.shape[1] * np.ones(
            new_subspace_indices.shape[1]) // new_subspace_indices.shape[1]
        return unique_subspace_samples, num_new_subspace_samples

    def _fit(self,
             pce,
             canonical_basis_matrix,
             samples,
             values,
             precond_func=None,
             omp_tol=0):
        # todo: just add columns to the stored basis matrix; store a QR
        # factorization of the basis matrix and update the factorization
        # incrementally. self.samples are in the canonical domain.
        if omp_tol == 0:
            coef = solve_preconditioned_least_squares(canonical_basis_matrix,
                                                      samples, values,
                                                      precond_func)
        else:
            coef = solve_preconditioned_orthogonal_matching_pursuit(
                canonical_basis_matrix, samples, values, precond_func, omp_tol)
        self.pce.set_coefficients(coef)

    def fit(self):
        return self.fit_function(self.pce, self.pce.canonical_basis_matrix,
                                 self.samples, self.values, **self.fit_opts)

    def add_new_subspaces(self, new_subspace_indices):
        num_new_subspace_samples = super(
            AdaptiveInducedPCE, self).add_new_subspaces(new_subspace_indices)
        self.fit()
        return num_new_subspace_samples

    def __call__(self, samples):
        return self.pce(samples)

    def get_active_unique_poly_indices(self):
        I = get_active_poly_array_indices(self)
        return self.poly_indices[:, I]

    def set_preconditioning_function(self, precond_func):
        """
        precond_func : callable
            Callable with signature precond_func(basis_matrix, samples)
        """
        self.precond_func = precond_func
        self.fit_opts['precond_func'] = self.precond_func
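
# A minimal construction sketch (illustrative, not part of the original
# source). The refinement loop is driven by the SubSpaceRefinementManager
# base class, whose driver methods are not shown in this fragment, so only
# setup and evaluation are sketched; `model` and `var_trans` are assumed to
# be defined as in the earlier examples.
adaptive_pce = AdaptiveInducedPCE(num_vars=2, cond_tol=1e2)
adaptive_pce.set_function(model, var_trans)
# ... run the base-class refinement loop, then evaluate the approximation:
# approx_vals = adaptive_pce(test_samples)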