Example #1
    def test_define_mixed_tensor_product_random_variable(self):
        """
        Construct a multivariate random variable from the tensor-product of
        different one-dimensional variables, assuming that, for a given
        variable type, the distribution parameters are NOT the same.
        """
        univariate_variables = [
            stats.uniform(-1, 2),
            stats.beta(1, 1, -1, 2),
            stats.norm(-1, np.sqrt(4)),
            stats.uniform(),
            stats.uniform(-1, 2),
            stats.beta(2, 1, -2, 3)
        ]
        var_trans = AffineRandomVariableTransformation(univariate_variables)

        # first sample is on left boundary of all bounded variables
        # and one standard deviation to left of mean for gaussian variable
        # second sample is on right boundary of all bounded variables
        # and one standard deviation to right of mean for gaussian variable
        true_user_samples = np.asarray([[-1, -1, -3, 0, -1, -2],
                                        [1, 1, 1, 1, 1, 1]]).T

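        # every bounded variable maps affinely to [-1, 1] and the Gaussian to
        # a standard normal, so the first sample maps to -1 in every
        # coordinate and the second sample to +1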
        canonical_samples = var_trans.map_to_canonical_space(true_user_samples)
        true_canonical_samples = np.ones_like(true_user_samples)
        true_canonical_samples[:, 0] = -1
        assert np.allclose(true_canonical_samples, canonical_samples)

        user_samples = var_trans.map_from_canonical_space(canonical_samples)
        assert np.allclose(user_samples, true_user_samples)

    def test_marginalize_polynomial_chaos_expansions(self):
        univariate_variables = [uniform(-1, 2), norm(0, 1), uniform(-1, 2)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        num_vars = len(univariate_variables)

        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        degree = 2
        indices = compute_hyperbolic_indices(num_vars, degree, 1)
        poly.set_indices(indices)
        poly.set_coefficients(np.ones((indices.shape[1], 1)))

        for ii in range(num_vars):
            # Marginalize out 2 variables
            xx = np.linspace(-1, 1, 101)
            inactive_idx = np.hstack(
                (np.arange(ii), np.arange(ii + 1, num_vars)))
            marginalized_pce = marginalize_polynomial_chaos_expansion(
                poly, inactive_idx)
            mvals = marginalized_pce(xx[None, :])
            variable_ii = variable.all_variables()[ii:ii + 1]
            var_trans_ii = AffineRandomVariableTransformation(variable_ii)
            poly_ii = PolynomialChaosExpansion()
            poly_opts_ii = define_poly_options_from_variable_transformation(
                var_trans_ii)
            poly_ii.configure(poly_opts_ii)
            indices_ii = compute_hyperbolic_indices(1, degree, 1.)
            poly_ii.set_indices(indices_ii)
            poly_ii.set_coefficients(np.ones((indices_ii.shape[1], 1)))
            pvals = poly_ii(xx[None, :])
            # import matplotlib.pyplot as plt
            # plt.plot(xx, pvals)
            # plt.plot(xx, mvals, '--')
            # plt.show()
            assert np.allclose(mvals, pvals)

            # Marginalize out 1 variable
            xx = cartesian_product([xx] * 2)
            inactive_idx = np.array([ii])
            marginalized_pce = marginalize_polynomial_chaos_expansion(
                poly, inactive_idx)
            mvals = marginalized_pce(xx)
            variable_ii = (variable.all_variables()[:ii] +
                           variable.all_variables()[ii+1:])
            var_trans_ii = AffineRandomVariableTransformation(variable_ii)
            poly_ii = PolynomialChaosExpansion()
            poly_opts_ii = define_poly_options_from_variable_transformation(
                var_trans_ii)
            poly_ii.configure(poly_opts_ii)
            indices_ii = compute_hyperbolic_indices(2, degree, 1.)
            poly_ii.set_indices(indices_ii)
            poly_ii.set_coefficients(np.ones((indices_ii.shape[1], 1)))
            pvals = poly_ii(xx)
            assert np.allclose(mvals, pvals)
Example #3
    def test_evaluate_multivariate_mixed_basis_pce_moments(self):
        degree = 2

        alpha_stat, beta_stat = 2, 3
        univariate_variables = [beta(alpha_stat, beta_stat, 0, 1), norm(-1, 2)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        num_vars = len(univariate_variables)

        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        univariate_quadrature_rules = [
            partial(gauss_jacobi_pts_wts_1D,
                    alpha_poly=beta_stat - 1,
                    beta_poly=alpha_stat - 1), gauss_hermite_pts_wts_1D
        ]
        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1, num_vars, univariate_quadrature_rules,
            var_trans.map_from_canonical_space)

        coef = np.ones((indices.shape[1], 2))
        coef[:, 1] *= 2
        poly.set_coefficients(coef)
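        # evaluate the expansion at the quadrature points to form reference
        # (quadrature) estimates of the mean and variance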
        basis_matrix = poly.basis_matrix(samples)
        values = basis_matrix.dot(coef)
        true_mean = values.T.dot(weights)
        true_variance = (values.T**2).dot(weights) - true_mean**2

        assert np.allclose(poly.mean(), true_mean)
        assert np.allclose(poly.variance(), true_variance)
Example #4
def get_total_degree_polynomials(univariate_variables, degrees):
    assert type(univariate_variables[0]) == list
    assert len(univariate_variables) == len(degrees)
    polys, nparams = [], []
    for ii in range(len(degrees)):
        poly = PolynomialChaosExpansion()
        var_trans = AffineRandomVariableTransformation(
            univariate_variables[ii])
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)
        indices = compute_hyperbolic_indices(var_trans.num_vars(), degrees[ii],
                                             1.0)
        poly.set_indices(indices)
        polys.append(poly)
        nparams.append(indices.shape[1])
    return polys, np.array(nparams)
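
# A minimal usage sketch (hypothetical inputs; each entry of
# univariate_variables must itself be a list of 1D variables, per the
# assertion above):
# polys, nparams = get_total_degree_polynomials(
#     [[stats.uniform(-1, 2), stats.norm(0, 1)]], [3])
# nparams -> array([10])  # a total-degree-3 basis in 2 variables has 10 terms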
Example #5
    def test_pce_jacobian(self):
        degree = 2

        alpha_stat, beta_stat = 2, 3
        univariate_variables = [beta(alpha_stat, beta_stat, 0, 1), norm(-1, 2)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        num_vars = len(univariate_variables)

        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        sample = generate_independent_random_samples(variable, 1)

        coef = np.ones((indices.shape[1], 2))
        coef[:, 1] *= 2
        poly.set_coefficients(coef)

        jac = poly.jacobian(sample)
        from pyapprox.optimization import approx_jacobian
        fd_jac = approx_jacobian(lambda x: poly(x[:, np.newaxis])[0, :],
                                 sample[:, 0])
        assert np.allclose(jac, fd_jac)

def get_polynomial_from_variable(variable):
    var_trans = AffineRandomVariableTransformation(
        variable)
    poly = PolynomialChaosExpansion()
    poly_opts = define_poly_options_from_variable_transformation(var_trans)
    poly.configure(poly_opts)
    return poly
Example #7
def remove_variables_from_polynomial_chaos_expansion(poly, inactive_idx):
    """
    Remove the variables indexed by ``inactive_idx`` from a polynomial
    chaos expansion.

    This function is not optimal. It will recreate the options
    used to configure the polynomial. Any recursion coefficients
    already calculated which are still relevant will need to be
    recomputed. This is probably not a large overhead though.
    """
    fixed_pce = PolynomialChaosExpansion()
    # a shallow copy is not sufficient because nested options are modified
    opts = copy.deepcopy(poly.config_opts)
    active_idx = np.setdiff1d(np.arange(poly.num_vars()), inactive_idx)
    all_variables = poly.var_trans.variable.all_variables()
    opts['var_trans'] = AffineRandomVariableTransformation(
        IndependentMultivariateRandomVariable(
            [all_variables[ii] for ii in active_idx]))

    if opts['poly_types'] is not None:
        # var_nums refer to positions in the original variable list and must
        # be remapped to positions in the reduced variable list
        reduced_var_nums = -np.ones(poly.num_vars(), dtype=int)
        reduced_var_nums[active_idx] = np.arange(active_idx.shape[0])
        keys_to_delete = []
        for key, poly_opts in opts['poly_types'].items():
            var_nums = poly_opts['var_nums']
            poly_opts['var_nums'] = np.array(
                [reduced_var_nums[v] for v in var_nums if v in active_idx],
                dtype=int)
            if poly_opts['var_nums'].shape[0] == 0:
                keys_to_delete.append(key)
        for key in keys_to_delete:
            del opts['poly_types'][key]
    # else: no need to do anything, the same basis is used for all variables

    fixed_pce.configure(opts)
    if poly.indices is not None:
        fixed_pce.set_indices(poly.indices[active_idx, :])
    assert fixed_pce.coefficients is None
    return fixed_pce
Example #8
    def test_multivariate_sampling_jacobi(self):

        num_vars = 2
        degree = 2
        alph = 1
        bet = 1.
        univ_inv = partial(idistinv_jacobi, alph=alph, bet=bet)
        num_samples = 10
        indices = np.ones((2, num_samples), dtype=int) * degree
        indices[1, :] = degree - 1
        xx = np.tile(
            np.linspace(0.01, 0.99, num_samples)[np.newaxis, :],
            (num_vars, 1))
        samples = univ_inv(xx, indices)

        var_trans = AffineRandomVariableTransformation(
            [beta(bet + 1, alph + 1, -1, 2),
             beta(bet + 1, alph + 1, -1, 2)])
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        pce.set_indices(indices)

        reference_samples = inverse_transform_sampling_1d(
            pce.var_trans.variable.unique_variables[0],
            pce.recursion_coeffs[0], degree, xx[0, :])
        # differences are just caused by different tolerances in the
        # optimizers used to find the roots of the CDF
        assert np.allclose(reference_samples, samples[0, :], atol=1e-7)
        reference_samples = inverse_transform_sampling_1d(
            pce.var_trans.variable.unique_variables[0],
            pce.recursion_coeffs[0], degree - 1, xx[0, :])
        assert np.allclose(reference_samples, samples[1, :], atol=1e-7)
Example #9
 def test_map_derivatives(self):
     nvars = 2
     nsamples = 10
     x = np.random.uniform(0, 1, (nvars, nsamples))
     # vals = np.sum(x**2, axis=0)[:, None]
     grad = np.vstack([2 * x[ii:ii + 1, :] for ii in range(nvars)])
     var_trans = AffineRandomVariableTransformation(
         [stats.uniform(0, 1), stats.uniform(2, 2)])
     canonical_derivs = var_trans.map_derivatives_to_canonical_space(grad)
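     # the affine map to canonical space is x = 2*(u - lb)/(ub - lb) - 1, so
     # a gradient w.r.t. the user variable u is scaled by du/dx = (ub - lb)/2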
     for ii in range(nvars):
         lb, ub = var_trans.variable.all_variables()[ii].interval(1)
         assert np.allclose(canonical_derivs[ii, :],
                            (ub - lb) * grad[ii, :] / 2)
     recovered_derivs = var_trans.map_derivatives_from_canonical_space(
         canonical_derivs)
     assert np.allclose(recovered_derivs, grad)
Example #10
def preconditioned_barycentric_weights():
    nmasses = 20
    xk = np.array(range(nmasses), dtype='float')
    pk = np.ones(nmasses) / nmasses
    var1 = float_rv_discrete(name='float_rv_discrete', values=(xk, pk))()
    univariate_variables = [var1]
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    var_trans = AffineRandomVariableTransformation(variable)
    growth_rule = partial(constant_increment_growth_rule, 2)
    quad_rule = get_univariate_leja_quadrature_rule(var1, growth_rule)
    samples = quad_rule(3)[0]
    num_samples = samples.shape[0]
    poly = PolynomialChaosExpansion()
    poly_opts = define_poly_options_from_variable_transformation(var_trans)
    poly_opts['numerically_generated_poly_accuracy_tolerance'] = 1e-5
    poly.configure(poly_opts)
    poly.set_indices(np.arange(num_samples)[np.newaxis, :])

    # precond_weights = np.sqrt(
    #    (poly.basis_matrix(samples[np.newaxis,:])**2).mean(axis=1))
    precond_weights = np.ones(num_samples)

    bary_weights = compute_barycentric_weights_1d(
        samples, interval_length=samples.max() - samples.min())

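    # second (true) form of the barycentric interpolation formula,
    # p(x) = [sum_j w_j f_j/(x - x_j)] / [sum_j w_j/(x - x_j)]; the numerator
    # and denominator are each evaluated with barysum below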
    def barysum(x, y, w, f):
        x = x[:, np.newaxis]
        y = y[np.newaxis, :]
        temp = w * f / (x - y)
        return np.sum(temp, axis=1)

    def function(x):
        return np.cos(2 * np.pi * x)

    y = samples
    print(samples)
    w = precond_weights * bary_weights
    # x = np.linspace(-3,3,301)
    x = np.linspace(-1, 1, 301)
    f = function(y) / precond_weights

    # the barycentric formula divides by (x - y), so drop evaluation points
    # that coincide with the interpolation nodes
    II = []
    for ii, xx in enumerate(x):
        if xx in samples:
            II.append(ii)
    x = np.delete(x, II)

    r1 = barysum(x, y, w, f)
    r2 = barysum(x, y, w, 1 / precond_weights)
    interp_vals = r1 / r2
    # import matplotlib.pyplot as plt
    # plt.plot(x, interp_vals, 'k')
    # plt.plot(samples, function(samples), 'ro')
    # plt.plot(x, function(x), 'r--')
    # plt.plot(samples,function(samples),'ro')
    # print(num_samples)
    # print(precond_weights)
    print(np.linalg.norm(interp_vals - function(x)))
Example #11
    def test_independent_discrete_samples(self):

        variable = IndependentMultivariateRandomVariable(
            self.discrete_variables)
        var_trans = AffineRandomVariableTransformation(variable)

        num_samples = int(1e6)
        samples = generate_independent_random_samples(var_trans.variable,
                                                      num_samples)
        mean = samples.mean(axis=1)
        assert np.allclose(mean, self.discrete_mean, rtol=1e-2)
Example #12
    def test_identity_map_subset(self):
        num_vars = 3
        var_trans = define_iid_random_variable_transformation(
            stats.uniform(0, 1), num_vars)
        var_trans.set_identity_maps([1])

        samples = np.random.uniform(0, 1, (num_vars, 4))
        canonical_samples = var_trans.map_to_canonical_space(samples)
        assert np.allclose(canonical_samples[1, :], samples[1, :])

        assert np.allclose(
            var_trans.map_from_canonical_space(canonical_samples), samples)

        univariate_variables = [
            stats.uniform(-1, 2),
            stats.beta(1, 1, -1, 2),
            stats.norm(-1, np.sqrt(4)),
            stats.uniform(),
            stats.uniform(-1, 2),
            stats.beta(2, 1, -2, 3)
        ]
        var_trans = AffineRandomVariableTransformation(univariate_variables)
        var_trans.set_identity_maps([4, 2])

        from pyapprox.probability_measure_sampling import \
            generate_independent_random_samples
        samples = generate_independent_random_samples(var_trans.variable, 10)
        canonical_samples = var_trans.map_to_canonical_space(samples)
        assert np.allclose(canonical_samples[[2, 4], :], samples[[2, 4], :])

        assert np.allclose(
            var_trans.map_from_canonical_space(canonical_samples), samples)
Example #13
 def test_float_rv_discrete_chebyshev(self):
     N, degree = 10, 5
     xk, pk = np.geomspace(1.0, 512.0, num=N), np.ones(N) / N
     rv = float_rv_discrete(name='float_rv_discrete', values=(xk, pk))()
     var_trans = AffineRandomVariableTransformation([rv])
     poly = PolynomialChaosExpansion()
     poly_opts = define_poly_options_from_variable_transformation(var_trans)
     poly_opts['numerically_generated_poly_accuracy_tolerance'] = 1e-9
     poly.configure(poly_opts)
     poly.set_indices(np.arange(degree + 1)[np.newaxis, :])
     p = poly.basis_matrix(xk[np.newaxis, :])
     w = pk
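     # verify the basis is orthonormal with respect to the discrete measure,
     # i.e. P^T diag(w) P = I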
     assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1))
Example #14
 def test_hahn_hypergeometric(self):
     degree = 4
     M, n, N = 20, 7, 12
     rv = hypergeom(M, n, N)
     var_trans = AffineRandomVariableTransformation([rv])
     poly = PolynomialChaosExpansion()
     poly_opts = define_poly_options_from_variable_transformation(var_trans)
     poly.configure(poly_opts)
     poly.set_indices(np.arange(degree + 1)[np.newaxis, :])
     xk = np.arange(0, n + 1)[np.newaxis, :]
     p = poly.basis_matrix(xk)
     w = rv.pmf(xk[0, :])
     assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1))
Example #15
 def test_krawtchouk_binomial(self):
     degree = 4
     n, p = 10, 0.5
     rv = binom(n, p)
     var_trans = AffineRandomVariableTransformation([rv])
     poly = PolynomialChaosExpansion()
     poly_opts = define_poly_options_from_variable_transformation(var_trans)
     poly.configure(poly_opts)
     poly.set_indices(np.arange(degree + 1)[np.newaxis, :])
     xk = np.arange(0, n + 1)[np.newaxis, :]
     p = poly.basis_matrix(xk)
     w = rv.pmf(xk[0, :])
     assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1))
Example #16
    def test_adaptive_multivariate_sampling_jacobi(self):

        num_vars = 2
        degree = 6
        alph = 5
        bet = 5.

        var_trans = AffineRandomVariableTransformation(
            IndependentMultivariateRandomVariable([beta(alph, bet, -1, 3)],
                                                  [np.arange(num_vars)]))
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        indices = compute_hyperbolic_indices(num_vars, 1, 1.0)
        pce.set_indices(indices)
        cond_tol = 1e2
        samples = generate_induced_samples_migliorati_tolerance(pce, cond_tol)

        for dd in range(2, degree):
            num_prev_samples = samples.shape[1]
            new_indices = compute_hyperbolic_level_indices(num_vars, dd, 1.)
            samples = increment_induced_samples_migliorati(
                pce, cond_tol, samples, indices, new_indices)
            indices = np.hstack((indices, new_indices))
            pce.set_indices(indices)
            new_samples = samples[:, num_prev_samples:]
            prev_samples = samples[:, :num_prev_samples]
            #fig,axs = plt.subplots(1,2,figsize=(2*8,6))
            #from pyapprox.visualization import plot_2d_indices
            #axs[0].plot(prev_samples[0,:],prev_samples[1,:],'ko');
            #axs[0].plot(new_samples[0,:],new_samples[1,:],'ro');
            #plot_2d_indices(indices,other_indices=new_indices,ax=axs[1]);
            #plt.show()

        samples = var_trans.map_from_canonical_space(samples)
        cond = compute_preconditioned_basis_matrix_condition_number(
            pce.basis_matrix, samples)
        assert cond < cond_tol
Example #17
 def test_discrete_chebyshev(self):
     N, degree = 10, 5
     xk, pk = np.arange(N), np.ones(N) / N
     rv = float_rv_discrete(name='discrete_chebyshev', values=(xk, pk))()
     var_trans = AffineRandomVariableTransformation([rv])
     poly = PolynomialChaosExpansion()
     poly_opts = define_poly_options_from_variable_transformation(var_trans)
     poly.configure(poly_opts)
     poly.set_indices(np.arange(degree + 1)[np.newaxis, :])
     p = poly.basis_matrix(xk[np.newaxis, :])
     w = pk
     # print((np.dot(p.T*w,p),np.eye(degree+1)))
     assert np.allclose(np.dot(p.T * w, p), np.eye(degree + 1))
Example #18
    def test_define_mixed_tensor_product_random_variable_contin_discrete(self):
        """
        Construct a multivariate random variable from the tensor-product of
        different one-dimensional variables, assuming that, for a given
        variable type, the distribution parameters are NOT the same.
        """
        # parameters of binomial distribution
        num_trials = 10
        prob_success = 0.5
        univariate_variables = [
            stats.uniform(),
            stats.norm(-1, np.sqrt(4)),
            stats.norm(-1, np.sqrt(4)),
            stats.binom(num_trials, prob_success),
            stats.norm(-1, np.sqrt(4)),
            stats.uniform(0, 1),
            stats.uniform(0, 1),
            stats.binom(num_trials, prob_success)
        ]
        var_trans = AffineRandomVariableTransformation(univariate_variables)

        # first sample is on left boundary of all bounded variables
        # and one standard deviation to left of mean for gaussian variables
        # second sample is on right boundary of all bounded variables
        # and one standard deviation to right of mean for gaussian variable
        true_user_samples = np.asarray([[0, -3, -3, 0, -3, 0, 0, 0],
                                        [1, 1, 1, num_trials, 1, 1, 1, 10]]).T

        canonical_samples = var_trans.map_to_canonical_space(true_user_samples)
        true_canonical_samples = np.ones_like(true_user_samples)
        true_canonical_samples[:, 0] = -1
        true_canonical_samples[5, 0] = -1
        true_canonical_samples[3, :] = [-1, 1]
        true_canonical_samples[7, :] = [-1, 1]
        assert np.allclose(true_canonical_samples, canonical_samples)

        user_samples = var_trans.map_from_canonical_space(canonical_samples)
        assert np.allclose(user_samples, true_user_samples)

    def test_hermite_basis_for_lognormal_variables(self):
        def function(x):
            return (x.T)**2

        degree = 2
        # mu_g, sigma_g = 1e1, 0.1
        mu_l, sigma_l = 2.1e11, 2.1e10
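        # moment matching for a lognormal variable: if L = exp(G) with
        # G ~ N(mu_g, sigma_g**2) then E[L] = exp(mu_g + sigma_g**2/2) and
        # Var[L] = (exp(sigma_g**2) - 1)*E[L]**2; inverting these relations
        # for the target mean mu_l and std sigma_l gives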
        mu_g = np.log(mu_l**2 / np.sqrt(mu_l**2 + sigma_l**2))
        sigma_g = np.sqrt(np.log(1 + sigma_l**2 / mu_l**2))

        lognorm = stats.lognorm(s=sigma_g, scale=np.exp(mu_g))
        # assert np.allclose([lognorm.mean(), lognorm.std()], [mu_l, sigma_l])

        univariate_variables = [stats.norm(mu_g, sigma_g)]
        var_trans = AffineRandomVariableTransformation(univariate_variables)
        pce = PolynomialChaosExpansion()
        pce_opts = define_poly_options_from_variable_transformation(var_trans)
        pce.configure(pce_opts)
        pce.set_indices(
            compute_hyperbolic_indices(var_trans.num_vars(), degree, 1.))

        nsamples = int(1e6)
        samples = lognorm.rvs(nsamples)[None, :]
        values = function(samples)

        ntrain_samples = 20
        train_samples = lognorm.rvs(ntrain_samples)[None, :]
        train_values = function(train_samples)
        from pyapprox.quantile_regression import solve_quantile_regression, \
            solve_least_squares_regression
        coef = solve_quantile_regression(0.5,
                                         np.log(train_samples),
                                         train_values,
                                         pce.basis_matrix,
                                         normalize_vals=True)
        pce.set_coefficients(coef)
        print(pce.mean(), values.mean())
        assert np.allclose(pce.mean(), values.mean(), rtol=1e-3)
Example #20
    def test_map_rv_discrete(self):
        nvars = 2

        mass_locs = np.arange(5, 501, step=50)
        nmasses = mass_locs.shape[0]
        mass_probs = np.ones(nmasses, dtype=float) / float(nmasses)
        univariate_variables = [
            float_rv_discrete(name='float_rv_discrete',
                              values=(mass_locs, mass_probs))()
        ] * nvars

        variables = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variables)

        samples = np.vstack(
            [mass_locs[np.newaxis, :], mass_locs[0] * np.ones((1, nmasses))])
        canonical_samples = var_trans.map_to_canonical_space(samples)

        assert (canonical_samples[0].min() == -1)
        assert (canonical_samples[0].max() == 1)

        recovered_samples = var_trans.map_from_canonical_space(
            canonical_samples)
        assert np.allclose(recovered_samples, samples)
Example #21
    def test_independent_mixed_continuous_discrete_samples(self):

        univariate_variables = self.continuous_variables + self.discrete_variables
        I = np.random.permutation(len(univariate_variables))
        univariate_variables = [univariate_variables[ii] for ii in I]

        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)

        num_samples = int(5e6)
        samples = generate_independent_random_samples(var_trans.variable,
                                                      num_samples)
        mean = samples.mean(axis=1)

        true_mean = np.concatenate([self.continuous_mean,
                                    self.discrete_mean])[I]
        assert np.allclose(mean, true_mean, atol=1e-2)

def marginalize_polynomial_chaos_expansion(poly, inactive_idx, center=True):
    """
    This function is not optimal. It will recreate the options
    used to configure the polynomial. Any recursion coefficients
    already calculated which are still relevant will need to be
    recomputed. This is probably not a large overhead though.
    """
    marginalized_pce = PolynomialChaosExpansion()
    # poly.config_opts.copy will not work
    opts = copy.deepcopy(poly.config_opts)
    all_variables = poly.var_trans.variable.all_variables()
    active_idx = np.setdiff1d(np.arange(poly.num_vars()), inactive_idx)
    active_variables = IndependentMultivariateRandomVariable(
        [all_variables[ii] for ii in active_idx])
    opts['var_trans'] = AffineRandomVariableTransformation(active_variables)

    if opts['poly_types'] is not None:
        marginalized_var_nums = -np.ones(poly.num_vars())
        marginalized_var_nums[active_idx] = np.arange(active_idx.shape[0])
        keys_to_delete = []
        for key, poly_opts in opts['poly_types'].items():
            var_nums = poly_opts['var_nums']
            poly_opts['var_nums'] = np.array([
                marginalized_var_nums[v] for v in var_nums if v in active_idx
            ],
                                             dtype=int)
            if poly_opts['var_nums'].shape[0] == 0:
                keys_to_delete.append(key)
        for key in keys_to_delete:
            del opts['poly_types'][key]
    # else # no need to do anything same basis is used for all variables

    marginalized_pce.configure(opts)
    if poly.indices is not None:
        marginalized_array_indices = []
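        # keep a basis term if it is active in at least one retained variable
        # and constant in every marginalized variable; the constant (zero)
        # index is kept only when center is False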
        for ii, index in enumerate(poly.indices.T):
            if ((index.sum() == 0 and center is False)
                    or np.any(index[active_idx]) and
                (not np.any(index[inactive_idx] > 0))):
                marginalized_array_indices.append(ii)
        marginalized_pce.set_indices(poly.indices[np.ix_(
            active_idx, np.array(marginalized_array_indices))])
        if poly.coefficients is not None:
            marginalized_pce.set_coefficients(
                poly.coefficients[marginalized_array_indices, :].copy())
    return marginalized_pce

    def test_adaptive_leja_sampling_II(self):
        """
        Using a variance refinement indicator on an additive function can
        lead to some polynomial terms with more than one active variable.
        This is because the errors on these terms are small but not
        necessarily near machine precision. Consequently, test that the
        index set is additive except for terms corresponding to the
        subspace [1, 1], and only to moderate accuracy (1e-6).
        """
        num_vars = 2
        alph = 5
        bet = 5.
        error_tol = 1e-7

        # randomize coefficients of random variables to create anisotropy
        a = np.random.uniform(0, 1, (num_vars, 1))

        def function(x):
            vals = [
                np.cos(np.pi * a[ii] * x[ii, :]) for ii in range(x.shape[0])
            ]
            vals = np.array(vals).sum(axis=0)[:, np.newaxis]
            return vals

        # function = lambda x: np.sum(a*x**2,axis=0)[:,np.newaxis]

        var_trans = AffineRandomVariableTransformation(
            IndependentMultivariateRandomVariable([beta(alph, bet, 0, 1)],
                                                  [np.arange(num_vars)]))

        candidate_samples = -np.cos(
            np.random.uniform(0, np.pi, (num_vars, int(1e4))))
        pce = AdaptiveLejaPCE(num_vars,
                              candidate_samples,
                              factorization_type='fast')
        error, pce_slow = self.helper(function, var_trans, pce, np.inf,
                                      error_tol)
        print('leja sampling error', error)
        assert error < 10 * error_tol

        # assert index is additive except for [1,1] subspace terms
        subspace_num_active_vars = np.count_nonzero(pce.subspace_indices,
                                                    axis=0)
        assert np.where(subspace_num_active_vars > 1)[0].shape[0] == 1

    def get_univariate_quadrature_rules(self,
                                        variables,
                                        enforce_variable_bounds,
                                        univariate_quad_rule_info,
                                        quad_method,
                                        growth_incr=2):
        var_trans = AffineRandomVariableTransformation(
            variables, enforce_variable_bounds)

        if univariate_quad_rule_info is None:
            quad_rules, growth_rules, unique_quadrule_indices, \
                unique_max_level_1d = \
                    get_sparse_grid_univariate_leja_quadrature_rules_economical(
                        var_trans, method=quad_method, growth_incr=growth_incr)
        else:
            quad_rules, growth_rules = univariate_quad_rule_info
            unique_quadrule_indices = None
        return var_trans, quad_rules, growth_rules, unique_quadrule_indices
Example #25
    def test_pce_for_gumbel_variable(self):
        degree = 3
        mean, std = 1e4, 7.5e3
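        # for a Gumbel variable mean = mu + beta*euler_gamma and
        # std = beta*pi/sqrt(6); inverting gives the parameters below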
        beta = std * np.sqrt(6) / np.pi
        mu = mean - beta * np.euler_gamma
        rv1 = gumbel_r(loc=mu, scale=beta)
        assert np.allclose(rv1.mean(), mean) and np.allclose(rv1.std(), std)
        rv2 = lognorm(1)
        for rv in [rv2, rv1]:
            print(rv.dist.name)
            ncoef = degree + 1
            var_trans = AffineRandomVariableTransformation([rv])
            poly = PolynomialChaosExpansion()
            poly_opts = define_poly_options_from_variable_transformation(
                var_trans)
            poly_opts['numerically_generated_poly_accuracy_tolerance'] = 1e-9
            poly.configure(poly_opts)
            poly.set_indices(np.arange(degree + 1)[np.newaxis, :])
            poly.set_coefficients(np.ones((poly.indices.shape[1], 1)))

            def integrand(x):
                p = poly.basis_matrix(x[np.newaxis, :])
                G = np.empty((x.shape[0], p.shape[1]**2))
                kk = 0
                for ii in range(p.shape[1]):
                    for jj in range(p.shape[1]):
                        G[:, kk] = p[:, ii] * p[:, jj]
                        kk += 1
                return G * rv.pdf(x)[:, None]

            lb, ub = rv.interval(1)
            interval_size = rv.interval(0.99)[1] - rv.interval(0.99)[0]
            interval_size *= 10
            from pyapprox.utilities import \
                integrate_using_univariate_gauss_legendre_quadrature_unbounded
            res = \
                integrate_using_univariate_gauss_legendre_quadrature_unbounded(
                    integrand, lb, ub, 10, interval_size=interval_size,
                    verbose=0, max_steps=10000)
            res = np.reshape(res,
                             (poly.indices.shape[1], poly.indices.shape[1]),
                             order='C')
            print(res - np.eye(degree + 1))
            assert np.allclose(res, np.eye(degree + 1), atol=1e-6)

    def test_adaptive_least_squares_induced_sampling(self):
        num_vars = 2
        alph = 5
        bet = 5.

        def function(x):
            vals = [np.cos(np.pi*x[ii, :]) for ii in range(x.shape[0])]
            vals = np.array(vals).sum(axis=0)[:, np.newaxis]
            return vals
        # function = lambda x: np.sum(x**2, axis=0)[:, np.newaxis]

        var_trans = AffineRandomVariableTransformation(
            IndependentMultivariateRandomVariable(
                [beta(alph, bet, 0, 1)], [np.arange(num_vars)]))

        pce = AdaptiveInducedPCE(num_vars, cond_tol=1e2)
        error, pce = self.helper(function, var_trans, pce, 4, 0.)

        print('induced sampling error', error)
        assert error < 1e-14

    def test_adaptive_leja_sampling_I(self):
        """
        If the function is isotropic, small changes in priority can cause a
        different subspace to be refined when using the slow vs the fast
        method. This leads to different Leja sequences because the LU
        factorization depends on the order of the columns, i.e. the order
        in which basis functions are added. The numerical difference is
        caused by applying the preconditioning directly to the basis matrix
        in the slow approach, versus re-weighting the LU_factor matrix with
        the ratio precond_weights/precond_weights_prev in the fast approach.
        """
        num_vars = 2
        alph = 5
        bet = 5.

        # randomize coefficients of random variables to create anisotropy
        a = np.random.uniform(0, 1, (num_vars, 1))

        def function(x):
            return np.sum(a * x**2, axis=0)[:, np.newaxis]

        var_trans = AffineRandomVariableTransformation(
            IndependentMultivariateRandomVariable([beta(alph, bet, 0, 1)],
                                                  [np.arange(num_vars)]))

        candidate_samples = -np.cos(
            np.random.uniform(0, np.pi, (num_vars, int(1e4))))
        pce = AdaptiveLejaPCE(num_vars,
                              candidate_samples,
                              factorization_type='fast')
        error, pce_fast = self.helper(function, var_trans, pce, 2, 0.)
        print('leja sampling error', error)
        assert error < 1e-14

        pce = AdaptiveLejaPCE(num_vars,
                              candidate_samples,
                              factorization_type='slow')
        error, pce_slow = self.helper(function, var_trans, pce, 2, 0.)
        print('leja sampling error', error)
        assert error < 1e-14

        assert np.allclose(pce_slow.samples, pce_fast.samples)
Example #28
    def test_conditional_moments_of_polynomial_chaos_expansion(self):
        num_vars = 3
        degree = 2
        inactive_idx = [0, 2]
        np.random.seed(1)
        # keep variables on canonical domain to make constructing
        # tensor product quadrature rule, used for testing, easier
        var = [uniform(-1, 2), beta(2, 2, -1, 2), norm(0, 1)]
        quad_rules = [
            partial(gauss_jacobi_pts_wts_1D, alpha_poly=0, beta_poly=0),
            partial(gauss_jacobi_pts_wts_1D, alpha_poly=1, beta_poly=1),
            partial(gauss_hermite_pts_wts_1D)
        ]
        var_trans = AffineRandomVariableTransformation(var)
        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)
        poly.set_indices(compute_hyperbolic_indices(num_vars, degree, 1.0))
        poly.set_coefficients(
            np.arange(poly.indices.shape[1], dtype=float)[:, np.newaxis])

        fixed_samples = np.array(
            [[vv.rvs() for vv in np.array(var)[inactive_idx]]]).T
        mean, variance = conditional_moments_of_polynomial_chaos_expansion(
            poly, fixed_samples, inactive_idx, True)

        from pyapprox.utilities import get_all_sample_combinations
        from pyapprox.probability_measure_sampling import \
            generate_independent_random_samples
        active_idx = np.setdiff1d(np.arange(num_vars), inactive_idx)
        random_samples, weights = get_tensor_product_quadrature_rule(
            [2 * degree] * len(active_idx), len(active_idx),
            [quad_rules[ii] for ii in range(num_vars) if ii in active_idx])
        samples = get_all_sample_combinations(fixed_samples, random_samples)
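        # get_all_sample_combinations places the fixed (inactive) samples in
        # the first rows, so reorder the rows back to the original variable
        # ordering before evaluating the polynomial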
        temp = samples[len(inactive_idx):].copy()
        samples[inactive_idx] = samples[:len(inactive_idx)]
        samples[active_idx] = temp

        true_mean = (poly(samples).T.dot(weights).T)
        true_variance = ((poly(samples)**2).T.dot(weights).T) - true_mean**2
        assert np.allclose(true_mean, mean)
        assert np.allclose(true_variance, variance)
Example #29
    def test_multivariate_migliorati_sampling_jacobi(self):

        num_vars = 1
        degree = 20
        alph = 5
        bet = 5.
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)

        var_trans = AffineRandomVariableTransformation(
            IndependentMultivariateRandomVariable([beta(alph, bet, -1, 2)],
                                                  [np.arange(num_vars)]))
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)
        pce.set_indices(indices)

        cond_tol = 1e1
        samples = generate_induced_samples_migliorati_tolerance(pce, cond_tol)
        cond = compute_preconditioned_basis_matrix_condition_number(
            pce.canonical_basis_matrix, samples)
        assert cond < cond_tol

    def test_adaptive_least_squares_probability_measure_sampling(self):
        # set cond_tol < 1 to draw random samples from the probability measure
        num_vars = 2
        alph = 5
        bet = 5.

        def function(x):
            vals = [np.cos(np.pi*x[ii, :]) for ii in range(x.shape[0])]
            vals = np.array(vals).sum(axis=0)[:, np.newaxis]
            return vals
        #function = lambda x: np.sum(x**2,axis=0)[:,np.newaxis]

        var_trans = AffineRandomVariableTransformation(
            IndependentMultivariateRandomVariable(
                [beta(alph, bet, 0, 1)], [np.arange(num_vars)]))

        pce = AdaptiveInducedPCE(num_vars, cond_tol=0)
        pce.sample_ratio = 2
        error, pce = self.helper(function, var_trans, pce, 4, 0.)

        print('probability sampling error', error)
        assert error < 1e-10