Example #1
    def test_multiply_multivariate_orthonormal_polynomial_expansions(self):
        univariate_variables = [norm(), uniform()]
        variable = IndependentMultivariateRandomVariable(univariate_variables)

        degree1, degree2 = 3, 2
        poly1 = get_polynomial_from_variable(variable)
        poly1.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree1))
        poly1.set_coefficients(
            np.random.normal(0, 1, (poly1.indices.shape[1], 1)))
        poly2 = get_polynomial_from_variable(variable)
        poly2.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree2))
        poly2.set_coefficients(
            np.random.normal(0, 1, (poly2.indices.shape[1], 1)))

        max_degrees1 = poly1.indices.max(axis=1)
        max_degrees2 = poly2.indices.max(axis=1)
        product_coefs_1d = compute_product_coeffs_1d_for_each_variable(
            poly1, max_degrees1, max_degrees2)

        indices, coefs = multiply_multivariate_orthonormal_polynomial_expansions(
            product_coefs_1d, poly1.get_indices(), poly1.get_coefficients(),
            poly2.get_indices(), poly2.get_coefficients())

        poly3 = get_polynomial_from_variable(variable)
        poly3.set_indices(indices)
        poly3.set_coefficients(coefs)

        samples = generate_independent_random_samples(variable, 10)
        # print(poly3(samples),poly1(samples)*poly2(samples))
        assert np.allclose(poly3(samples), poly1(samples) * poly2(samples))
Example #2
    def test_multiply_pce(self):
        np.random.seed(1)
        np.set_printoptions(precision=16)
        univariate_variables = [norm(), uniform()]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        degree1, degree2 = 1, 2
        poly1 = get_polynomial_from_variable(variable)
        poly1.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree1))
        poly2 = get_polynomial_from_variable(variable)
        poly2.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree2))

        #coef1 = np.random.normal(0,1,(poly1.indices.shape[1],1))
        #coef2 = np.random.normal(0,1,(poly2.indices.shape[1],1))
        coef1 = np.arange(poly1.indices.shape[1])[:, np.newaxis]
        coef2 = np.arange(poly2.indices.shape[1])[:, np.newaxis]
        poly1.set_coefficients(coef1)
        poly2.set_coefficients(coef2)

        poly3 = poly1 * poly2
        samples = generate_independent_random_samples(variable, 10)
        assert np.allclose(poly3(samples), poly1(samples) * poly2(samples))

        for order in range(4):
            poly = poly1**order
            assert np.allclose(poly(samples), poly1(samples)**order)
Example #3
    def test_linear_gaussian_inference(self):
        # set random seed, so the data is reproducible each time
        np.random.seed(1)

        nobs = 10  # number of observations
        noise_stdev = .1  # standard deviation of noise
        x = np.linspace(0., 9., nobs)
        Amatrix = np.hstack([np.ones((nobs, 1)), x[:, np.newaxis]])

        univariate_variables = [norm(1, 1), norm(0, 4)]
        variables = IndependentMultivariateRandomVariable(univariate_variables)

        mtrue = 0.4  # true gradient
        ctrue = 2.  # true y-intercept
        true_sample = np.array([[ctrue, mtrue]]).T

        model = LinearModel(Amatrix)

        # make data
        data = noise_stdev * np.random.randn(nobs) + model(true_sample)[0, :]
        loglike = GaussianLogLike(model, data, noise_stdev**2)
        loglike = PYMC3LogLikeWrapper(loglike)

        # number of draws from the distribution
        ndraws = 5000
        # number of "burn-in points" (which we'll discard)
        nburn = min(1000, int(ndraws * 0.1))
        # number of parallel chains
        njobs = 4

        #algorithm='nuts'
        algorithm = 'metropolis'
        samples, effective_sample_size, map_sample = \
            run_bayesian_inference_gaussian_error_model(
                loglike, variables, ndraws, nburn, njobs,
                algorithm=algorithm, get_map=True, print_summary=False)

        prior_mean = np.asarray(
            [rv.mean() for rv in variables.all_variables()])
        prior_hessian = np.diag(
            [1. / rv.var() for rv in variables.all_variables()])
        noise_covariance_inv = 1. / noise_stdev**2 * np.eye(nobs)

        from pyapprox.bayesian_inference.laplace import \
                laplace_posterior_approximation_for_linear_models
        exact_mean, exact_covariance = \
            laplace_posterior_approximation_for_linear_models(
                Amatrix, prior_mean, prior_hessian,
                noise_covariance_inv, data)

        print('mcmc mean error', samples.mean(axis=1) - exact_mean)
        print('mcmc cov error', np.cov(samples) - exact_covariance)
        print('MAP sample', map_sample)
        print('exact mean', exact_mean.squeeze())
        print('exact cov', exact_covariance)
        assert np.allclose(map_sample, exact_mean)
        assert np.allclose(exact_mean.squeeze(),
                           samples.mean(axis=1),
                           atol=1e-2)
        assert np.allclose(exact_covariance, np.cov(samples), atol=1e-2)
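
For a linear model with Gaussian noise and a Gaussian prior the Laplace approximation used as the reference above is exact. A minimal numpy sketch of the standard conjugate formulas (my own check with illustrative names, not PyApprox's implementation):

import numpy as np

def linear_gaussian_posterior(Amatrix, prior_mean, prior_hessian,
                              noise_cov_inv, data):
    # posterior precision = prior precision + A^T C_noise^{-1} A
    post_precision = prior_hessian + Amatrix.T.dot(noise_cov_inv).dot(Amatrix)
    post_cov = np.linalg.inv(post_precision)
    # posterior mean = C_post (A^T C_noise^{-1} d + H_prior m_prior)
    post_mean = post_cov.dot(
        Amatrix.T.dot(noise_cov_inv).dot(data) + prior_hessian.dot(prior_mean))
    return post_mean, post_cov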
Example #4
    def test_marginalize_polynomial_chaos_expansions(self):
        univariate_variables = [uniform(-1, 2), norm(0, 1), uniform(-1, 2)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        num_vars = len(univariate_variables)

        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        degree = 2
        indices = compute_hyperbolic_indices(num_vars, degree, 1)
        poly.set_indices(indices)
        poly.set_coefficients(np.ones((indices.shape[1], 1)))

        for ii in range(num_vars):
            # Marginalize out 2 variables
            xx = np.linspace(-1, 1, 101)
            inactive_idx = np.hstack(
                (np.arange(ii), np.arange(ii + 1, num_vars)))
            marginalized_pce = marginalize_polynomial_chaos_expansion(
                poly, inactive_idx)
            mvals = marginalized_pce(xx[None, :])
            variable_ii = variable.all_variables()[ii:ii + 1]
            var_trans_ii = AffineRandomVariableTransformation(variable_ii)
            poly_ii = PolynomialChaosExpansion()
            poly_opts_ii = define_poly_options_from_variable_transformation(
                var_trans_ii)
            poly_ii.configure(poly_opts_ii)
            indices_ii = compute_hyperbolic_indices(1, degree, 1.)
            poly_ii.set_indices(indices_ii)
            poly_ii.set_coefficients(np.ones((indices_ii.shape[1], 1)))
            pvals = poly_ii(xx[None, :])
            # import matplotlib.pyplot as plt
            # plt.plot(xx, pvals)
            # plt.plot(xx, mvals, '--')
            # plt.show()
            assert np.allclose(mvals, pvals)

            # Marginalize out 1 variable
            xx = cartesian_product([xx] * 2)
            inactive_idx = np.array([ii])
            marginalized_pce = marginalize_polynomial_chaos_expansion(
                poly, inactive_idx)
            mvals = marginalized_pce(xx)
            variable_ii = variable.all_variables()[:ii] + \
                variable.all_variables()[ii+1:]
            var_trans_ii = AffineRandomVariableTransformation(variable_ii)
            poly_ii = PolynomialChaosExpansion()
            poly_opts_ii = define_poly_options_from_variable_transformation(
                var_trans_ii)
            poly_ii.configure(poly_opts_ii)
            indices_ii = compute_hyperbolic_indices(2, degree, 1.)
            poly_ii.set_indices(indices_ii)
            poly_ii.set_coefficients(np.ones((indices_ii.shape[1], 1)))
            pvals = poly_ii(xx)
            assert np.allclose(mvals, pvals)
Example #5
    def test_get_statistics(self):
        univariate_variables = [
            stats.uniform(2, 4), stats.beta(1, 1, -1, 2), stats.norm(0, 1)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        mean = variable.get_statistics('mean')
        assert np.allclose(mean.squeeze(), [4, 0, 0])

        intervals = variable.get_statistics('interval', alpha=1)
        assert np.allclose(intervals, np.array(
            [[2, 6], [-1, 1], [-np.inf, np.inf]]))
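
The statistics above can be cross-checked directly against the underlying scipy.stats frozen variables; a small standalone sketch of that check using the same marginals:

import numpy as np
from scipy import stats

marginals = [stats.uniform(2, 4), stats.beta(1, 1, -1, 2), stats.norm(0, 1)]
means = np.array([rv.mean() for rv in marginals])           # [4., 0., 0.]
intervals = np.array([rv.interval(1) for rv in marginals])  # full support of each marginal
assert np.allclose(means, [4, 0, 0])
assert np.allclose(intervals, [[2, 6], [-1, 1], [-np.inf, np.inf]])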
Example #6
    def test_bayesian_importance_sampling_avar(self):
        np.random.seed(1)
        nrandom_vars = 2
        Amat = np.array([[-0.5, 1]])
        noise_std = 0.1
        prior_variable = IndependentMultivariateRandomVariable(
            [stats.norm(0, 1)] * nrandom_vars)
        prior_mean = prior_variable.get_statistics('mean')
        prior_cov = np.diag(prior_variable.get_statistics('var')[:, 0])
        prior_cov_inv = np.linalg.inv(prior_cov)
        noise_cov_inv = np.eye(Amat.shape[0]) / noise_std**2
        true_sample = np.array([.4] * nrandom_vars)[:, None]
        collected_obs = Amat.dot(true_sample)
        collected_obs += np.random.normal(0, noise_std, (collected_obs.shape))
        exact_post_mean, exact_post_cov = \
            laplace_posterior_approximation_for_linear_models(
                Amat, prior_mean, prior_cov_inv, noise_cov_inv,
                collected_obs)

        chol_factor = np.linalg.cholesky(exact_post_cov)
        chol_factor_inv = np.linalg.inv(chol_factor)

        def g_model(samples):
            return np.exp(
                np.sum(chol_factor_inv.dot(samples - exact_post_mean),
                       axis=0))[:, None]

        nsamples = int(1e6)
        prior_samples = generate_independent_random_samples(
            prior_variable, nsamples)
        posterior_samples = chol_factor.dot(
            np.random.normal(0, 1, (nrandom_vars, nsamples))) + exact_post_mean

        g_mu, g_sigma = 0, np.sqrt(nrandom_vars)
        f, f_cdf, f_pdf, VaR, CVaR, ssd, ssd_disutil = \
            get_lognormal_example_exact_quantities(g_mu, g_sigma)

        beta = .1
        cvar_exact = CVaR(beta)

        cvar_mc = conditional_value_at_risk(g_model(posterior_samples), beta)

        prior_pdf = prior_variable.pdf
        post_pdf = stats.multivariate_normal(mean=exact_post_mean[:, 0],
                                             cov=exact_post_cov).pdf
        weights = post_pdf(prior_samples.T) / prior_pdf(prior_samples)[:, 0]
        weights /= weights.sum()
        cvar_im = conditional_value_at_risk(g_model(prior_samples), beta,
                                            weights)
        # print(cvar_exact, cvar_mc, cvar_im)
        assert np.allclose(cvar_exact, cvar_mc, rtol=1e-3)
        assert np.allclose(cvar_exact, cvar_im, rtol=2e-3)
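
The weights in the test above implement self-normalized importance sampling: prior samples are reweighted by the posterior-to-prior density ratio and the weights are normalized to sum to one, so posterior expectations (here the CVaR) can be estimated from prior samples. A generic sketch of the estimator for a plain mean (the helper name is illustrative):

import numpy as np

def self_normalized_importance_mean(values, target_pdf_vals, proposal_pdf_vals):
    # w_i proportional to target(x_i)/proposal(x_i), normalized to sum to one
    weights = target_pdf_vals / proposal_pdf_vals
    weights /= weights.sum()
    return np.sum(weights * values)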
Example #7
def setup_multi_level_advection_diffusion_benchmark(
        nvars, corr_len, max_eval_concurrency=1):
    r"""
    Compute functionals of the transient advection-diffusion equation (with one
    configuration variable that controls the two spatial mesh resolutions and
    the timestep). An integer increase in the value of the configuration
    variable will increase all three numerical discretization parameters by the
    same integer.

    See :func:`pyapprox_dev.advection_diffusion_wrappers.setup_advection_diffusion_benchmark` for details on function arguments and output.
    """
    from scipy import stats
    from pyapprox.models.wrappers import TimerModelWrapper, PoolModel, \
        WorkTrackingModel
    from pyapprox.variables import IndependentMultivariateRandomVariable
    from pyapprox.benchmarks.benchmarks import Benchmark
    from pyapprox.models.wrappers import MultiLevelWrapper
    univariate_variables = [stats.uniform(-np.sqrt(3), 2*np.sqrt(3))]*nvars
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    final_time, degree = 1.0, 1
    options = {'corr_len': corr_len}
    base_model = AdvectionDiffusionModel(
        final_time, degree, qoi_functional_misc,
        second_order_timestepping=False, options=options)
    multilevel_model = MultiLevelWrapper(
        base_model, base_model.num_config_vars)
    # add wrapper to allow execution times to be captured
    timer_model = TimerModelWrapper(multilevel_model, base_model)
    pool_model = PoolModel(
        timer_model, max_eval_concurrency, base_model=base_model)
    model = WorkTrackingModel(
        pool_model, base_model, multilevel_model.num_config_vars)
    attributes = {'fun': model, 'variable': variable,
                  'multi_level_model': multilevel_model}
    return Benchmark(attributes)
Example #8
    def test_evaluate_multivariate_mixed_basis_pce_moments(self):
        degree = 2

        alpha_stat, beta_stat = 2, 3
        univariate_variables = [beta(alpha_stat, beta_stat, 0, 1), norm(-1, 2)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        num_vars = len(univariate_variables)

        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        univariate_quadrature_rules = [
            partial(gauss_jacobi_pts_wts_1D,
                    alpha_poly=beta_stat - 1,
                    beta_poly=alpha_stat - 1), gauss_hermite_pts_wts_1D
        ]
        samples, weights = get_tensor_product_quadrature_rule(
            degree + 1, num_vars, univariate_quadrature_rules,
            var_trans.map_from_canonical_space)

        coef = np.ones((indices.shape[1], 2))
        coef[:, 1] *= 2
        poly.set_coefficients(coef)
        basis_matrix = poly.basis_matrix(samples)
        values = basis_matrix.dot(coef)
        true_mean = values.T.dot(weights)
        true_variance = (values.T**2).dot(weights) - true_mean**2

        assert np.allclose(poly.mean(), true_mean)
        assert np.allclose(poly.variance(), true_variance)
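
Because the basis is orthonormal in the canonical variables, the PCE moments also follow directly from the coefficients: the mean is the coefficient of the constant term and the variance is the sum of squares of the remaining coefficients (per QoI column). A short check that could be appended inside the test above, assuming the constant term is stored as an all-zero column of poly.indices:

        constant_term = np.all(poly.indices == 0, axis=0)
        assert np.allclose(poly.mean(), poly.coefficients[constant_term, :])
        assert np.allclose(
            poly.variance(),
            np.sum(poly.coefficients[~constant_term, :]**2, axis=0))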
Example #9
    def test_batch_kl_oed(self):
        """
        No observations collected to inform subsequent designs
        """
        np.random.seed(1)
        nrandom_vars = 1
        noise_std = 1
        ndesign = 4
        nouter_loop_samples = 10000
        ninner_loop_samples = 31

        ncandidates = 11
        design_candidates = np.linspace(-1, 1, ncandidates)[None, :]

        def obs_fun(samples):
            assert design_candidates.ndim == 2
            assert samples.ndim == 2
            Amat = design_candidates.T
            return Amat.dot(samples).T

        prior_variable = IndependentMultivariateRandomVariable(
            [stats.norm(0, 1)] * nrandom_vars)

        x_quad, w_quad = gauss_hermite_pts_wts_1D(ninner_loop_samples)

        def generate_inner_prior_samples_gauss(n):
            # use precomputed samples to avoid the cost of regenerating them
            assert n == x_quad.shape[0]
            return x_quad[None, :], w_quad

        generate_inner_prior_samples = generate_inner_prior_samples_gauss

        # Define initial design
        init_design_indices = np.array([ncandidates // 2])
        oed = BayesianBatchKLOED(design_candidates, obs_fun, noise_std,
                                 prior_variable, nouter_loop_samples,
                                 ninner_loop_samples,
                                 generate_inner_prior_samples)
        oed.populate()
        oed.set_collected_design_indices(init_design_indices)

        for ii in range(len(init_design_indices), ndesign):
            # Compute the reference utilities before calling oed.update_design()
            # because update_design() updates oed.collected_design_indices and
            # thus changes the problem
            d_utility_vals = np.zeros(ncandidates)
            for kk in range(ncandidates):
                if kk not in oed.collected_design_indices:
                    new_design = np.hstack(
                        (design_candidates[:, oed.collected_design_indices],
                         design_candidates[:, kk:kk + 1]))
                    Amat = new_design.T
                    d_utility_vals[kk] = d_optimal_utility(Amat, noise_std)

            utility_vals, selected_indices = oed.update_design()
            # ignore entries of previously collected data
            II = np.where(d_utility_vals > 0)
            print((np.absolute(d_utility_vals[II] - utility_vals[II]) /
                   d_utility_vals[II]).max())
            assert np.allclose(d_utility_vals[II], utility_vals[II], rtol=4e-2)
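
For a linear-Gaussian model with a standard-normal prior, the expected information gain (expected KL divergence between posterior and prior) has the closed form 0.5*logdet(I + A^T A / sigma^2); presumably this, or a quantity close to it, is what the reference d_optimal_utility values reduce to. A hedged standalone sketch with an illustrative name:

import numpy as np

def linear_gaussian_expected_info_gain(Amat, noise_std):
    # 0.5 * logdet of the posterior precision relative to a standard-normal prior
    nvars = Amat.shape[1]
    post_precision = np.eye(nvars) + Amat.T.dot(Amat) / noise_std**2
    return 0.5 * np.linalg.slogdet(post_precision)[1]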
Example #10
def get_3_recursive_polynomial_components_multiple_qoi():
    """
    First model has multiple qoi which are used as coupling variables for
    the second model. Third model has multiple random variables

    f0(z0) -- f1(f00, f01, z3) --  f2(z1, z2, f1)

    f0(z0)         = [a00*z0**2, a00*z0]
    f1(f0, z3)     = a10*f00(z0)**2 + a11*f01(z0)**2 + a12*z3**2
    f2(z1, z2, f1) = a20*(z1**2 + z2) + a21*f1(f0, z3)**2


    The global component ids of the coupling variables entering f1
    and the qoi ids of the upstream component are stored together, e.g.
    global_coupling_component_indices[1] = [0, 0, 0, 1].
    This means that the first coupling variable of f1 is qoi 0 of
    component 0 and the second is qoi 1 of component 0.
    """
    local_random_var_indices = [[0], [2], [0, 1]]
    local_coupling_var_indices_in = [[], [0, 1], [2]]
    global_random_var_indices = [[0], [3], [1, 2]]
    global_coupling_component_indices = [[], [0, 0, 0, 1], [1, 0]]
    ncomponents = len(local_random_var_indices)

    nlocal_vars = [
        len(local_random_var_indices[ii]) +
        len(local_coupling_var_indices_in[ii]) for ii in range(ncomponents)
    ]
    aa = [(ii + 2) * np.arange(1, 1 + nlocal_vars[ii])[np.newaxis, :]
          for ii in range(ncomponents)]

    def f1(x):
        return np.hstack([
            np.sum(aa[0].dot(x**2), axis=0)[:, np.newaxis],
            np.sum(aa[0].dot(x), axis=0)[:, np.newaxis]
        ])

    def f2(x):
        return np.sum(aa[1].dot(x**2), axis=0)[:, np.newaxis]

    def f3(x):
        return np.sum(aa[2].dot(x**2), axis=0)[:, np.newaxis]

    funs = [f1, f2, f3]

    labels = [r"$M_%d$" % ii for ii in range(ncomponents)]
    graph_data = {
        'label': labels,
        'functions': funs,
        'global_random_var_indices': global_random_var_indices,
        'local_random_var_indices': local_random_var_indices,
        'local_coupling_var_indices_in': local_coupling_var_indices_in,
        'global_coupling_component_indices': global_coupling_component_indices
    }
    graph = build_chain_graph(ncomponents, graph_data)

    nvars = np.unique(np.concatenate(global_random_var_indices)).shape[0]
    univariate_variables = [stats.uniform(0, 1)] * nvars
    variables = IndependentMultivariateRandomVariable(univariate_variables)
    return graph, variables, graph_data
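
The flat lists in global_coupling_component_indices pack (upstream component id, upstream qoi id) pairs, as described in the docstring. A tiny sketch (an illustrative helper under that reading, not part of the original code) that unpacks such a list into explicit pairs:

def decode_coupling_indices(flat_indices):
    # [0, 0, 0, 1] -> [(0, 0), (0, 1)]: qoi 0 and qoi 1 of component 0
    return [(flat_indices[2*ii], flat_indices[2*ii + 1])
            for ii in range(len(flat_indices) // 2)]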
Example #11
def get_chaudhuri_3_component_system():
    def fn1(samples):
        x = samples[:3]
        C = samples[3:]
        A1 = np.array([[9.7236, 0.2486]])
        C1 = 0.01 * (x[0]**2 + 2 * x[1] -
                     x[2]) + A1.dot(C)[0, :] / np.linalg.norm(C, axis=0)
        y1 = 0.1 * x[0] + x[1] - 0.5 * x[2] + 10 * C1
        vals = np.vstack([C1, y1]).T
        return vals

    def fn2(samples):
        x = samples[:3]
        C = samples[3:]
        A2 = np.array([[0.2486, 9.7764]])
        C2 = 0.01 * (x[0] * x[1] + x[1]**2 +
                     x[2]) + A2.dot(C)[0, :] / np.linalg.norm(C, axis=0)
        y2 = 5 * x[1] - x[2] - 5 * C2
        return np.vstack([C2, y2]).T

    def fn3(samples):
        # samples y1, y2
        return (samples[0:1, :] + samples[1:2, :]).T

    nexog_vars = 5
    variable = IndependentMultivariateRandomVariable([stats.norm(1, 0.1)] *
                                                     nexog_vars)

    # 0.02+(9.7236*x+0.2486*y)/sqrt[x^2+y^2]-x=0
    # 0.03+(9.7764*y+0.2486*x)/sqrt[x^2+y^2]-y=0

    return [fn1, fn2, fn3], variable
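
The commented equations above are the coupling fixed-point conditions at nominal values of the exogenous variables. A minimal sketch (my own illustration, not part of the benchmark) of solving them directly with scipy:

import numpy as np
from scipy.optimize import fsolve

def coupling_residual(c):
    x, y = c
    r = np.sqrt(x**2 + y**2)
    return [0.02 + (9.7236*x + 0.2486*y)/r - x,
            0.03 + (9.7764*y + 0.2486*x)/r - y]

coupled_solution = fsolve(coupling_residual, x0=[10., 10.])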
Example #12
    def test_pce_jacobian(self):
        degree = 2

        alpha_stat, beta_stat = 2, 3
        univariate_variables = [beta(alpha_stat, beta_stat, 0, 1), norm(-1, 2)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        num_vars = len(univariate_variables)

        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        sample = generate_independent_random_samples(variable, 1)

        coef = np.ones((indices.shape[1], 2))
        coef[:, 1] *= 2
        poly.set_coefficients(coef)

        jac = poly.jacobian(sample)
        from pyapprox.optimization import approx_jacobian
        fd_jac = approx_jacobian(lambda x: poly(x[:, np.newaxis])[0, :],
                                 sample[:, 0])
        assert np.allclose(jac, fd_jac)
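
approx_jacobian serves as a finite-difference reference; a minimal forward-difference sketch of the same idea (step size and helper name are illustrative):

import numpy as np

def forward_difference_jacobian(fun, x, eps=1e-7):
    # one column of the Jacobian per perturbed input coordinate
    f0 = fun(x)
    jac = np.empty((f0.shape[0], x.shape[0]))
    for ii in range(x.shape[0]):
        x_pert = x.copy()
        x_pert[ii] += eps
        jac[:, ii] = (fun(x_pert) - f0) / eps
    return jac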
Example #13
    def test_compute_multivariate_orthonormal_basis_product(self):
        univariate_variables = [norm(), uniform()]
        variable = IndependentMultivariateRandomVariable(univariate_variables)

        poly1 = get_polynomial_from_variable(variable)
        poly2 = get_polynomial_from_variable(variable)

        max_degrees1, max_degrees2 = [3, 3], [2, 2]
        product_coefs_1d = compute_product_coeffs_1d_for_each_variable(
            poly1, max_degrees1, max_degrees2)

        for ii in range(max_degrees1[0]):
            for jj in range(max_degrees1[1]):
                poly_index_ii = np.array([ii, jj])
                poly_index_jj = np.array([ii, jj])

                poly1.set_indices(poly_index_ii[:, np.newaxis])
                poly1.set_coefficients(np.ones([1, 1]))
                poly2.set_indices(poly_index_jj[:, np.newaxis])
                poly2.set_coefficients(np.ones([1, 1]))

                product_indices, product_coefs = \
                    compute_multivariate_orthonormal_basis_product(
                        product_coefs_1d, poly_index_ii, poly_index_jj,
                        max_degrees1, max_degrees2)

                poly_prod = get_polynomial_from_variable(variable)
                poly_prod.set_indices(product_indices)
                poly_prod.set_coefficients(product_coefs)

                samples = generate_independent_random_samples(variable, 5)
                # print(poly_prod(samples), poly1(samples)*poly2(samples))
                assert np.allclose(poly_prod(samples),
                                   poly1(samples) * poly2(samples))
Example #14
def define_chemical_reaction_random_variables():
    nominal_vars, ranges = get_chemical_reaction_variable_ranges()
    univariate_variables = [
        stats.uniform(ranges[2*ii], ranges[2*ii+1]-ranges[2*ii])
        for ii in range(len(ranges)//2)]
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    return variable
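
scipy.stats.uniform takes loc/scale rather than lower/upper bounds, so an interval [lb, ub] maps to stats.uniform(lb, ub - lb); the comprehension above applies exactly that to the flattened [lb0, ub0, lb1, ub1, ...] ranges array. A tiny illustration with hypothetical bounds:

import numpy as np
from scipy import stats

lb, ub = 0.1, 0.3
rv = stats.uniform(lb, ub - lb)
assert np.allclose(rv.interval(1), (lb, ub))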
Example #15
    def __init__(self, variable, enforce_bounds=False):
        """
        Variable uniquness dependes on both the type of random variable
        e.g. beta, gaussian, etc. and the parameters of that distribution
        e.g. loc and scale parameters as well as any additional parameters
        """
        if (type(variable) != IndependentMultivariateRandomVariable):
            variable = IndependentMultivariateRandomVariable(variable)
        self.variable = variable
        self.enforce_bounds = enforce_bounds
        self.identity_map_indices = None

        self.scale_parameters = np.empty((self.variable.nunique_vars, 2))
        for ii in range(self.variable.nunique_vars):
            var = self.variable.unique_variables[ii]
            # name, scale_dict, __ = get_distribution_info(var)
            # copy is essential here because code below modifies scale
            # loc, scale = scale_dict['loc'].copy(), scale_dict['scale'].copy()
            # if (is_bounded_continuous_variable(var) or
            #     (type(var.dist) == float_rv_discrete and
            #      var.dist.name != 'discrete_chebyshev')):
            #     lb, ub = -1, 1
            #     scale /= (ub-lb)
            #     loc = loc-scale*lb
            self.scale_parameters[ii, :] = transform_scale_parameters(var)
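
The stored loc/scale pairs define the affine map to the canonical domain, canonical = (x - loc)/scale, chosen so that bounded variables land on [-1, 1] (unbounded variables such as Gaussians map to their standard form). A minimal illustration of that convention for a uniform variable on [2, 6] (my own sketch, not the library code):

import numpy as np

lb, ub = 2., 6.
loc, scale = (lb + ub) / 2, (ub - lb) / 2
x = np.linspace(lb, ub, 5)
canonical_x = (x - loc) / scale
assert canonical_x.min() >= -1 and canonical_x.max() <= 1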
Example #16
def preconditioned_barycentric_weights():
    nmasses = 20
    xk = np.array(range(nmasses), dtype='float')
    pk = np.ones(nmasses) / nmasses
    var1 = float_rv_discrete(name='float_rv_discrete', values=(xk, pk))()
    univariate_variables = [var1]
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    var_trans = AffineRandomVariableTransformation(variable)
    growth_rule = partial(constant_increment_growth_rule, 2)
    quad_rule = get_univariate_leja_quadrature_rule(var1, growth_rule)
    samples = quad_rule(3)[0]
    num_samples = samples.shape[0]
    poly = PolynomialChaosExpansion()
    poly_opts = define_poly_options_from_variable_transformation(var_trans)
    poly_opts['numerically_generated_poly_accuracy_tolerance'] = 1e-5
    poly.configure(poly_opts)
    poly.set_indices(np.arange(num_samples))

    # precond_weights = np.sqrt(
    #    (poly.basis_matrix(samples[np.newaxis,:])**2).mean(axis=1))
    precond_weights = np.ones(num_samples)

    bary_weights = compute_barycentric_weights_1d(
        samples, interval_length=samples.max() - samples.min())

    def barysum(x, y, w, f):
        x = x[:, np.newaxis]
        y = y[np.newaxis, :]
        temp = w * f / (x - y)
        return np.sum(temp, axis=1)

    def function(x):
        return np.cos(2 * np.pi * x)

    y = samples
    print(samples)
    w = precond_weights * bary_weights
    # x = np.linspace(-3,3,301)
    x = np.linspace(-1, 1, 301)
    f = function(y) / precond_weights

    # cannot interpolate on data
    II = []
    for ii, xx in enumerate(x):
        if xx in samples:
            II.append(ii)
    x = np.delete(x, II)

    r1 = barysum(x, y, w, f)
    r2 = barysum(x, y, w, 1 / precond_weights)
    interp_vals = r1 / r2
    # import matplotlib.pyplot as plt
    # plt.plot(x, interp_vals, 'k')
    # plt.plot(samples, function(samples), 'ro')
    # plt.plot(x, function(x), 'r--')
    # plt.plot(samples,function(samples),'ro')
    # print(num_samples)
    # print(precond_weights)
    print(np.linalg.norm(interp_vals - function(x)))
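
barysum evaluates the two sums of the second (true) barycentric form, p(x) = sum_j w_j f_j/(x - x_j) / sum_j w_j/(x - x_j), which is how interp_vals = r1/r2 is assembled above. A tiny standalone check on three equispaced nodes, whose barycentric weights are proportional to [1, -2, 1]:

import numpy as np

nodes = np.array([0., 1., 2.])
bweights = np.array([1., -2., 1.])
fvals = nodes**2
xq = np.array([0.5, 1.5])
numer = np.sum(bweights * fvals / (xq[:, None] - nodes), axis=1)
denom = np.sum(bweights / (xq[:, None] - nodes), axis=1)
assert np.allclose(numer / denom, xq**2)   # the quadratic is reproduced exactly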
Example #17
def define_random_oscillator_random_variables():
    ranges = np.array([0.08, 0.12, 0.03, 0.04, 0.08, 0.12, 0.8, 1.2,
                       0.45, 0.55, -0.05, 0.05], np.double)
    univariate_variables = [
        stats.uniform(ranges[2*ii], ranges[2*ii+1]-ranges[2*ii])
        for ii in range(len(ranges)//2)]
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    return variable
Example #18
def define_coupled_springs_random_variables():
    ranges = np.array([0.9, 1.1,  1.4, 1.6,  7., 9.,  39., 41.,
                       0.4, 0.6,  0.9, 1.1,  0.7, 0.9,  0.4, 0.6,
                       0.4, 0.6,  -0.1, 0.1,  2.2, 2.3,  -0.1, 0.1],
                      np.double)
    univariate_variables = [
        stats.uniform(ranges[2*ii], ranges[2*ii+1]-ranges[2*ii])
        for ii in range(len(ranges)//2)]
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    return variable
Example #19
    def test_independent_discrete_samples(self):

        variable = IndependentMultivariateRandomVariable(
            self.discrete_variables)
        var_trans = AffineRandomVariableTransformation(variable)

        num_samples = int(1e6)
        samples = generate_independent_random_samples(var_trans.variable,
                                                      num_samples)
        mean = samples.mean(axis=1)
        assert np.allclose(mean, self.discrete_mean, rtol=1e-2)
Example #20
def define_piston_random_variables():
    M = stats.uniform(loc=30., scale=30.)
    S = stats.uniform(loc=0.005, scale=0.015)
    V_0 = stats.uniform(loc=0.002, scale=0.008)
    k = stats.uniform(loc=1000., scale=4000.)
    P_0 = stats.uniform(loc=90000., scale=20000.)
    T_a = stats.uniform(loc=290., scale=6.)
    T_0 = stats.uniform(loc=340., scale=20.)

    variable = IndependentMultivariateRandomVariable([M, S, V_0, k,
                                                      P_0, T_a, T_0])
    return variable
Example #21
    def test_define_mixed_tensor_product_random_variable_I(self):
        """
        Construct a multivariate random variable from the tensor-product of
        different one-dimensional variables assuming that a given variable type
        the distribution parameters ARE the same
        """
        univariate_variables = [
            stats.uniform(-1, 2), stats.beta(1, 1, -1, 2), stats.norm(0, 1),
            stats.uniform(-1, 2), stats.uniform(-1, 2),
            stats.beta(1, 1, -1, 2)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)

        assert len(variable.unique_variables) == 3
        assert lists_of_arrays_equal(variable.unique_variable_indices,
                                     [[0, 3, 4], [1, 5], [2]])
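
A hedged sketch of how the uniqueness bookkeeping can be reproduced: two frozen scipy.stats variables can be treated as the same variable when their distribution name and parameters match. The helper below is illustrative only, not the library's implementation:

from scipy import stats

def variable_signature(rv):
    # frozen scipy variables expose the distribution and its parameters
    return (rv.dist.name, rv.args, tuple(sorted(rv.kwds.items())))

univariate_variables = [
    stats.uniform(-1, 2), stats.beta(1, 1, -1, 2), stats.norm(0, 1),
    stats.uniform(-1, 2), stats.uniform(-1, 2), stats.beta(1, 1, -1, 2)]
signatures = [variable_signature(rv) for rv in univariate_variables]
assert len(set(signatures)) == 3   # three distinct variable types/parameters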
Example #22
def define_nondim_hastings_ecology_random_variables():
    nominal_sample = get_nondim_hastings_ecology_nominal_values()
    ranges = np.zeros((2*len(nominal_sample)), np.double)
    ranges[::2] = nominal_sample*0.95
    ranges[1::2] = nominal_sample*1.05
    ranges[:2] = 4.9, 5.1
    ranges[12:14] = 0, 1
    ranges[14:16] = 0, 1
    ranges[16:18] = 5, 12
    univariate_variables = [
        stats.uniform(ranges[2*ii], ranges[2*ii+1]-ranges[2*ii])
        for ii in range(len(ranges)//2)]
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    return variable
Example #23
def define_wing_weight_random_variables():
    univariate_variables = [
        stats.uniform(150, 50),
        stats.uniform(220, 80),
        stats.uniform(6, 4),
        stats.uniform(-10, 20),
        stats.uniform(16, 29),
        stats.uniform(0.5, 0.5),
        stats.uniform(0.08, 0.1),
        stats.uniform(2.5, 3.5),
        stats.uniform(1700, 800),
        stats.uniform(0.025, 0.055)]
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    return variable
Example #24
    def test_add_pce(self):
        univariate_variables = [norm(), uniform()]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        degree1, degree2 = 2, 3
        poly1 = get_polynomial_from_variable(variable)
        poly1.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree1))
        poly1.set_coefficients(
            np.random.normal(0, 1, (poly1.indices.shape[1], 1)))
        poly2 = get_polynomial_from_variable(variable)
        poly2.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree2))
        poly2.set_coefficients(
            np.random.normal(0, 1, (poly2.indices.shape[1], 1)))

        poly3 = poly1 + poly2 + poly2
        samples = generate_independent_random_samples(variable, 10)
        # print(poly3(samples), poly1(samples) + 2*poly2(samples))
        assert np.allclose(poly3(samples), poly1(samples) + 2 * poly2(samples))

        poly4 = poly1 - poly2
        samples = generate_independent_random_samples(variable, 10)
        # print(poly4(samples), poly1(samples) - poly2(samples))
        assert np.allclose(poly4(samples), poly1(samples) - poly2(samples))
Example #25
def define_beam_random_variables():
    # traditional parameterization
    X = ss.norm(loc=500, scale=np.sqrt(100)**2)
    Y = ss.norm(loc=1000, scale=np.sqrt(100)**2)
    E = ss.norm(loc=2.9e7, scale=np.sqrt(1.45e6)**2)
    R = ss.norm(loc=40000, scale=np.sqrt(2000)**2)

    # increased total variance contribution from E
    X = ss.norm(loc=500, scale=np.sqrt(100)**2/10)
    Y = ss.norm(loc=1000, scale=np.sqrt(100)**2/10)
    E = ss.norm(loc=2.9e7, scale=np.sqrt(1.45e6)**2)
    R = ss.norm(loc=40000, scale=np.sqrt(2000)**2/10)

    variable = IndependentMultivariateRandomVariable([X, Y, E, R])
    return variable
Example #26
def marginalize_polynomial_chaos_expansion(poly, inactive_idx, center=True):
    """
    This function is not optimal. It will recreate the options
    used to configure the polynomial. Any recursion coefficients
    already calculated that are still relevant will need to be recomputed.
    This is probably not a large overhead though.
    marginalized_pce = PolynomialChaosExpansion()
    # poly.config_opts.copy will not work
    opts = copy.deepcopy(poly.config_opts)
    all_variables = poly.var_trans.variable.all_variables()
    active_idx = np.setdiff1d(np.arange(poly.num_vars()), inactive_idx)
    active_variables = IndependentMultivariateRandomVariable(
        [all_variables[ii] for ii in active_idx])
    opts['var_trans'] = AffineRandomVariableTransformation(active_variables)

    if opts['poly_types'] is not None:
        marginalized_var_nums = -np.ones(poly.num_vars())
        marginalized_var_nums[active_idx] = np.arange(active_idx.shape[0])
        keys_to_delete = []
        for key, poly_opts in opts['poly_types'].items():
            var_nums = poly_opts['var_nums']
            poly_opts['var_nums'] = np.array(
                [marginalized_var_nums[v] for v in var_nums
                 if v in active_idx], dtype=int)
            if poly_opts['var_nums'].shape[0] == 0:
                keys_to_delete.append(key)
        for key in keys_to_delete:
            del opts['poly_types'][key]
    # else # no need to do anything same basis is used for all variables

    marginalized_pce.configure(opts)
    if poly.indices is not None:
        marginalized_array_indices = []
        for ii, index in enumerate(poly.indices.T):
            if ((index.sum() == 0 and center is False) or
                    (np.any(index[active_idx]) and
                     not np.any(index[inactive_idx] > 0))):
                marginalized_array_indices.append(ii)
        marginalized_pce.set_indices(poly.indices[np.ix_(
            active_idx, np.array(marginalized_array_indices))])
        if poly.coefficients is not None:
            marginalized_pce.set_coefficients(
                poly.coefficients[marginalized_array_indices, :].copy())
    return marginalized_pce
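
The index filter above relies on orthonormality: phi_0 = 1 and E[phi_k(z)] = 0 for k > 0, so integrating out the inactive variables removes every term with a nonzero degree in any inactive variable (the center flag additionally controls whether the constant term is kept). A toy illustration of which columns survive for a hypothetical 2-variable index set with inactive_idx = [1]:

import numpy as np

toy_indices = np.array([[0, 1, 0, 2, 1],
                        [0, 0, 1, 0, 1]])
inactive_idx = np.array([1])
survives = ~np.any(toy_indices[inactive_idx, :] > 0, axis=0)
# survives -> [True, True, False, True, False]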
Example #27
    def test_independent_mixed_continuous_discrete_samples(self):

        univariate_variables = self.continuous_variables + self.discrete_variables
        I = np.random.permutation(len(univariate_variables))
        univariate_variables = [univariate_variables[ii] for ii in I]

        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)

        num_samples = int(5e6)
        samples = generate_independent_random_samples(var_trans.variable,
                                                      num_samples)
        mean = samples.mean(axis=1)

        true_mean = np.concatenate([self.continuous_mean,
                                    self.discrete_mean])[I]
        assert np.allclose(mean, true_mean, atol=1e-2)
Example #28
    def test_adaptive_leja_sampling_II(self):
        """
        Using a variance refinement indicator on an additive function can
        lead to some polynomial terms with more than one active variable.
        This is because the errors in these terms will be small but not
        necessarily near machine precision. Consequently, test that the
        index set is additive except for terms corresponding to the
        subspace [1, 1], with only moderate accuracy (1e-6).
        """
        num_vars = 2
        alph = 5
        bet = 5.
        error_tol = 1e-7

        # randomize coefficients of random variables to create anisotropy
        a = np.random.uniform(0, 1, (num_vars, 1))

        def function(x):
            vals = [
                np.cos(np.pi * a[ii] * x[ii, :]) for ii in range(x.shape[0])
            ]
            vals = np.array(vals).sum(axis=0)[:, np.newaxis]
            return vals

        # function = lambda x: np.sum(a*x**2,axis=0)[:,np.newaxis]

        var_trans = AffineRandomVariableTransformation(
            IndependentMultivariateRandomVariable([beta(alph, bet, 0, 1)],
                                                  [np.arange(num_vars)]))

        candidate_samples = -np.cos(
            np.random.uniform(0, np.pi, (num_vars, int(1e4))))
        pce = AdaptiveLejaPCE(num_vars,
                              candidate_samples,
                              factorization_type='fast')
        error, pce_slow = self.helper(function, var_trans, pce, np.inf,
                                      error_tol)
        print('leja sampling error', error)
        assert error < 10 * error_tol

        # assert index is additive except for [1,1] subspace terms
        subspace_num_active_vars = np.count_nonzero(pce.subspace_indices,
                                                    axis=0)
        assert np.where(subspace_num_active_vars > 1)[0].shape[0] == 1
Example #29
    def test_sobol_sequence_variable_transformation(self):
        from pyapprox.variables import IndependentMultivariateRandomVariable
        from scipy.stats import uniform
        variables = IndependentMultivariateRandomVariable(
            [uniform(-1, 2), uniform(0, 1), uniform(0, 3)])
        samples = sobol_sequence(3, 10, variable=variables)
        true_samples = np.asarray(
            [[0, 0, 0], [0.5, 0.5, 0.5],
             [0.75, 0.25, 0.25], [0.25, 0.75, 0.75],
             [0.375, 0.375, 0.625], [0.875, 0.875, 0.125],
             [0.625, 0.125, 0.875], [0.125, 0.625, 0.375],
             [0.1875, 0.3125, 0.9375], [0.6875, 0.8125, 0.4375]]).T
        true_samples[0, :] = true_samples[0, :] * 2 - 1
        true_samples[2, :] = true_samples[2, :] * 3
        assert np.allclose(true_samples, samples)
Example #30
def define_beam_random_variables():
    # traditional parameterization
    X = stats.norm(loc=500, scale=np.sqrt(100)**2)
    Y = stats.norm(loc=1000, scale=np.sqrt(100)**2)
    E = stats.norm(loc=2.9e7, scale=np.sqrt(1.45e6)**2)
    R = stats.norm(loc=40000, scale=np.sqrt(2000)**2)

    # increased total variance contribution from E
    # X = stats.norm(loc=500,scale=np.sqrt(100)**2/10)
    # Y = stats.norm(loc=1000,scale=np.sqrt(100)**2/10)
    # E = stats.norm(loc=2.9e7,scale=np.sqrt(1.45e6)**2)
    # R = stats.norm(loc=40000,scale=np.sqrt(2000)**2/10)

    from scipy.optimize import Bounds
    design_bounds = Bounds([1, 1], [4, 4])
    design_variable = DesignVariable(design_bounds)

    variable = IndependentMultivariateRandomVariable([X, Y, E, R])
    return variable, design_variable