def test_sample_based_apc_orthonormality(self):
        num_vars = 1
        alpha_stat = 2
        beta_stat = 5
        degree = 2

        pce_var_trans = define_iid_random_variable_transformation(
            stats.uniform(0, 1), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(
            pce_var_trans)

        random_var_trans = define_iid_random_variable_transformation(
            stats.beta(alpha_stat, beta_stat), num_vars)

        num_moment_samples = 10000
        moment_matrix_samples = generate_independent_random_samples(
            random_var_trans.variable, num_moment_samples)

        compute_moment_matrix_function = partial(
            compute_moment_matrix_from_samples, samples=moment_matrix_samples)

        pce = APC(compute_moment_matrix_function)
        pce.configure(pce_opts)

        num_samples = 10000
        samples = generate_independent_random_samples(
            random_var_trans.variable, num_samples)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        pce.set_indices(indices)
        basis_matrix = pce.basis_matrix(samples)
        assert np.allclose(np.dot(basis_matrix.T, basis_matrix) / num_samples,
                           np.eye(basis_matrix.shape[1]),
                           atol=1e-1)
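A minimal, self-contained sketch of the same Monte Carlo orthonormality check the assertion above performs (the empirical Gram matrix approaches the identity when sampling from the basis' own measure), using numpy's Legendre basis on U(-1, 1) rather than pyapprox's APC; the sqrt(2n+1) scaling is a standard Legendre normalization, not part of the test above.

import numpy as np
from numpy.polynomial.legendre import legvander

nsamples, degree = 100000, 2
samples = np.random.uniform(-1, 1, nsamples)
# columns are P_0(x), ..., P_degree(x)
basis = legvander(samples, degree)
# scale so each column is orthonormal w.r.t. the U(-1, 1) density
basis = basis * np.sqrt(2 * np.arange(degree + 1) + 1)
gram = basis.T.dot(basis) / nsamples
assert np.allclose(gram, np.eye(degree + 1), atol=1e-1)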
Example #2
def get_AB_sample_sets_for_sobol_sensitivity_analysis(variables,
                                                      nsamples,
                                                      method,
                                                      qmc_start_index=0):
    if method == 'random':
        samplesA = generate_independent_random_samples(variables, nsamples)
        samplesB = generate_independent_random_samples(variables, nsamples)
    elif method in ('halton', 'sobol'):
        nvars = variables.num_vars()
        if method == 'halton':
            qmc_samples = halton_sequence(2 * nvars, qmc_start_index,
                                          qmc_start_index + nsamples)
        else:
            qmc_samples = sobol_sequence(2 * nvars, nsamples, qmc_start_index)
        samplesA = qmc_samples[:nvars, :]
        samplesB = qmc_samples[nvars:, :]
        for ii, rv in enumerate(variables.all_variables()):
            lb, ub = rv.interval(1)
            # the inverse CDF transformation is undefined at 0 and 1 for
            # unbounded random variables, so create bounds that exclude
            # 1e-8 of the total probability
            t1, t2 = rv.interval(1 - 1e-8)
            nlb, nub = rv.cdf([t1, t2])
            if not np.isfinite(lb):
                samplesA[ii, samplesA[ii, :] == 0] = nlb
                samplesB[ii, samplesB[ii, :] == 0] = nlb
            if not np.isfinite(ub):
                samplesA[ii, samplesA[ii, :] == 1] = nub
                samplesB[ii, samplesB[ii, :] == 1] = nub
            samplesA[ii, :] = rv.ppf(samplesA[ii, :])
            samplesB[ii, :] = rv.ppf(samplesB[ii, :])
    else:
        raise Exception(f'Sampling method {method} not supported')
    return samplesA, samplesB
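The endpoint handling above exists because rv.ppf is infinite at 0 and 1 for unbounded variables. A standalone sketch of just that step with a scipy normal variable (the coordinate values are illustrative):

import numpy as np
from scipy import stats

rv = stats.norm()
u = np.array([0., 0.25, 0.5, 0.75, 1.])  # illustrative QMC coordinates
lb, ub = rv.interval(1)                  # (-inf, inf) for a normal
t1, t2 = rv.interval(1 - 1e-8)           # bounds excluding 1e-8 of the probability
nlb, nub = rv.cdf([t1, t2])
if not np.isfinite(lb):
    u[u == 0] = nlb
if not np.isfinite(ub):
    u[u == 1] = nub
x = rv.ppf(u)
assert np.all(np.isfinite(x))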
Example #3
def generate_probability_samples_tolerance(pce,
                                           nindices,
                                           cond_tol,
                                           samples=None,
                                           verbosity=0):
    r"""
    Add samples in integer increments of nindices.
    E.g. if try nsamples = nindices, 2*nindices, 3*nindices
    until condition number is less than tolerance.

    Parameters
    ----------
    samples : np.ndarray
        Samples in the canonical domain of the polynomial

    Returns
    -------
    new_samples : np.ndarray(nvars, nnew_samples)
        The new samples appended to samples. Must be in the canonical space.
    """
    variable = pce.var_trans.variable
    if samples is None:
        new_samples = generate_independent_random_samples(variable, nindices)
        new_samples = pce.var_trans.map_to_canonical_space(new_samples)
    else:
        new_samples = samples.copy()
    cond = compute_preconditioned_canonical_basis_matrix_condition_number(
        pce, new_samples)
    if verbosity > 0:
        print('\tCond No.', cond, 'No. samples', new_samples.shape[1],
              'cond tol', cond_tol)
    cnt = 1
    max_nsamples = 1000 * pce.indices.shape[1]
    while cond > cond_tol:
        tmp = generate_independent_random_samples(variable, cnt * nindices)
        tmp = pce.var_trans.map_to_canonical_space(tmp)
        new_samples = np.hstack((new_samples, tmp))
        cond = compute_preconditioned_canonical_basis_matrix_condition_number(
            pce, new_samples)
        if verbosity > 0:
            print('\tCond No.', cond, 'No. samples', new_samples.shape[1],
                  'cond tol', cond_tol)
        # double the number of samples added so the loop does not take too long
        cnt *= 2
        if new_samples.shape[1] > max_nsamples:
            msg = "Basis and sample combination is ill conditioned"
            raise RuntimeError(msg)
    return new_samples
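A toy sketch of the sampling loop above, with a monomial Vandermonde matrix standing in for pyapprox's preconditioned basis (the basis and tolerance are illustrative assumptions): keep appending random samples, doubling the increment, until the matrix is well conditioned.

import numpy as np

def basis_matrix(x, nterms):
    # monomial basis as a stand-in for the preconditioned PCE basis
    return np.vander(x, nterms, increasing=True)

nterms, cond_tol = 5, 1e4
x = np.random.uniform(-1, 1, nterms)
cnt = 1
while np.linalg.cond(basis_matrix(x, nterms)) > cond_tol:
    x = np.hstack((x, np.random.uniform(-1, 1, cnt * nterms)))
    cnt *= 2  # double the increment so the loop terminates quickly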
Example #4
def find_deterministic_beam_design():
    lower_bound = 0

    uq_vars, objective, constraint_functions, bounds, init_design_sample = \
        setup_beam_design()

    uq_nominal_sample = uq_vars.get_statistics('mean')
    #constraints_info = [{'type':'deterministic',
    #                     'lower_bound':lower_bound}]*len(constraint_functions)
    #constraints = setup_inequality_constraints(
    #    constraint_functions,constraints_info,uq_nominal_sample)

    constraint_info = {'lower_bound': lower_bound,
                       'uq_nominal_sample': uq_nominal_sample}
    individual_constraints = [
        DeterministicConstraint(f, constraint_info)
        for f in constraint_functions]
    constraints = MultipleConstraints(individual_constraints)

    optim_options = {'ftol': 1e-9, 'disp': 3, 'maxiter': 1000}
    res, opt_history = run_design(
        objective, init_design_sample, constraints, bounds, optim_options)

    nsamples = 10000
    uq_samples = generate_independent_random_samples(uq_vars, nsamples)
    return objective, constraints, constraint_functions, uq_samples, res,\
        opt_history
Example #5
    def test_multiply_pce(self):
        np.random.seed(1)
        np.set_printoptions(precision=16)
        univariate_variables = [norm(), uniform()]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        degree1, degree2 = 1, 2
        poly1 = get_polynomial_from_variable(variable)
        poly1.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree1))
        poly2 = get_polynomial_from_variable(variable)
        poly2.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree2))

        #coef1 = np.random.normal(0,1,(poly1.indices.shape[1],1))
        #coef2 = np.random.normal(0,1,(poly2.indices.shape[1],1))
        coef1 = np.arange(poly1.indices.shape[1])[:, np.newaxis]
        coef2 = np.arange(poly2.indices.shape[1])[:, np.newaxis]
        poly1.set_coefficients(coef1)
        poly2.set_coefficients(coef2)

        poly3 = poly1 * poly2
        samples = generate_independent_random_samples(variable, 10)
        assert np.allclose(poly3(samples), poly1(samples) * poly2(samples))

        for order in range(4):
            poly = poly1**order
            assert np.allclose(poly(samples), poly1(samples)**order)
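The property being tested, that products and powers of expansions evaluate to the products and powers of their pointwise values, can be sketched with plain numpy polynomials (not pyapprox's PCE classes):

import numpy as np

p1 = np.polynomial.Polynomial([0., 1.])      # x
p2 = np.polynomial.Polynomial([0., 1., 2.])  # x + 2x**2
p3 = p1 * p2
x = np.random.uniform(-1, 1, 10)
assert np.allclose(p3(x), p1(x) * p2(x))
for order in range(4):
    assert np.allclose((p1**order)(x), p1(x)**order)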
Example #6
    def test_multiply_multivariate_orthonormal_polynomial_expansions(self):
        univariate_variables = [norm(), uniform()]
        variable = IndependentMultivariateRandomVariable(univariate_variables)

        degree1, degree2 = 3, 2
        poly1 = get_polynomial_from_variable(variable)
        poly1.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree1))
        poly1.set_coefficients(
            np.random.normal(0, 1, (poly1.indices.shape[1], 1)))
        poly2 = get_polynomial_from_variable(variable)
        poly2.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree2))
        poly2.set_coefficients(
            np.random.normal(0, 1, (poly2.indices.shape[1], 1)))

        max_degrees1 = poly1.indices.max(axis=1)
        max_degrees2 = poly2.indices.max(axis=1)
        product_coefs_1d = compute_product_coeffs_1d_for_each_variable(
            poly1, max_degrees1, max_degrees2)

        indices, coefs = multiply_multivariate_orthonormal_polynomial_expansions(
            product_coefs_1d, poly1.get_indices(), poly1.get_coefficients(),
            poly2.get_indices(), poly2.get_coefficients())

        poly3 = get_polynomial_from_variable(variable)
        poly3.set_indices(indices)
        poly3.set_coefficients(coefs)

        samples = generate_independent_random_samples(variable, 10)
        # print(poly3(samples),poly1(samples)*poly2(samples))
        assert np.allclose(poly3(samples), poly1(samples) * poly2(samples))
Example #7
    def test_compute_multivariate_orthonormal_basis_product(self):
        univariate_variables = [norm(), uniform()]
        variable = IndependentMultivariateRandomVariable(univariate_variables)

        poly1 = get_polynomial_from_variable(variable)
        poly2 = get_polynomial_from_variable(variable)

        max_degrees1, max_degrees2 = [3, 3], [2, 2]
        product_coefs_1d = compute_product_coeffs_1d_for_each_variable(
            poly1, max_degrees1, max_degrees2)

        for ii in range(max_degrees1[0]):
            for jj in range(max_degrees1[1]):
                poly_index_ii = np.array([ii, jj])
                poly_index_jj = np.array([ii, jj])

                poly1.set_indices(poly_index_ii[:, np.newaxis])
                poly1.set_coefficients(np.ones([1, 1]))
                poly2.set_indices(poly_index_jj[:, np.newaxis])
                poly2.set_coefficients(np.ones([1, 1]))

                product_indices, product_coefs = \
                    compute_multivariate_orthonormal_basis_product(
                        product_coefs_1d, poly_index_ii, poly_index_jj,
                        max_degrees1, max_degrees2)

                poly_prod = get_polynomial_from_variable(variable)
                poly_prod.set_indices(product_indices)
                poly_prod.set_coefficients(product_coefs)

                samples = generate_independent_random_samples(variable, 5)
                # print(poly_prod(samples),poly1(samples)*poly2(samples))
                assert np.allclose(poly_prod(samples),
                                   poly1(samples) * poly2(samples))
Example #8
    def test_pce_jacobian(self):
        degree = 2

        alpha_stat, beta_stat = 2, 3
        univariate_variables = [beta(alpha_stat, beta_stat, 0, 1), norm(-1, 2)]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        var_trans = AffineRandomVariableTransformation(variable)
        num_vars = len(univariate_variables)

        poly = PolynomialChaosExpansion()
        poly_opts = define_poly_options_from_variable_transformation(var_trans)
        poly.configure(poly_opts)

        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        poly.set_indices(indices)

        sample = generate_independent_random_samples(variable, 1)

        coef = np.ones((indices.shape[1], 2))
        coef[:, 1] *= 2
        poly.set_coefficients(coef)

        jac = poly.jacobian(sample)
        from pyapprox.optimization import approx_jacobian
        fd_jac = approx_jacobian(lambda x: poly(x[:, np.newaxis])[0, :],
                                 sample[:, 0])
        assert np.allclose(jac, fd_jac)
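A minimal forward-difference Jacobian in the spirit of the approx_jacobian check above (a standalone sketch; pyapprox's implementation may differ):

import numpy as np

def fd_jacobian(fun, x, eps=1e-7):
    # forward differences: one column of the Jacobian per input variable
    f0 = fun(x)
    jac = np.empty((f0.size, x.size))
    for k in range(x.size):
        xp = x.copy()
        xp[k] += eps
        jac[:, k] = (fun(xp) - f0) / eps
    return jac

def fun(x):
    return np.array([x[0]**2 + x[1], 3 * x[1]])

x = np.array([0.5, -0.2])
assert np.allclose(fd_jacobian(fun, x), [[2 * x[0], 1.], [0., 3.]], atol=1e-5)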
Example #9
    def test_identity_map_subset(self):
        num_vars = 3
        var_trans = define_iid_random_variable_transformation(
            uniform(0, 1), num_vars)
        var_trans.set_identity_maps([1])

        samples = np.random.uniform(0, 1, (num_vars, 4))
        canonical_samples = var_trans.map_to_canonical_space(samples)
        assert np.allclose(canonical_samples[1, :], samples[1, :])

        assert np.allclose(
            var_trans.map_from_canonical_space(canonical_samples), samples)

        univariate_variables = [
            uniform(-1, 2), beta(1, 1, -1, 2), norm(-1, np.sqrt(4)), uniform(),
            uniform(-1, 2), beta(2, 1, -2, 3)]
        var_trans = AffineRandomVariableTransformation(univariate_variables)
        var_trans.set_identity_maps([4, 2])

        from pyapprox.probability_measure_sampling import \
            generate_independent_random_samples
        samples = generate_independent_random_samples(var_trans.variable, 10)
        canonical_samples = var_trans.map_to_canonical_space(samples)
        assert np.allclose(canonical_samples[[2, 4], :], samples[[2, 4], :])

        assert np.allclose(
            var_trans.map_from_canonical_space(canonical_samples), samples)
Example #10
    def test_feed_forward_system_of_polynomials_estimate_coupling_bounds(self):
        graph, variables, graph_data = get_3_recursive_polynomial_components()
        # overwrite functions, they have the same network structure so this
        # is fine
        graph_data['functions'] = get_polynomial_models()
        graph = build_chain_graph(3, graph_data)
        network = SystemNetwork(graph)
        approx = DecoupledSystemSurrogate(network,
                                          variables,
                                          estimate_coupling_ranges=True,
                                          verbose=2,
                                          nrefinement_samples=1e4)

        univariate_quad_rule_info = [
            clenshaw_curtis_in_polynomial_order, clenshaw_curtis_rule_growth
        ]
        refinement_indicator = None
        options = [{
            'univariate_quad_rule_info': univariate_quad_rule_info,
            'max_nsamples': 30,
            'tol': 0,
            'verbose': 0,
            'refinement_indicator': refinement_indicator
        }] * 3
        surr_graph = approx.surrogate_network.graph

        # underestimate coupling ranges
        output_bounds = [[0.5, 1.5], [2, 5]]
        coupling_variables = {
            0: [],
            1: [stats.uniform(output_bounds[0][0], np.diff(output_bounds[0]))],
            2: [stats.uniform(output_bounds[1][0], np.diff(output_bounds[1]))]
        }
        approx.set_coupling_variables(coupling_variables)
        approx.initialize_component_surrogates(options)
        approx.build(terminate_test=TerminateTest(max_work=30))

        nvalidation_samples = 10
        validation_samples = generate_independent_random_samples(
            variables, nvalidation_samples)
        validation_values = network(validation_samples,
                                    component_ids=[0, 1, 2])

        validation_values_approx = []
        for node_id in surr_graph.nodes:
            component_surr = surr_graph.nodes[node_id]['functions']
            idx = graph_data["global_random_var_indices"][node_id]
            if node_id == 0:
                component_vals_approx = component_surr(
                    validation_samples[idx, :])
            else:
                jdx = graph_data["local_random_var_indices"][node_id][0]
                component_samples = np.empty((2, validation_samples.shape[1]))
                component_samples[jdx] = validation_samples[idx, :]
                component_samples[1 - jdx] = validation_values_approx[-1].T
                component_vals_approx = component_surr(component_samples)
            validation_values_approx.append(component_vals_approx)
            assert np.allclose(validation_values[node_id],
                               component_vals_approx)
        assert np.allclose(approx(validation_samples), validation_values[-1])
Example #11
    def test_peer_feed_forward_system_of_polynomials_multiple_qoi(self):
        graph, variables, graph_data = \
            get_3_peer_polynomial_components_multiple_qoi()
        network = SystemNetwork(graph)

        nsamples = 10
        samples = generate_independent_random_samples(variables, nsamples)
        values = network(samples)

        component_nvars = network.component_nvars()
        assert component_nvars == [1, 2, 4]

        funs = graph_data['functions']
        global_random_var_indices = graph_data['global_random_var_indices']
        values0 = funs[0](samples[global_random_var_indices[0], :])
        values1 = funs[1](samples[global_random_var_indices[1], :])
        values2 = funs[2](np.vstack([
            samples[global_random_var_indices[2], :], values1.T,
            values0[:, 1:2].T]))

        assert np.allclose(values, values2)

        network_values = network(samples, [0, 1, 2])
        assert np.allclose(network_values[0], values0)
        assert np.allclose(network_values[1], values1)
        assert np.allclose(network_values[2], values2)

        # test when component_ids are specified
        ncomponent_outputs = [2, 2, 1]
        ncomponent_coupling_vars = [0, 0, 3]
        noutputs = np.sum(ncomponent_outputs)
        ncoupling_vars = np.sum(ncomponent_coupling_vars)
        adjacency_matrix = np.zeros((ncoupling_vars, noutputs))
        adjacency_matrix[0, 2] = 1
        adjacency_matrix[1, 3] = 1
        adjacency_matrix[2, 1] = 1
        adjacency_matrix = scipy_sparse.csr_matrix(adjacency_matrix)

        exog_extraction_indices = [[0], [0, 1], [2]]
        coup_extraction_indices = [[], [], [0, 1, 2]]
        qoi_extraction_indices = [0, 2, 3, 4]

        network = GaussJacobiSystemNetwork(graph)
        network.set_adjacency_matrix(adjacency_matrix)
        network.set_extraction_indices(exog_extraction_indices,
                                       coup_extraction_indices,
                                       qoi_extraction_indices,
                                       ncomponent_outputs)
        network.set_initial_coupling_sample(np.ones((ncoupling_vars, 1)))
        component_ids = [0, 1, 2]
        outputs = network(samples, component_ids, init_coup_samples=None)
        true_outputs = [values0, values1, values2]
        # print(outputs[0], true_outputs[0])
        # print(outputs[1], true_outputs[1])
        # print(outputs[2], true_outputs[2])
        assert np.allclose(np.hstack(outputs), np.hstack(true_outputs))

        outputs = network(samples, component_ids=None, init_coup_samples=None)
        assert np.allclose(outputs, np.hstack(true_outputs)[:, [0, 2, 3, 4]])
Example #12
def increment_probability_samples(pce,
                                  cond_tol,
                                  samples,
                                  indices,
                                  new_indices,
                                  verbosity=0):
    r"""
    Parameters
    ----------
    samples : np.ndarray
        Samples in the canonical domain of the polynomial

    Returns
    -------
    new_samples : np.ndarray(nvars, nnew_samples)
        The new samples appended to samples. Must be in the canonical space.
    """
    # allocate one sample for every new basis term
    tmp = generate_independent_random_samples(pce.var_trans.variable,
                                              new_indices.shape[1])
    tmp = pce.var_trans.map_to_canonical_space(tmp)
    new_samples = np.hstack((samples, tmp))
    # keep sampling until condition number is below cond_tol
    new_samples = generate_probability_samples_tolerance(
        pce, new_indices.shape[1], cond_tol, new_samples, verbosity)
    if verbosity > 0:
        print('No. samples', new_samples.shape[1])
        print('No. initial samples', samples.shape[1])
        print('No. indices', indices.shape[1], pce.indices.shape[1])
        print('No. new indices', new_indices.shape[1])
        print('No. new samples', new_samples.shape[1] - samples.shape[1])
    return new_samples
Example #13
    def allocate_initial_samples(self):
        if self.induced_sampling:
            return generate_induced_samples_migliorati_tolerance(
                self.pce, self.cond_tol)
        else:
            return generate_independent_random_samples(
                self.pce.var_trans.variable,
                self.sample_ratio * self.pce.num_terms())
Example #14
def analytic_sobol_indices_from_gaussian_process(
        gp, variable, interaction_terms, ngp_realizations=1,
        stat_functions=(np.mean, np.median, np.min, np.max),
        ninterpolation_samples=500, nvalidation_samples=100,
        ncandidate_samples=1000, nquad_samples=50, use_cholesky=True, alpha=0):

    x_train, y_train, K_inv, lscale, kernel_var, transform_quad_rules = \
        extract_gaussian_process_attributes_for_integration(gp)

    if ngp_realizations > 0:
        gp_realizations = generate_gp_realizations(
            gp, ngp_realizations, ninterpolation_samples, nvalidation_samples,
            ncandidate_samples, variable, use_cholesky, alpha)

        # Check how accurate the realizations are
        validation_samples = generate_independent_random_samples(
            variable, 1000)
        mean_vals, std = gp(validation_samples, return_std=True)
        realization_vals = gp_realizations(validation_samples)
        print(mean_vals[:, 0].mean())
        # print(std,realization_vals.std(axis=1))
        print('std of realizations error',
              np.linalg.norm(std-realization_vals.std(axis=1))/np.linalg.norm(
                  std))
        print('var of realizations error',
              np.linalg.norm(std**2-realization_vals.var(axis=1)) /
              np.linalg.norm(std**2))

        print('mean interpolation error',
              np.linalg.norm((mean_vals[:, 0]-realization_vals[:, -1])) /
              np.linalg.norm(mean_vals[:, 0]))

        x_train = gp_realizations.selected_canonical_samples
        # gp_realizations.train_vals is normalized so unnormalize
        y_train = gp._y_train_std*gp_realizations.train_vals
        # kernel_var has already been adjusted by call to
        # extract_gaussian_process_attributes_for_integration
        K_inv = np.linalg.inv(gp_realizations.L.dot(gp_realizations.L.T))
        K_inv /= gp._y_train_std**2

    sobol_values, total_values, means, variances = \
        _compute_expected_sobol_indices(
            gp, variable, interaction_terms, nquad_samples,
            x_train, y_train, K_inv, lscale, kernel_var,
            transform_quad_rules, gp._y_train_mean)
    sobol_values = sobol_values.T
    total_values = total_values.T

    result = dict()
    data = [sobol_values, total_values, variances, means]
    data_names = ['sobol_indices', 'total_effects', 'variance', 'mean']
    for item, name in zip(data, data_names):
        subdict = dict()
        for ii, sfun in enumerate(stat_functions):
            subdict[sfun.__name__] = sfun(item, axis=(0))
        subdict['values'] = item
        result[name] = subdict
    return result
Example #15
    def test_beta_2d_preconditioning(self):
        """
        Interpolate a set of points using preconditioing. First select
        all initial points then adding a subset of the remaining points.

        x in Beta(2,5)[0,1]^2
        """

        num_vars = 2
        alpha_stat = 2
        beta_stat = 5
        var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat, beta_stat, -1, 2), num_vars)
        pce_opts = define_poly_options_from_variable_transformation(var_trans)

        # Set oli options
        oli_opts = {'verbosity': 0, 'assume_non_degeneracy': False}

        def basis_generator(num_vars, degree):
            return (degree + 1, compute_hyperbolic_level_indices(
                num_vars, degree, 1.0))

        # from scipy.special import beta as beta_fn
        # def beta_pdf(x,alpha_poly,beta_poly):
        #     values = (1.-x)**(alpha_poly) * (1.+x)**(beta_poly)
        #     values /= 2.**(beta_poly+alpha_poly+1)*beta_fn(
        #         beta_poly+1,alpha_poly+1)
        #     return values
        # univariate_pdf = partial(beta_pdf,alpha_poly=beta_stat-1,beta_poly=alpha_stat-1)

        univariate_beta_pdf = partial(beta.pdf, a=alpha_stat, b=beta_stat)

        def univariate_pdf(x):
            return univariate_beta_pdf((x + 1.) / 2.) / 2.

        preconditioning_function = partial(tensor_product_pdf,
                                           univariate_pdfs=univariate_pdf)

        # define target function
        def model(x):
            return np.asarray([(x[0]**2 - 1) + (x[1]**2 - 1) + x[0] * x[1]]).T

        # define points to interpolate
        pts = generate_independent_random_samples(var_trans.variable, 12)
        initial_pts = np.array([pts[:, 0]]).T

        helper_least_factorization(
            pts,
            model,
            var_trans,
            pce_opts,
            oli_opts,
            basis_generator,
            initial_pts=initial_pts,
            max_num_pts=12,
            preconditioning_function=preconditioning_function)
Example #16
    def increment_samples(self, current_poly_indices, unique_poly_indices):
        if self.induced_sampling:
            samples = increment_induced_samples_migliorati(
                self.pce, self.cond_tol, self.samples,
                current_poly_indices, unique_poly_indices)
        else:
            samples = generate_independent_random_samples(
                self.pce.var_trans.variable,
                self.sample_ratio * unique_poly_indices.shape[1])
            samples = self.pce.var_trans.map_to_canonical_space(samples)
            samples = np.hstack([self.samples, samples])
        return samples
Example #17
def compute_l2_error(f, g, variable, nsamples, rel=False):
    r"""
    Compute the :math:`\ell^2` error of the output of two functions f and g, i.e.

    .. math:: \lVertf(z)-g(z)\rVert\approx \sum_{m=1}^M f(z^{(m)})

    from a set of random draws :math:`\mathcal{Z}=\{z^{(m)}\}_{m=1}^M` 
    from the PDF of :math:`z`.

    Parameters
    ----------
    f : callable
        Function with signature
    
        ``g(z) -> np.ndarray``

        where ``z`` is a 2D np.ndarray with shape (nvars,nsamples) and the
        output is a 2D np.ndarray with shaoe (nsamples,nqoi)

    g : callable
        Function with signature
    
        ``f(z) -> np.ndarray``

        where ``z`` is a 2D np.ndarray with shape (nvars,nsamples) and the
        output is a 2D np.ndarray with shaoe (nsamples,nqoi)

    variable : pya.IndependentMultivariateRandomVariable
        Object containing information of the joint density of the inputs z.
        This is used to generate random samples from this join density

    nsamples : integer
        The number of samples used to compute the :math:`\ell^2` error

    rel : boolean
        True - compute relative error
        False - compute absolute error
    
    Returns
    -------
    error : np.ndarray (nqoi)
    """

    validation_samples = generate_independent_random_samples(
        variable, nsamples)
    validation_vals = f(validation_samples)
    approx_vals = g(validation_samples)
    assert validation_vals.shape == approx_vals.shape
    error = np.linalg.norm(approx_vals - validation_vals, axis=0)
    if not rel:
        error /= np.sqrt(validation_samples.shape[1])
    else:
        error /= np.linalg.norm(validation_vals, axis=0)
    return error
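A hedged usage sketch for compute_l2_error, with f as the truth and g as the approximation; the import path for IndependentMultivariateRandomVariable is an assumption and may differ between pyapprox versions.

import numpy as np
from scipy.stats import uniform
from pyapprox.variables import IndependentMultivariateRandomVariable

variable = IndependentMultivariateRandomVariable([uniform(-1, 2)] * 2)

def f(z):
    return np.sum(z**2, axis=0)[:, None]

def g(z):
    return np.sum(z**2, axis=0)[:, None] + 1e-3

# compute_l2_error as defined above
error = compute_l2_error(f, g, variable, nsamples=10000, rel=True)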
Example #18
    def test_bayesian_importance_sampling_avar(self):
        np.random.seed(1)
        nrandom_vars = 2
        Amat = np.array([[-0.5, 1]])
        noise_std = 0.1
        prior_variable = IndependentMultivariateRandomVariable(
            [stats.norm(0, 1)] * nrandom_vars)
        prior_mean = prior_variable.get_statistics('mean')
        prior_cov = np.diag(prior_variable.get_statistics('var')[:, 0])
        prior_cov_inv = np.linalg.inv(prior_cov)
        noise_cov_inv = np.eye(Amat.shape[0]) / noise_std**2
        true_sample = np.array([.4] * nrandom_vars)[:, None]
        collected_obs = Amat.dot(true_sample)
        collected_obs += np.random.normal(0, noise_std, (collected_obs.shape))
        exact_post_mean, exact_post_cov = \
            laplace_posterior_approximation_for_linear_models(
                Amat, prior_mean, prior_cov_inv, noise_cov_inv,
                collected_obs)

        chol_factor = np.linalg.cholesky(exact_post_cov)
        chol_factor_inv = np.linalg.inv(chol_factor)

        def g_model(samples):
            return np.exp(
                np.sum(chol_factor_inv.dot(samples - exact_post_mean),
                       axis=0))[:, None]

        nsamples = int(1e6)
        prior_samples = generate_independent_random_samples(
            prior_variable, nsamples)
        posterior_samples = chol_factor.dot(
            np.random.normal(0, 1, (nrandom_vars, nsamples))) + exact_post_mean

        g_mu, g_sigma = 0, np.sqrt(nrandom_vars)
        f, f_cdf, f_pdf, VaR, CVaR, ssd, ssd_disutil = \
            get_lognormal_example_exact_quantities(g_mu, g_sigma)

        beta = .1
        cvar_exact = CVaR(beta)

        cvar_mc = conditional_value_at_risk(g_model(posterior_samples), beta)

        prior_pdf = prior_variable.pdf
        post_pdf = stats.multivariate_normal(mean=exact_post_mean[:, 0],
                                             cov=exact_post_cov).pdf
        weights = post_pdf(prior_samples.T) / prior_pdf(prior_samples)[:, 0]
        weights /= weights.sum()
        cvar_im = conditional_value_at_risk(g_model(prior_samples), beta,
                                            weights)
        # print(cvar_exact, cvar_mc, cvar_im)
        assert np.allclose(cvar_exact, cvar_mc, rtol=1e-3)
        assert np.allclose(cvar_exact, cvar_im, rtol=2e-3)
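The reweighting above is self-normalized importance sampling: evaluate with samples from one density, weight by the target-to-proposal density ratio, and normalize the weights. A standalone sketch estimating a second moment:

import numpy as np
from scipy import stats

proposal, target = stats.norm(0, 1), stats.norm(0.5, 0.8)
x = proposal.rvs(size=100000)
weights = target.pdf(x) / proposal.pdf(x)
weights /= weights.sum()
estimate = np.sum(weights * x**2)        # E_target[x^2]
exact = target.var() + target.mean()**2  # 0.8**2 + 0.5**2
assert np.allclose(estimate, exact, rtol=5e-2)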
Example #19
    def allocate_initial_samples(self):
        if self.induced_sampling:
            return generate_induced_samples_migliorati_tolerance(
                self.pce, self.cond_tol)

        if self.cond_tol < 0:
            sample_ratio = -self.cond_tol
            return generate_independent_random_samples(
                self.pce.var_trans.variable,
                sample_ratio * self.pce.num_terms())

        return generate_probability_samples_tolerance(self.pce,
                                                      self.pce.num_terms(),
                                                      self.cond_tol)
Example #20
    def test_peer_feed_forward_system_of_polynomials(self):
        graph, variables, graph_data, system_fun = \
            get_3_peer_polynomial_components()
        network = SystemNetwork(graph)

        nsamples = 10
        samples = generate_independent_random_samples(variables, nsamples)
        values = network(samples, [0, 1, 2])

        component_nvars = network.component_nvars()
        assert component_nvars == [1, 2, 3]

        true_values = system_fun(samples)
        assert np.allclose(values, true_values)
Example #21
    def test_add_pce(self):
        univariate_variables = [norm(), uniform()]
        variable = IndependentMultivariateRandomVariable(univariate_variables)
        degree1, degree2 = 2, 3
        poly1 = get_polynomial_from_variable(variable)
        poly1.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree1))
        poly1.set_coefficients(
            np.random.normal(0, 1, (poly1.indices.shape[1], 1)))
        poly2 = get_polynomial_from_variable(variable)
        poly2.set_indices(
            compute_hyperbolic_indices(variable.num_vars(), degree2))
        poly2.set_coefficients(
            np.random.normal(0, 1, (poly2.indices.shape[1], 1)))

        poly3 = poly1 + poly2 + poly2
        samples = generate_independent_random_samples(variable, 10)
        # print(poly3(samples), poly1(samples) + 2*poly2(samples))
        assert np.allclose(poly3(samples), poly1(samples) + 2 * poly2(samples))

        poly4 = poly1 - poly2
        samples = generate_independent_random_samples(variable, 10)
        # print(poly4(samples), poly1(samples) - poly2(samples))
        assert np.allclose(poly4(samples), poly1(samples) - poly2(samples))
Example #22
    def test_gauss_jacobi_fpi_feedback(self):
        ncomponent_outputs = [2, 2, 1]
        ncomponent_coupling_vars = [2, 2, 2]
        noutputs = np.sum(ncomponent_outputs)
        ncoupling_vars = np.sum(ncomponent_coupling_vars)
        adjacency_matrix = np.zeros((ncoupling_vars, noutputs))

        adjacency_matrix[0, 0] = 1  # xi_00 = C1
        adjacency_matrix[1, 2] = 1  # xi_01 = C2
        adjacency_matrix[2, 0] = 1  # xi_10 = C1
        adjacency_matrix[3, 2] = 1  # xi_11 = C2
        adjacency_matrix[4, 1] = 1  # xi_20 = y1
        adjacency_matrix[5, 3] = 1  # xi_21 = y2
        adjacency_matrix = scipy_sparse.csr_matrix(adjacency_matrix)
        # plot_adjacency_matrix(
        #     adjacency_matrix, (ncomponent_coupling_vars, ncomponent_outputs))
        # from matplotlib import pyplot as plt
        # plt.show()

        component_funs, variables = get_chaudhuri_3_component_system()

        # output_extraction_indices = [[0, 1], [1, 2], [3]]
        exog_extraction_indices = [[0, 1, 2], [0, 3, 4], []]
        coup_extraction_indices = [[0, 1], [2, 3], [4, 5]]

        nsamples = 10
        exog_samples = generate_independent_random_samples(
            variables, nsamples - 1)
        exog_samples = np.hstack(
            (exog_samples, variables.get_statistics("mean")))
        init_coup_samples = 5 * np.ones((adjacency_matrix.shape[0], nsamples))

        outputs = gauss_jacobi_fixed_point_iteration(adjacency_matrix,
                                                     exog_extraction_indices,
                                                     coup_extraction_indices,
                                                     component_funs,
                                                     init_coup_samples,
                                                     exog_samples,
                                                     tol=1e-12,
                                                     max_iters=20,
                                                     verbose=0,
                                                     anderson_memory=1)[0]

        # Mathematica Solution
        # Solve[(0.02 + (9.7236*x + 0.2486*y)/Sqrt[x^2 + y^2] - x == 0) &&
        # 0.03 + (9.7764*y + 0.2486*x)/Sqrt[x^2 + y^2] - y == 0, {x, y}]
        # print(outputs[-1, [0, 2]], [6.63852, 7.52628])
        assert np.allclose(outputs[-1, [0, 2]], [6.63852, 7.52628])
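The Mathematica comment pins down the fixed point the Gauss-Jacobi iteration converges to. A bare fixed-point sketch of the same two-equation system (standalone, no Anderson acceleration; convergence is slow but geometric):

import numpy as np

x, y = 5., 5.  # initial coupling guess
for it in range(1000):
    r = np.sqrt(x**2 + y**2)
    x_new = 0.02 + (9.7236 * x + 0.2486 * y) / r
    y_new = 0.03 + (9.7764 * y + 0.2486 * x) / r
    if abs(x_new - x) + abs(y_new - y) < 1e-12:
        break
    x, y = x_new, y_new
assert np.allclose([x, y], [6.63852, 7.52628], atol=1e-4)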
Example #23
def find_uncertainty_aware_beam_design(constraint_type='quantile'):

    quantile = 0.1
    print(quantile)
    quantile_lower_bound = 0

    uq_vars, objective, constraint_functions, bounds, init_design_sample = \
        setup_beam_design()
    if constraint_type == 'quantile':
        constraint_info = [{'type': 'quantile',
                            'lower_bound': quantile_lower_bound,
                            'quantile': quantile}]*len(constraint_functions)
    elif constraint_type == 'cvar':
        constraint_info = [{'type': 'cvar',
                            'lower_bound': 0.05,
                            'quantile': quantile,
                            'smoothing_eps': 1e-1}]*len(
                                constraint_functions)
    else:
        raise Exception(f'Constraint type {constraint_type} not supported')

    nsamples = 10000
    uq_samples = generate_independent_random_samples(uq_vars, nsamples)
    #constraints = setup_inequality_constraints(
    #    constraint_functions,constraint_info,uq_samples)

    def generate_samples():
        # always use the same samples to avoid noise in the constraint values
        np.random.seed(1)
        return uq_samples
    individual_constraints = [
        MCStatisticConstraint(f, generate_samples, constraint_info[0])
        for f in constraint_functions]
    constraints = MultipleConstraints(individual_constraints)

    optim_options = {'ftol': 1e-9, 'disp': 3, 'maxiter': 1000}
    res, opt_history = run_design(
        objective, init_design_sample, constraints, bounds, optim_options)

    return objective, constraints, constraint_functions, uq_samples, res,\
        opt_history
Example #24
    def test_recursive_feed_forward_system_of_polynomials(self):
        graph, variables, graph_data = get_3_recursive_polynomial_components()
        network = SystemNetwork(graph)

        nsamples = 10
        samples = generate_independent_random_samples(variables, nsamples)
        values = network(samples)

        component_nvars = network.component_nvars()
        assert component_nvars == [1, 2, 2]

        funs = graph_data['functions']
        global_random_var_indices = graph_data['global_random_var_indices']
        values0 = funs[0](samples[global_random_var_indices[0], :])
        values1 = funs[1](np.vstack(
            [values0.T, samples[global_random_var_indices[1], :]]))
        true_values = funs[2](np.vstack(
            [samples[global_random_var_indices[2], :], values1.T]))

        assert np.allclose(values, true_values)
Example #25
    def helper(self, function, var_trans, pce, max_level, error_tol):
        max_level_1d = [max_level]*(pce.num_vars)
        max_num_samples = 100

        admissibility_function = partial(
            max_level_admissibility_function, max_level, max_level_1d,
            max_num_samples, error_tol)
        refinement_indicator = variance_pce_refinement_indicator

        pce.set_function(function, var_trans)
        pce.set_refinement_functions(
            refinement_indicator, admissibility_function,
            clenshaw_curtis_rule_growth)
        pce.build()
        validation_samples = generate_independent_random_samples(
            var_trans.variable, int(1e3))
        validation_vals = function(validation_samples)
        pce_vals = pce(validation_samples)
        error = np.linalg.norm(pce_vals-validation_vals)/np.sqrt(
            validation_samples.shape[1])
        return error, pce
Example #26
def get_coupling_variables_via_sampling(network,
                                        random_variables,
                                        nsamples,
                                        expansion_factor=0.1,
                                        filename=None):
    """
    Compute the bounds on the coupling variables (output of upstream models)
    using Monte Carlo sampling. Return uniform variables over a slgithly larger
    range
    """
    if filename is not None and os.path.exists(filename):
        print(f'loading file {filename}')
        values = np.load(filename)['values']
    else:
        samples = generate_independent_random_samples(random_variables,
                                                      nsamples)
        component_ids = np.arange(len(network.graph.nodes))
        values = network(samples, component_ids)
        if filename is not None:
            np.savez(filename, values=values, samples=samples)
    coupling_bounds = np.array([[v.min(axis=0), v.max(axis=0)]
                                for v in values])
    coupling_variables = {}

    graph = network.graph
    for jj in graph.nodes:
        coupling_variables[jj] = []
        indices = graph.nodes[jj]['global_coupling_component_indices']
        for ii in range(len(indices) // 2):
            lb = coupling_bounds[indices[2 * ii]][0][indices[2 * ii + 1]]
            ub = coupling_bounds[indices[2 * ii]][1][indices[2 * ii + 1]]
            diff = ub - lb
            lb = lb - diff * expansion_factor / 2
            ub = ub + diff * expansion_factor / 2
            coupling_variables[jj].append(stats.uniform(lb, ub - lb))
    return coupling_variables
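The loop above widens each empirically observed bound by expansion_factor before building a uniform variable; note that scipy's uniform is parameterized by (loc, scale), hence stats.uniform(lb, ub - lb). A standalone sketch with illustrative numbers:

import numpy as np
from scipy import stats

lb, ub, expansion_factor = 2.0, 5.0, 0.1
diff = ub - lb
lb = lb - diff * expansion_factor / 2
ub = ub + diff * expansion_factor / 2
coupling_var = stats.uniform(lb, ub - lb)  # uniform on [1.85, 5.15]
assert np.allclose(coupling_var.interval(1), (1.85, 5.15))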
Example #27
def genz_example(max_num_samples, precond_type):
    error_tol = 1e-12

    univariate_variables = [uniform(), beta(3, 3)]
    variable = IndependentMultivariateRandomVariable(univariate_variables)
    var_trans = AffineRandomVariableTransformation(variable)

    c = np.array([10, 0.00])
    model = GenzFunction("oscillatory",
                         variable.num_vars(),
                         c=c,
                         w=np.zeros_like(c))
    # model.set_coefficients(4,'exponential-decay')

    validation_samples = generate_independent_random_samples(
        var_trans.variable, int(1e3))
    validation_values = model(validation_samples)

    errors = []
    num_samples = []

    def callback(pce):
        error = compute_l2_error(validation_samples, validation_values, pce)
        errors.append(error)
        num_samples.append(pce.samples.shape[1])

    candidate_samples = -np.cos(
        np.random.uniform(0, np.pi, (var_trans.num_vars(), int(1e4))))
    pce = AdaptiveLejaPCE(var_trans.num_vars(),
                          candidate_samples,
                          factorization_type='fast')
    if precond_type == 'density':

        def precond_function(basis_matrix, samples):
            trans_samples = var_trans.map_from_canonical_space(samples)
            vals = np.ones(samples.shape[1])
            for ii in range(len(univariate_variables)):
                rv = univariate_variables[ii]
                vals *= np.sqrt(rv.pdf(trans_samples[ii, :]))
            return vals
    elif precond_type == 'christoffel':
        precond_function = chistoffel_preconditioning_function
    else:
        raise Exception(f'Preconditioner: {precond_type} not supported')
    pce.set_preconditioning_function(precond_function)

    max_level = np.inf
    max_level_1d = [max_level] * (pce.num_vars)

    admissibility_function = partial(max_level_admissibility_function,
                                     max_level, max_level_1d, max_num_samples,
                                     error_tol)

    growth_rule = partial(constant_increment_growth_rule, 2)
    #growth_rule = clenshaw_curtis_rule_growth
    pce.set_function(model, var_trans)
    pce.set_refinement_functions(variance_pce_refinement_indicator,
                                 admissibility_function, growth_rule)

    while (not pce.active_subspace_queue.empty()
           or pce.subspace_indices.shape[1] == 0):
        pce.refine()
        pce.recompute_active_subspace_priorities()
        if callback is not None:
            callback(pce)

    from pyapprox.sparse_grid import plot_sparse_grid_2d
    plot_sparse_grid_2d(pce.samples, np.ones(pce.samples.shape[1]),
                        pce.pce.indices, pce.subspace_indices)

    plt.figure()
    plt.loglog(num_samples, errors, 'o-')
    plt.show()
Example #28
input_path = file_settings()[1]
filename = f'{input_path}parameter.csv'
variable = variables_prep(filename, product_uniform=False)
index_product = np.load(f'{input_path}index_product.npy', allow_pickle=True)

# Check whether the Beta distribution is a proper option
filename = f'{input_path}parameter-adjust.csv'
param_adjust = pd.read_csv(filename)
beta_index = param_adjust[
    param_adjust['distribution'] == 'beta'].index.to_list()
# prepare the loc and scale argument for Beta fit
locs = np.array(param_adjust.loc[beta_index, ['min', 'max']])
locs[:, 1] = locs[:, 1] - locs[:, 0]
param_names = param_adjust.loc[beta_index, 'Veneer_name'].values
num_samples = 20000
samples_uniform = generate_independent_random_samples(variable, num_samples)
beta_fit = np.zeros(shape=(len(param_names), 4))

for ii in range(index_product.shape[0]):
    index_temp = index_product[ii]
    samples_uniform[index_temp[0], :] = np.prod(samples_uniform[index_temp, :],
                                                axis=0)
    # fit the Beta distribution
    rv_product = samples_uniform[index_temp[0]]
    beta_arguments = beta.fit(rv_product, floc=locs[ii][0], fscale=locs[ii][1])
    beta_fit[ii, :] = np.round(beta_arguments, 4)
    # calculate the KS-statistic
    num_boot = 1000
    ks_stat = []

    for i in range(num_boot):
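The snippet is truncated at the bootstrap loop. A standalone sketch of a bootstrap KS check against a fitted Beta distribution (synthetic data and parameters are illustrative) might look like:

import numpy as np
from scipy import stats

data = stats.beta(2, 5).rvs(size=2000)
a, b, loc, scale = stats.beta.fit(data, floc=0, fscale=1)
ks_stat = []
for i in range(100):
    boot = np.random.choice(data, size=data.size, replace=True)
    ks_stat.append(
        stats.kstest(boot, stats.beta(a, b, loc, scale).cdf).statistic)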
Example #29
    def test_gauss_jacobi_fpi_peer(self):
        ncomponent_outputs = [1, 1, 1]
        ncomponent_coupling_vars = [0, 0, 2]
        noutputs = np.sum(ncomponent_outputs)
        ncoupling_vars = np.sum(ncomponent_coupling_vars)
        adjacency_matrix = np.zeros((ncoupling_vars, noutputs))

        adjacency_matrix[0, 0] = 1
        adjacency_matrix[1, 1] = 1
        adjacency_matrix = scipy_sparse.csr_matrix(adjacency_matrix)
        # plot_adjacency_matrix(adjacency_matrix, component_shapes)
        # from matplotlib import pyplot as plt
        # plt.show()

        # output_extraction_indices = [[0], [1], [2]]
        exog_extraction_indices = [[0], [0, 1], [2]]
        coup_extraction_indices = [[], [], [1, 0]]
        qoi_ext_indices = [2]

        graph, variables, graph_data, system_fun = \
            get_3_peer_polynomial_components()
        component_funs = graph_data["functions"]

        nsamples = 10
        exog_samples = generate_independent_random_samples(variables, nsamples)
        init_coup_samples = np.ones((adjacency_matrix.shape[0], nsamples))

        outputs = gauss_jacobi_fixed_point_iteration(adjacency_matrix,
                                                     exog_extraction_indices,
                                                     coup_extraction_indices,
                                                     component_funs,
                                                     init_coup_samples,
                                                     exog_samples,
                                                     tol=1e-15,
                                                     max_iters=100,
                                                     verbose=0)[0]

        true_outputs = system_fun(exog_samples)
        assert np.allclose(outputs, np.hstack(true_outputs))

        # test when component_ids are specified
        network = GaussJacobiSystemNetwork(graph)
        network.set_adjacency_matrix(adjacency_matrix)
        network.set_extraction_indices(exog_extraction_indices,
                                       coup_extraction_indices,
                                       qoi_ext_indices, ncomponent_outputs)
        component_ids = [0, 1, 2]
        outputs = network(exog_samples,
                          component_ids,
                          init_coup_samples=init_coup_samples)
        assert np.allclose(np.hstack(outputs), np.hstack(true_outputs))

        # test when component_ids are not specified and so qoi indices are
        # needed
        qoi_ext_indices = [2]  # return qoi of last model
        network = GaussJacobiSystemNetwork(graph)
        network.set_adjacency_matrix(adjacency_matrix)
        network.set_extraction_indices(exog_extraction_indices,
                                       coup_extraction_indices,
                                       qoi_ext_indices, ncomponent_outputs)
        component_ids = None
        outputs = network(exog_samples,
                          component_ids,
                          init_coup_samples=init_coup_samples)
        # print(outputs, true_outputs[-1])
        assert np.allclose(outputs, true_outputs[-1])
Example #30
def helper_least_factorization(pts, model, var_trans, pce_opts, oli_opts,
                               basis_generator,
                               max_num_pts=None, initial_pts=None,
                               pce_degree=None,
                               preconditioning_function=None,
                               verbose=False,
                               points_non_degenerate=False,
                               exact_mean=None):

    num_vars = pts.shape[0]

    pce = PolynomialChaosExpansion()
    pce.configure(pce_opts)

    oli_solver = LeastInterpolationSolver()
    oli_solver.configure(oli_opts)
    oli_solver.set_pce(pce)
    
    if preconditioning_function is not None:
        oli_solver.set_preconditioning_function(preconditioning_function)
        
    oli_solver.set_basis_generator(basis_generator)

    if max_num_pts is None:
        max_num_pts = pts.shape[1]

    if initial_pts is not None:
        # find the unique set of points and separate initial_pts from pts;
        # this allows for cases when
        # (1) pts intersect initial_pts = empty
        # (2) pts intersect initial_pts = initial_pts
        # (3) 0 < #(pts intersect initial_pts) < #initial_pts
        pts = remove_common_rows([pts.T, initial_pts.T]).T

    oli_solver.factorize(
        pts, initial_pts, num_selected_pts=max_num_pts)

    permuted_pts = oli_solver.get_current_points()

    permuted_vals = model(permuted_pts)
    pce = oli_solver.get_current_interpolant(
        permuted_pts, permuted_vals)

    assert permuted_pts.shape[1] == max_num_pts

    # Ensure the pce interpolates the training data
    pce_vals = pce.value(permuted_pts)
    assert np.allclose(permuted_vals, pce_vals)

    # Ensure the pce exactly approximates the polynomial test function (model)
    test_pts = generate_independent_random_samples(
        var_trans.variable, num_samples=10)
    test_vals = model(test_pts)
    pce_vals = pce.value(test_pts)
    L, U, H = oli_solver.get_current_LUH_factors()
    assert np.allclose(test_vals, pce_vals)
    
    if initial_pts is not None:
        temp = remove_common_rows([permuted_pts.T, initial_pts.T]).T
        assert temp.shape[1] == max_num_pts - initial_pts.shape[1]
        if oli_solver.enforce_ordering_of_initial_points:
            assert np.allclose(
                initial_pts, permuted_pts[:, :initial_pts.shape[1]])
        elif not oli_solver.get_initial_points_degenerate():
            assert allclose_unsorted_matrix_rows(
                initial_pts.T, permuted_pts[:, :initial_pts.shape[1]].T)
        else:
            # make sure that oli tried again to add the missing initial
            # points after they were found to be degenerate; often adding
            # one new point will remove the degeneracy
            assert oli_solver.get_num_initial_points_selected() == \
                initial_pts.shape[1]
            P = oli_solver.get_current_permutation()
            I = np.where(P < initial_pts.shape[1])[0]
            assert_allclose_unsorted_matrix_cols(
                initial_pts, permuted_pts[:, I])

    basis_generator = oli_solver.get_basis_generator()
    max_degree = oli_solver.get_current_degree()
    basis_cardinality = oli_solver.get_basis_cardinality()
    num_terms = 0
    for degree in range(max_degree):
        __, indices = basis_generator(num_vars, degree)
        num_terms += indices.shape[1]
        assert num_terms == basis_cardinality[degree]

    if points_non_degenerate:
        degree_list = oli_solver.get_points_to_degree_map()
        num_terms = 1
        degree = 0
        num_pts = permuted_pts.shape[1]
        for i in range(num_pts):
            # test assumes non-degeneracy
            if i >= num_terms:
                degree += 1
                indices = PolyIndexVector()
                basis_generator.get_degree_basis_indices(
                    num_vars, degree, indices)
                num_terms += indices.size()
            assert degree_list[i] == degree

    if exact_mean is not None:
        mean = pce.get_coefficients()[0, 0]
        assert np.allclose(mean, exact_mean)