Example #1
    def test_multiply_multivariate_polynomials(self):
        num_vars = 2
        degree1 = 1
        degree2 = 2

        indices1 = compute_hyperbolic_indices(num_vars, degree1, 1.0)
        coeffs1 = np.ones((indices1.shape[1], 1), dtype=float)
        indices2 = compute_hyperbolic_indices(num_vars, degree2, 1.0)
        coeffs2 = 2.0*np.ones((indices2.shape[1], 1), dtype=float)

        indices, coeffs = multiply_multivariate_polynomials(
            indices1, coeffs1, indices2, coeffs2)

        samples = np.random.uniform(-1, 1, (num_vars, indices.shape[1]*3))
        values = monomial_basis_matrix(indices1, samples).dot(coeffs1)* \
            monomial_basis_matrix(indices2, samples).dot(coeffs2)
        
        true_indices = compute_hyperbolic_indices(
            num_vars, degree1+degree2, 1.0)
        basis_mat = monomial_basis_matrix(true_indices, samples)
        true_coeffs = np.linalg.lstsq(basis_mat, values, rcond=None)[0]
        true_indices, true_coeffs = compress_and_sort_polynomial(
            true_coeffs, true_indices)
        indices, coeffs = compress_and_sort_polynomial(coeffs, indices)
        assert np.allclose(true_indices, indices)
        assert np.allclose(true_coeffs, coeffs)
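
For intuition, multiplying two monomial expansions amounts to adding exponent vectors column-wise and multiplying coefficients, then merging like terms. A minimal pure-NumPy sketch of that idea (a hypothetical helper, not the pyapprox implementation):

import numpy as np

def multiply_monomial_expansions(indices1, coeffs1, indices2, coeffs2):
    # Each column of indices* is an exponent vector; multiplying two terms
    # adds their exponents and multiplies their coefficients. Like terms
    # are merged via the dictionary keyed on the summed exponents.
    terms = {}
    for ii in range(indices1.shape[1]):
        for jj in range(indices2.shape[1]):
            key = tuple(indices1[:, ii] + indices2[:, jj])
            terms[key] = terms.get(key, 0.0) + coeffs1[ii, 0]*coeffs2[jj, 0]
    indices = np.array(list(terms.keys())).T
    coeffs = np.array(list(terms.values()))[:, None]
    return indices, coeffs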
Example #2
    def test_substitute_polynomial_for_variables_in_single_basis_term(self):
        """
        Substitute 
          y1 = (1+x1+x2+x1*x2)
          y2 = (2+2*x1+2*x1*x3)
        into 
          y3 = y1*x4**3*y2    (test1)

        Global ordering of variables in y3
        [y1,x4,y2] = [x1,x2,x4,x1,x3]
        Only x4ant unique variables so reduce to
        [x1,x2,x4,x3]
        """
        def y1(x):
            x1, x2 = x[:2, :]
            return 1+x1+x2+x1*x2

        def y2(x):
            x1, x3 = x[[0, 2], :]
            return 2+2*x1+2*x1*x3

        def y3(x):
            x4 = x[3, :]
            y1, y2 = x[4:, :]
            return y1**2*x4**3*y2
        
        global_var_idx = [np.array([0, 1]), np.array([0, 2])]
        indices_in = [np.array([[0, 0], [1, 0], [0, 1], [1, 1]]).T,
                      np.array([[0, 0], [1, 0], [1, 1]]).T]
        coeffs_in = [np.ones((indices_in[0].shape[1], 1)),
                     2*np.ones((indices_in[1].shape[1], 1))]

        basis_index = np.array([[2, 3, 1]]).T
        basis_coeff = np.array([[1]])
        var_indices = np.array([0, 2])
        new_indices, new_coeffs = \
            substitute_polynomials_for_variables_in_single_basis_term(
                indices_in, coeffs_in, basis_index, basis_coeff, var_indices,
                global_var_idx)
        assert new_coeffs.shape[0] == new_indices.shape[1]
        assert new_indices.shape[1] == 21

        nvars = 4
        # degree must be high enough to exactly represent y3, which is a
        # composition of lower-degree polynomials
        degree = 10
        true_indices = compute_hyperbolic_indices(nvars, degree, 1)
        nsamples = true_indices.shape[1]*3
        samples = np.random.uniform(-1, 1, (nvars, nsamples))
        values1 = y1(samples)
        values2 = y2(samples)
        values = y3(np.vstack([samples, values1[None, :], values2[None, :]]))
        basis_mat = monomial_basis_matrix(true_indices, samples)
        true_coef = np.linalg.lstsq(basis_mat, values[:, None], rcond=None)[0]
        true_indices, true_coef = compress_and_sort_polynomial(
            true_coef, true_indices)
        new_indices, new_coeffs = compress_and_sort_polynomial(
            new_coeffs, new_indices)
        assert np.allclose(new_indices, true_indices)
        # print((true_coef, new_coeffs))
        assert np.allclose(true_coef, new_coeffs)
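
The asserted term count of 21 can be cross-checked independently. A quick sketch, assuming sympy is available (verification only, not part of the test):

import sympy as sp

x1, x2, x3, x4 = sp.symbols('x1 x2 x3 x4')
y1 = 1 + x1 + x2 + x1*x2
y2 = 2 + 2*x1 + 2*x1*x3
# expand y1**2 * x4**3 * y2 and count the distinct monomials
expanded = sp.expand(y1**2 * x4**3 * y2)
print(len(expanded.args))  # prints 21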
Example #3
    def test_inner_products_on_active_subspace(self):
        num_vars = 4
        num_active_vars = 2
        degree = 3
        A = np.random.normal(0, 1, (num_vars, num_vars))
        Q, R = np.linalg.qr(A)
        W1 = Q[:, :num_active_vars]

        as_poly_indices = np.asarray([
            [0, 0], [1, 0], [0, 1], [2, 0], [1, 1], [0, 2]
        ]).T

        x1d, w1d = np.polynomial.legendre.leggauss(10)
        w1d /= 2
        gl_samples = cartesian_product([x1d]*num_vars)
        gl_weights = outer_product([w1d]*num_vars)
        as_gl_samples = np.dot(W1.T, gl_samples)

        inner_product_indices = np.empty(
            (num_active_vars, as_poly_indices.shape[1]**2), dtype=int)
        for ii in range(as_poly_indices.shape[1]):
            for jj in range(as_poly_indices.shape[1]):
                inner_product_indices[:, ii*as_poly_indices.shape[1]+jj] = \
                    as_poly_indices[:, ii]+as_poly_indices[:, jj]

        vandermonde = monomial_basis_matrix(
            inner_product_indices, as_gl_samples)

        inner_products = inner_products_on_active_subspace(
            W1.T, as_poly_indices, monomial_mean_uniform_variables)

        for ii in range(as_poly_indices.shape[1]):
            for jj in range(as_poly_indices.shape[1]):
                assert np.allclose(
                    inner_products[ii, jj],
                    np.dot(vandermonde[:, ii*as_poly_indices.shape[1]+jj],
                           gl_weights))
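
The quadrature rule above is a full tensor product of the 1D Gauss-Legendre rule. A minimal sketch of what cartesian_product and outer_product are assumed to produce here (point ordering may differ from pyapprox):

import numpy as np
from itertools import product

def tensor_product_rule(x1d, w1d, num_vars):
    # All num_vars-fold combinations of the 1D points (as columns) and the
    # corresponding products of the 1D weights.
    points = np.array(list(product(x1d, repeat=num_vars))).T
    weights = np.array([np.prod(w) for w in product(w1d, repeat=num_vars)])
    return points, weights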
Example #4
    def canonical_basis_matrix(self, canonical_samples, opts=dict()):
        deriv_order = opts.get('deriv_order', 0)
        if self.recursion_coeffs[0] is not None:
            basis_matrix = evaluate_multivariate_orthonormal_polynomial(
                canonical_samples, self.indices, self.recursion_coeffs,
                deriv_order, self.basis_type_index_map)
        else:
            basis_matrix = monomial_basis_matrix(
                self.indices, canonical_samples, deriv_order)
        return basis_matrix
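
Every example on this page relies on monomial_basis_matrix. For reference, a sketch of its assumed zeroth-derivative behaviour, where entry (i, j) is the j-th monomial evaluated at the i-th sample:

import numpy as np

def monomial_vandermonde(indices, samples):
    # indices: (num_vars, num_terms) exponents
    # samples: (num_vars, num_samples) evaluation points
    # Returns the (num_samples, num_terms) matrix with
    # V[i, j] = prod_d samples[d, i]**indices[d, j]
    num_samples, num_terms = samples.shape[1], indices.shape[1]
    V = np.ones((num_samples, num_terms))
    for jj in range(num_terms):
        for dd in range(indices.shape[0]):
            V[:, jj] *= samples[dd, :]**indices[dd, jj]
    return V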
Example #5
    def test_substitute_polynomials_for_variables_in_another_polynomial_II(self):
        global_var_idx = [np.array([0, 1, 2, 3, 4, 5]), np.array([0, 1, 2])]
        indices_in = [
            compute_hyperbolic_indices(global_var_idx[0].shape[0], 2, 1),
            compute_hyperbolic_indices(global_var_idx[1].shape[0], 3, 1)]
        coeffs_in = [np.ones((indices_in[0].shape[1], 1)),
                     2*np.ones((indices_in[1].shape[1], 1))]
        var_idx = np.array([0, 1])  # must correspond to how the variables
        # enter the indices below
        indices = compute_hyperbolic_indices(3, 5, 1)
        coeffs = np.ones((indices.shape[1], 1))
        new_indices, new_coef = \
            substitute_polynomials_for_variables_in_another_polynomial(
                indices_in, coeffs_in, indices, coeffs, var_idx, global_var_idx)

        nsamples = 100
        nvars = np.unique(np.concatenate(global_var_idx)).shape[0] + (
            indices.shape[0] - var_idx.shape[0])
        samples = np.random.uniform(-1, 1, (nvars, nsamples))
        validation_samples = np.random.uniform(-1, 1, (nvars, 1000))
        validation_values1 = monomial_basis_matrix(
            indices_in[0], validation_samples[global_var_idx[0], :]).dot(
                coeffs_in[0])
        validation_values2 = monomial_basis_matrix(
            indices_in[1], validation_samples[global_var_idx[1], :]).dot(
                coeffs_in[1])
        # inputs to polynomial which are not themselves polynomials
        other_global_var_idx = np.setdiff1d(
            np.arange(nvars), np.unique(np.concatenate(global_var_idx)))
        validation_values = np.dot(
            monomial_basis_matrix(
                indices,
                np.vstack([validation_values1.T, validation_values2.T,
                           validation_samples[other_global_var_idx, :], ])),
            coeffs)
        assert np.allclose(validation_values, monomial_basis_matrix(
            new_indices, validation_samples).dot(new_coef))
Example #6
    def unrotated_canonical_basis_matrix(self, canonical_samples):
        """
        Cannot just call super(APCE, self).canonical_basis because I was
        running into inheritance problems.
        """
        deriv_order = 0
        if self.recursion_coeffs is not None:
            unrotated_basis_matrix = \
                evaluate_multivariate_orthonormal_polynomial(
                    canonical_samples, self.indices, self.recursion_coeffs,
                    deriv_order, self.basis_type_index_map)
        else:
            unrotated_basis_matrix = monomial_basis_matrix(
                self.indices, canonical_samples)
        return unrotated_basis_matrix
Example #7
    def test_inner_products_on_active_subspace_using_samples(self):

        def generate_samples(num_samples):
            from pyapprox.low_discrepancy_sequences import \
                transformed_halton_sequence
            samples = transformed_halton_sequence(None, num_vars, num_samples)
            samples = samples*2.-1.
            return samples

        num_vars = 4
        num_active_vars = 2
        degree = 3
        A = np.random.normal(0, 1, (num_vars, num_vars))
        Q, R = np.linalg.qr(A)
        W1 = Q[:, :num_active_vars]

        as_poly_indices = np.asarray([
            [0, 0], [1, 0], [0, 1], [2, 0], [1, 1], [0, 2]
        ]).T

        x1d, w1d = np.polynomial.legendre.leggauss(10)
        w1d /= 2
        gl_samples = cartesian_product([x1d]*num_vars)
        gl_weights = outer_product([w1d]*num_vars)
        as_gl_samples = np.dot(W1.T, gl_samples)

        inner_product_indices = np.empty(
            (num_active_vars, as_poly_indices.shape[1]**2), dtype=int)
        for ii in range(as_poly_indices.shape[1]):
            for jj in range(as_poly_indices.shape[1]):
                inner_product_indices[:, ii*as_poly_indices.shape[1]+jj] =\
                    as_poly_indices[:, ii]+as_poly_indices[:, jj]

        vandermonde = monomial_basis_matrix(
            inner_product_indices, as_gl_samples)

        num_sobol_samples = 100000
        inner_products = sample_based_inner_products_on_active_subspace(
            W1, monomial_basis_matrix, as_poly_indices, num_sobol_samples,
            generate_samples)

        for ii in range(as_poly_indices.shape[1]):
            for jj in range(as_poly_indices.shape[1]):
                assert np.allclose(
                    inner_products[ii, jj],
                    np.dot(vandermonde[:, ii*as_poly_indices.shape[1]+jj],
                           gl_weights), atol=1e-4)
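
The sample-based estimate validated here is, in essence, a Monte Carlo average of products of basis functions on the active subspace. A hedged sketch of that estimator (names and signature are illustrative, not the pyapprox API):

import numpy as np

def mc_inner_products(W1, indices, samples, basis_matrix_fun):
    # E[phi_i(W1^T x) phi_j(W1^T x)] estimated as the sample mean of the
    # product of basis columns evaluated on the projected samples
    V = basis_matrix_fun(indices, W1.T.dot(samples))
    return V.T.dot(V)/samples.shape[1]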
Example #8
    def test_moments_of_active_subspace_II(self):
        num_vars = 4
        num_active_vars = 2
        degree = 12
        A = np.random.normal(0, 1, (num_vars, num_vars))
        Q, R = np.linalg.qr(A)
        W1 = Q[:, :num_active_vars]

        as_poly_indices = compute_hyperbolic_indices(
            num_active_vars, degree, 1.0)
        moments = moments_of_active_subspace(
            W1.T, as_poly_indices, monomial_mean_uniform_variables)

        x1d, w1d = np.polynomial.legendre.leggauss(10)
        w1d /= 2
        gl_samples = cartesian_product([x1d]*num_vars)
        gl_weights = outer_product([w1d]*num_vars)
        as_gl_samples = np.dot(W1.T, gl_samples)

        vandermonde = monomial_basis_matrix(as_poly_indices, as_gl_samples)
        quad_poly_moments = np.empty(vandermonde.shape[1])
        for ii in range(vandermonde.shape[1]):
            quad_poly_moments[ii] = np.dot(vandermonde[:, ii], gl_weights)
        assert np.allclose(moments, quad_poly_moments)
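
monomial_mean_uniform_variables supplies the means of the monomial terms. For iid uniform variables on [-1, 1] these factorize per dimension: E[x**p] is 0 for odd p and 1/(p+1) for even p. A sketch of what such a function is assumed to compute:

import numpy as np

def monomial_mean_uniform(indices, coeffs):
    # E[prod_d x_d**p_d] = prod_d (1/(p_d+1) if p_d is even else 0)
    term_means = np.where(indices % 2 == 0, 1.0/(indices + 1.0), 0.0)
    return term_means.prod(axis=0).dot(coeffs)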
Example #9
    def test_least_squares_regression(self):
        """
        Use non-linear least squares to estimate the coefficients of the
        function train approximation of a rank-2 trivariate function.
        """
        alpha = 0
        beta = 0
        degree = 5
        num_vars = 3
        rank = 2
        num_samples = 100
        recursion_coeffs = jacobi_recurrence(degree + 1,
                                             alpha=alpha,
                                             beta=beta,
                                             probability=True)

        ranks = np.ones((num_vars + 1), dtype=int)
        ranks[1:-1] = rank

        def function(samples):
            return np.cos(samples.sum(axis=0))[:, np.newaxis]

        samples = np.random.uniform(-1, 1, (num_vars, num_samples))
        values = function(samples)
        assert values.shape[0] == num_samples

        linear_ft_data = ft_linear_least_squares_regression(samples,
                                                            values,
                                                            degree,
                                                            perturb=None)

        initial_guess = linear_ft_data[1].copy()

        # test jacobian
        # residual_func = partial(
        #    least_squares_residual,samples,values,linear_ft_data,
        #    recursion_coeffs)
        #
        # jacobian = least_squares_jacobian(
        #     samples,values,linear_ft_data,recursion_coeffs,initial_guess)
        # finite difference is expensive check on subset of points
        # for ii in range(2):
        #    func = lambda x: residual_func(x)[ii]
        #    assert np.allclose(
        #        scipy.optimize.approx_fprime(initial_guess, func, 1e-7),
        #        jacobian[ii,:])

        lstsq_ft_params = ft_non_linear_least_squares_regression(
            samples, values, linear_ft_data, recursion_coeffs, initial_guess)
        lstsq_ft_data = copy.deepcopy(linear_ft_data)
        lstsq_ft_data[1] = lstsq_ft_params

        num_valid_samples = 100
        validation_samples = np.random.uniform(-1., 1.,
                                               (num_vars, num_valid_samples))
        validation_values = function(validation_samples)

        ft_validation_values = evaluate_function_train(validation_samples,
                                                       lstsq_ft_data,
                                                       recursion_coeffs)
        ft_error = np.linalg.norm(validation_values - ft_validation_values
                                  ) / np.sqrt(num_valid_samples)
        assert ft_error < 1e-3, ft_error

        # compare against tensor-product linear least squares
        from pyapprox.monomial import monomial_basis_matrix, evaluate_monomial
        from pyapprox.indexing import tensor_product_indices
        indices = tensor_product_indices([degree] * num_vars)
        basis_matrix = monomial_basis_matrix(indices, samples)
        coef = np.linalg.lstsq(basis_matrix, values, rcond=None)[0]
        monomial_validation_values = evaluate_monomial(indices, coef,
                                                       validation_samples)
        monomial_error = np.linalg.norm(validation_values -
                                        monomial_validation_values) / np.sqrt(
                                            num_valid_samples)
        assert ft_error < monomial_error
Example #10
    def test_substitute_polynomials_for_variables_in_another_polynomial(self):
        """
        Substitute 
          y1 = (1+x1+x2+x1*x2)
          y2 = (2+2*x1+2*x1*x3)
        into 
          y3 = 1+y1+y2+x4+3*y1*y2+y2*x4+5*y1*y2*x4
        """
        def y1(x):
            x1, x2 = x[:2, :]
            return 1+x1+x2+x1*x2

        def y2(x):
            x1, x3 = x[[0, 2], :]
            return 2+2*x1+2*x1*x3

        def y3(x):
            x4 = x[3, :]
            y1, y2 = x[4:, :]
            return 1+y1+y2+x4+3*y1*y2+y2*x4+5*y1*y2*x4

        nvars = 4
        nsamples = 300
        degree = 5
        samples = np.random.uniform(-1, 1, (nvars, nsamples))
        values1 = y1(samples)
        values2 = y2(samples)
        values = y3(np.vstack([samples, values1[None, :], values2[None, :]]))
        
        true_indices = compute_hyperbolic_indices(nvars, degree, 1)
        basis_mat = monomial_basis_matrix(true_indices, samples)
        true_coef = np.linalg.lstsq(basis_mat, values[:, None], rcond=None)[0]

        validation_samples = np.random.uniform(-1, 1, (nvars, 1000))
        validation_values1 = y1(validation_samples)
        validation_values2 = y2(validation_samples)
        validation_values = y3(
            np.vstack([validation_samples, validation_values1[None, :],
                       validation_values2[None, :]]))

        validation_basis_mat = monomial_basis_matrix(
            true_indices, validation_samples)
        assert np.allclose(
            validation_values[:, None],
            validation_basis_mat.dot(true_coef), rtol=1e-12)

        global_var_idx = [np.array([0, 1]), np.array([0, 2])]
        indices_in = [np.array([[0, 0], [1, 0], [0, 1], [1, 1]]).T,
                      np.array([[0, 0], [1, 0], [1, 1]]).T]
        coeffs_in = [np.ones((indices_in[0].shape[1], 1)),
                     2*np.ones((indices_in[1].shape[1], 1))]
        var_idx = np.array([0, 1])  # must correspond to how the variables
        # enter the indices below
        indices = np.array(
            [[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 0],
             [0, 1, 1], [1, 1, 1]]).T
        coeffs = np.ones((indices.shape[1], 1))
        coeffs[4] = 3
        coeffs[6] = 5
        new_indices, new_coef = \
            substitute_polynomials_for_variables_in_another_polynomial(
                indices_in, coeffs_in, indices, coeffs, var_idx, global_var_idx)

        true_indices, true_coef = compress_and_sort_polynomial(
            true_coef, true_indices)
        new_indices, new_coef = compress_and_sort_polynomial(
            new_coef, new_indices)
        assert np.allclose(new_indices, true_indices)
        # print(new_coef[:, 0])
        # print(true_coef)
        assert np.allclose(new_coef, true_coef)
Example #11
    def help_compare_prediction_based_oed(self, deviation_fun,
                                          gauss_deviation_fun,
                                          use_gauss_quadrature,
                                          ninner_loop_samples, ndesign_vars,
                                          tol):
        ncandidates_1d = 5
        design_candidates = cartesian_product(
            [np.linspace(-1, 1, ncandidates_1d)] * ndesign_vars)
        ncandidates = design_candidates.shape[1]

        # Define model used to predict likely observable data
        indices = compute_hyperbolic_indices(ndesign_vars, 1)[:, 1:]
        Amat = monomial_basis_matrix(indices, design_candidates)
        obs_fun = partial(linear_obs_fun, Amat)

        # Define model used to predict unobservable QoI
        qoi_fun = exponential_qoi_fun

        # Define the prior PDF of the unknown variables
        nrandom_vars = indices.shape[1]
        prior_variable = IndependentMultivariateRandomVariable(
            [stats.norm(0, 0.5)] * nrandom_vars)

        # Define the independent observational noise
        noise_std = 1

        # Define initial design
        init_design_indices = np.array([ncandidates // 2])

        # Define OED options
        nouter_loop_samples = 100
        if use_gauss_quadrature:
            # 301 needed for cvar deviation
            # only 31 needed for variance deviation
            ninner_loop_samples_1d = ninner_loop_samples
            var_trans = AffineRandomVariableTransformation(prior_variable)
            x_quad, w_quad = gauss_hermite_pts_wts_1D(ninner_loop_samples_1d)
            x_quad = cartesian_product([x_quad] * nrandom_vars)
            w_quad = outer_product([w_quad] * nrandom_vars)
            x_quad = var_trans.map_from_canonical_space(x_quad)
            ninner_loop_samples = x_quad.shape[1]

            def generate_inner_prior_samples(nsamples):
                assert nsamples == x_quad.shape[1], (nsamples, x_quad.shape)
                return x_quad, w_quad
        else:
            # use default Monte Carlo sampling
            generate_inner_prior_samples = None

        # Setup OED problem
        oed = BayesianBatchDeviationOED(design_candidates,
                                        obs_fun,
                                        noise_std,
                                        prior_variable,
                                        qoi_fun,
                                        nouter_loop_samples,
                                        ninner_loop_samples,
                                        generate_inner_prior_samples,
                                        deviation_fun=deviation_fun)
        oed.populate()
        oed.set_collected_design_indices(init_design_indices)

        prior_mean = oed.prior_variable.get_statistics('mean')
        prior_cov = np.diag(prior_variable.get_statistics('var')[:, 0])
        prior_cov_inv = np.linalg.inv(prior_cov)
        selected_indices = init_design_indices

        # Generate experimental design
        nexperiments = 3
        for step in range(len(init_design_indices), nexperiments):
            # Copy current state of OED before new data is determined
            # This copy will be used to compute Laplace based utility and
            # evidence values for testing
            oed_copy = copy.deepcopy(oed)

            # Update the design
            utility_vals, selected_indices = oed.update_design()

            utility, deviations, evidences, weights = \
                oed_copy.compute_expected_utility(
                    oed_copy.collected_design_indices, selected_indices, True)

            exact_deviations = np.empty(nouter_loop_samples)
            for jj in range(nouter_loop_samples):
                # only test intermediate quantities associated with design
                # chosen by the OED step
                idx = oed.collected_design_indices
                obs_jj = oed_copy.outer_loop_obs[jj:jj + 1, idx]

                noise_cov_inv_jj = np.eye(idx.shape[0]) / noise_std**2
                exact_post_mean_jj, exact_post_cov_jj = \
                    laplace_posterior_approximation_for_linear_models(
                        Amat[idx, :],
                        prior_mean, prior_cov_inv, noise_cov_inv_jj, obs_jj.T)

                exact_deviations[jj] = gauss_deviation_fun(
                    exact_post_mean_jj, exact_post_cov_jj)
            print('d',
                  np.absolute(exact_deviations - deviations[:, 0]).max(), tol)
            # print(exact_deviations, deviations[:, 0])
            assert np.allclose(exact_deviations, deviations[:, 0], atol=tol)
            assert np.allclose(utility_vals[selected_indices],
                               -np.mean(exact_deviations),
                               atol=tol)
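
The reference deviations above rely on the exact posterior of a linear model with Gaussian prior and noise, which follows from the standard conjugate update. A sketch under those assumptions (not the pyapprox function itself):

import numpy as np

def linear_gaussian_posterior(Amat, prior_mean, prior_cov_inv,
                              noise_cov_inv, obs):
    # Conjugate update for obs = Amat @ theta + noise:
    # posterior covariance and mean of theta
    post_cov = np.linalg.inv(
        prior_cov_inv + Amat.T.dot(noise_cov_inv).dot(Amat))
    post_mean = post_cov.dot(
        Amat.T.dot(noise_cov_inv).dot(obs) + prior_cov_inv.dot(prior_mean))
    return post_mean, post_cov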