Example #1
    def test_evaluate_function_train_additive_function(self):
        """
        Test the evaluation of a function train representation of an additive
        function.

        Assume same parameterization for each core and for each univariate
        function within a core.

        Use polynomial basis for each univariate function.
        """
        alpha = 0
        beta = 0
        degree = 2
        num_vars = 3
        num_samples = 1
        recursion_coeffs = jacobi_recurrence(degree + 1,
                                             alpha=alpha,
                                             beta=beta,
                                             probability=True)

        univariate_function_params = [np.random.normal(0., 1., (degree + 1))
                                      ] * num_vars
        ft_data = generate_additive_function_in_function_train_format(
            univariate_function_params, True)

        samples = np.random.uniform(-1., 1., (num_vars, num_samples))
        values = evaluate_function_train(samples, ft_data, recursion_coeffs)

        true_values = additive_polynomial(samples, univariate_function_params,
                                          recursion_coeffs)

        assert np.allclose(values, true_values)
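
What this example evaluates is an additive target f(x) = f_1(x_1) + f_2(x_2) + f_3(x_3) whose univariate pieces share the same random coefficients. Below is a minimal pure-numpy sketch of that kind of additive reference function, assuming jacobi_recurrence(degree + 1, alpha=0, beta=0, probability=True) corresponds to the Legendre family rescaled to be orthonormal under the uniform probability measure on [-1, 1] (i.e. sqrt(2k + 1) times the standard Legendre polynomial); the function name is illustrative and is not the pyapprox additive_polynomial API.

import numpy as np
from numpy.polynomial.legendre import legval

def additive_legendre_function(samples, univariate_function_params):
    # f(x) = f_1(x_1) + ... + f_D(x_D), each f_d expanded in Legendre
    # polynomials rescaled by sqrt(2k + 1) so they are orthonormal with
    # respect to the uniform probability measure on [-1, 1] (assumption).
    num_vars, num_samples = samples.shape
    values = np.zeros((num_samples, 1))
    for dd in range(num_vars):
        coeffs = np.asarray(univariate_function_params[dd])
        scale = np.sqrt(2 * np.arange(coeffs.shape[0]) + 1)
        values[:, 0] += legval(samples[dd, :], coeffs * scale)
    return values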
Example #2
def function(samples):
    return evaluate_function_train(samples, ft_data, recursion_coeffs)
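
This small wrapper just closes over ft_data and recursion_coeffs so the function train can be called like any black-box model: it takes samples of shape (num_vars, num_samples) and, as the assertions in the surrounding examples show, returns an array with num_samples rows. A usage sketch, assuming ft_data and recursion_coeffs were built as in Example #1 with three variables:

samples = np.random.uniform(-1., 1., (3, 10))
values = function(samples)   # one row per sample
print(values.mean())         # e.g. a crude Monte Carlo estimate of the mean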
Example #3
    def test_restricted_least_squares_regression_additive_function(self):
        """
        Use non-linear least squares to estimate the coefficients of the
        function train representation of an additive function of three
        variables (which has exact FT rank 2), optimizing only over a
        subset of the FT parameters.
        """
        alpha = 0
        beta = 0
        degree = 5
        num_vars = 3
        rank = 2
        num_samples = 20
        recursion_coeffs = jacobi_recurrence(degree + 1,
                                             alpha=alpha,
                                             beta=beta,
                                             probability=True)

        ranks = np.ones((num_vars + 1), dtype=int)
        ranks[1:-1] = rank
        num_params_1d = degree + 1

        univariate_function_params = [np.random.normal(0., 1., (degree + 1))
                                      ] * num_vars
        ft_data = generate_additive_function_in_function_train_format(
            univariate_function_params, False)

        def function(samples):
            return evaluate_function_train(samples, ft_data, recursion_coeffs)

        samples = np.random.uniform(-1, 1, (num_vars, num_samples))
        values = function(samples)
        assert values.shape[0] == num_samples

        linear_ft_data = ft_linear_least_squares_regression(samples,
                                                            values,
                                                            degree,
                                                            perturb=None)

        initial_guess = linear_ft_data[1].copy()

        active_indices = []
        active_indices += list(range(num_params_1d))
        active_indices += list(range(3 * num_params_1d, 4 * num_params_1d))
        active_indices += list(range(7 * num_params_1d, 8 * num_params_1d))
        active_indices = np.asarray(active_indices)
        # active_indices = np.where((ft_data[1]!=0)&(ft_data[1]!=1))[0]
        initial_guess = initial_guess[active_indices]

        lstsq_ft_params = ft_non_linear_least_squares_regression(
            samples,
            values,
            linear_ft_data,
            recursion_coeffs,
            initial_guess,
            active_indices=active_indices)
        lstsq_ft_data = copy.deepcopy(linear_ft_data)
        lstsq_ft_data[1] = lstsq_ft_params

        num_valid_samples = 100
        validation_samples = np.random.uniform(-1., 1.,
                                               (num_vars, num_valid_samples))
        validation_values = function(validation_samples)

        ft_validation_values = evaluate_function_train(validation_samples,
                                                       lstsq_ft_data,
                                                       recursion_coeffs)
        ft_error = np.linalg.norm(validation_values - ft_validation_values
                                  ) / np.sqrt(num_valid_samples)
        assert ft_error < 1e-3, ft_error
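
A note on the hard-coded index blocks above: an additive function of three variables stored as a rank-2 function train contains 2 + 4 + 2 = 8 univariate functions (two in each boundary core, four in the 2 x 2 middle core), each holding num_params_1d coefficients. Only one entry per core is a genuine univariate function of the additive target; the remaining entries are the constants 0 and 1 that encode the additive structure. Under the storage order assumed by this test, those three genuine blocks start at offsets 0, 3 * num_params_1d and 7 * num_params_1d, which is exactly the set the commented-out np.where line would recover automatically from the parameter values.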
Example #4
    def test_restricted_least_squares_regression_sparse(self):
        """
        Use non-linear least squares to estimate the coefficients of a
        random sparse rank-2 function train in three variables, optimizing
        only over the FT parameters selected by active_indices (those that
        are neither 0 nor 1).
        """
        alpha = 0
        beta = 0
        degree = 5
        num_vars = 3
        rank = 2
        num_samples = 20
        sparsity_ratio = 0.2
        recursion_coeffs = jacobi_recurrence(degree + 1,
                                             alpha=alpha,
                                             beta=beta,
                                             probability=True)

        ranks = np.ones((num_vars + 1), dtype=int)
        ranks[1:-1] = rank

        ft_data = generate_random_sparse_function_train(
            num_vars, rank, degree + 1, sparsity_ratio)

        def function(samples):
            return evaluate_function_train(samples, ft_data, recursion_coeffs)

        samples = np.random.uniform(-1, 1, (num_vars, num_samples))
        values = function(samples)
        assert values.shape[0] == num_samples

        active_indices = np.where((ft_data[1] != 0) & (ft_data[1] != 1))[0]

        linear_ft_data = ft_linear_least_squares_regression(samples,
                                                            values,
                                                            degree,
                                                            perturb=None)
        initial_guess = linear_ft_data[1].copy()
        initial_guess = initial_guess[active_indices]
        # initial_guess = true_sol[active_indices] + np.random.normal(
        #    0.,1.,(active_indices.shape[0]))

        lstsq_ft_params = ft_non_linear_least_squares_regression(
            samples,
            values,
            ft_data,
            recursion_coeffs,
            initial_guess,
            active_indices=active_indices)
        lstsq_ft_data = copy.deepcopy(ft_data)
        lstsq_ft_data[1] = lstsq_ft_params

        num_valid_samples = 100
        validation_samples = np.random.uniform(-1., 1.,
                                               (num_vars, num_valid_samples))
        validation_values = function(validation_samples)

        ft_validation_values = evaluate_function_train(validation_samples,
                                                       lstsq_ft_data,
                                                       recursion_coeffs)
        ft_error = np.linalg.norm(validation_values - ft_validation_values
                                  ) / np.sqrt(num_valid_samples)
        # print ft_error
        assert ft_error < 1e-3, ft_error
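
The validation block at the end of this example is identical to the one in the previous example (and the next). A hedged refactor of that repeated pattern into a helper; the name is illustrative, and numpy plus the evaluate_function_train routine used throughout these examples are assumed to be in scope:

def ft_validation_rmse(true_function, ft_data_approx, recursion_coeffs,
                       num_vars, num_valid_samples=100):
    # Root-mean-square error of the FT approximation against the true model
    # on fresh uniform validation samples, mirroring the blocks above.
    validation_samples = np.random.uniform(
        -1., 1., (num_vars, num_valid_samples))
    validation_values = true_function(validation_samples)
    ft_values = evaluate_function_train(
        validation_samples, ft_data_approx, recursion_coeffs)
    return np.linalg.norm(
        validation_values - ft_values) / np.sqrt(num_valid_samples)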
Example #5
    def test_least_squares_regression(self):
        """
        Use non-linear least squares to estimate the coefficients of the
        function train approximation of cos(x_1 + x_2 + x_3), a function of
        three variables with an exact rank-2 function train representation.
        """
        alpha = 0
        beta = 0
        degree = 5
        num_vars = 3
        rank = 2
        num_samples = 100
        recursion_coeffs = jacobi_recurrence(degree + 1,
                                             alpha=alpha,
                                             beta=beta,
                                             probability=True)

        ranks = np.ones((num_vars + 1), dtype=int)
        ranks[1:-1] = rank

        def function(samples):
            return np.cos(samples.sum(axis=0))[:, np.newaxis]

        samples = np.random.uniform(-1, 1, (num_vars, num_samples))
        values = function(samples)
        assert values.shape[0] == num_samples

        linear_ft_data = ft_linear_least_squares_regression(samples,
                                                            values,
                                                            degree,
                                                            perturb=None)

        initial_guess = linear_ft_data[1].copy()

        # test jacobian
        # residual_func = partial(
        #    least_squares_residual,samples,values,linear_ft_data,
        #    recursion_coeffs)
        #
        # jacobian = least_squares_jacobian(
        #     samples,values,linear_ft_data,recursion_coeffs,initial_guess)
        # finite differences are expensive, so only check a subset of points
        # for ii in range(2):
        #    func = lambda x: residual_func(x)[ii]
        #    assert np.allclose(
        #        scipy.optimize.approx_fprime(initial_guess, func, 1e-7),
        #        jacobian[ii,:])

        lstsq_ft_params = ft_non_linear_least_squares_regression(
            samples, values, linear_ft_data, recursion_coeffs, initial_guess)
        lstsq_ft_data = copy.deepcopy(linear_ft_data)
        lstsq_ft_data[1] = lstsq_ft_params

        num_valid_samples = 100
        validation_samples = np.random.uniform(-1., 1.,
                                               (num_vars, num_valid_samples))
        validation_values = function(validation_samples)

        ft_validation_values = evaluate_function_train(validation_samples,
                                                       lstsq_ft_data,
                                                       recursion_coeffs)
        ft_error = np.linalg.norm(validation_values - ft_validation_values
                                  ) / np.sqrt(num_valid_samples)
        assert ft_error < 1e-3, ft_error

        # compare against tensor-product linear least squares
        from pyapprox.monomial import monomial_basis_matrix, evaluate_monomial
        from pyapprox.indexing import tensor_product_indices
        indices = tensor_product_indices([degree] * num_vars)
        basis_matrix = monomial_basis_matrix(indices, samples)
        coef = np.linalg.lstsq(basis_matrix, values, rcond=None)[0]
        monomial_validation_values = evaluate_monomial(indices, coef,
                                                       validation_samples)
        monomial_error = np.linalg.norm(validation_values -
                                        monomial_validation_values) / np.sqrt(
                                            num_valid_samples)
        assert ft_error < monomial_error
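
Why rank 2 suffices for cos(x_1 + x_2 + x_3): applying the angle-addition formula cos(a + b) = cos(a)cos(b) - sin(a)sin(b) twice factors the target into 1 x 2, 2 x 2 and 2 x 1 cores whose entries are univariate sines and cosines, which the degree-5 polynomial cores can approximate well on [-1, 1]. A small self-contained numpy check of that factorization (independent of pyapprox):

import numpy as np

xx = np.random.uniform(-1., 1., 3)
core1 = np.array([[np.cos(xx[0]), -np.sin(xx[0])]])    # 1 x 2
core2 = np.array([[np.cos(xx[1]), -np.sin(xx[1])],
                  [np.sin(xx[1]),  np.cos(xx[1])]])    # 2 x 2
core3 = np.array([[np.cos(xx[2])],
                  [np.sin(xx[2])]])                    # 2 x 1
assert np.allclose(core1 @ core2 @ core3, np.cos(xx.sum()))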