def update_leja_sequence_slow(self, new_subspace_indices):
        """
        Rebuild the Leja sequence from scratch (slow path) after the
        polynomial index set has grown.

        Performs a full truncated pivoted LU factorization of the
        preconditioned basis matrix over all candidate samples, rather
        than incrementally updating an existing factorization.

        Parameters
        ----------
        new_subspace_indices : np.ndarray
            The newly added subspace indices. NOTE(review): not used in
            this slow path — the factorization is recomputed from
            ``self.poly_indices`` directly.

        Returns
        -------
        np.ndarray (num_vars, num_new_samples)
            The newly selected samples, i.e. the pivoted candidates
            beyond the ``num_samples`` already in the sequence.
        """
        num_samples = self.samples.shape[1]
        # There will be two copies of self.samples in candidate_samples
        # but pivoting will only choose these samples once when number of
        # desired samples is smaller than
        # self.candidate_samples.shape[0]-self.samples.shape[1]
        candidate_samples = np.hstack([self.samples, self.candidate_samples])

        self.pce.set_indices(self.poly_indices)
        precond_basis_matrix, precond_weights = \
            self.precond_canonical_basis_matrix(candidate_samples)

        # TODO: update LU factorization using new candidate points, This
        # requires writing a function that updates not just new columns of
        # L and U factor but also allows new rows to be added.
        max_iters = self.poly_indices.shape[1]
        # Force the existing samples (stacked first) to be chosen before
        # any fresh candidates by treating them as initial rows.
        num_initial_rows = num_samples
        self.L_factor,self.U_factor,pivots=\
          truncated_pivoted_lu_factorization(
              precond_basis_matrix,max_iters,num_initial_rows=num_initial_rows)
        # Map the first num_initial_rows pivots back onto the original
        # sample ordering, then append the indices of the newly chosen rows.
        self.pivots = np.arange(num_samples)[pivots[:num_initial_rows]]
        self.pivots = np.concatenate(
            [self.pivots,
             np.arange(num_initial_rows, pivots.shape[0])])
        # Store weights in pivoted order as a column vector.
        self.precond_weights = precond_weights[pivots, np.newaxis]
        # Return only the samples added beyond the pre-existing ones.
        return candidate_samples[:, pivots[num_samples:]]
# Example #2
# 0
def get_lu_leja_samples(generate_basis_matrix,
                        generate_candidate_samples,
                        num_candidate_samples,
                        num_leja_samples,
                        preconditioning_function=None,
                        initial_samples=None):
    """
    Generate Leja samples using truncated pivoted LU factorization.

    Parameters
    ----------
    generate_basis_matrix : callable
        basis_matrix = generate_basis_matrix(candidate_samples)
        Function to evaluate a basis at a set of samples

    generate_candidate_samples : callable
        candidate_samples = generate_candidate_samples(num_candidate_samples)
        Function to generate candidate samples. This can significantly affect
        the Leja samples generated

    num_candidate_samples : integer
        The number of candidate_samples

    num_leja_samples : integer
        The number of desired Leja samples. Must be <= num_indices

    preconditioning_function : callable, optional
        weights = preconditioning_function(basis_matrix, samples)
        Precondition the basis matrix to improve stability.
        samples must be in the same order as they were used to create the
        rows of the basis matrix.
        TODO unfortunately some preconditioning_functions need only the basis
        matrix or the samples, but cannot think of a better way to generically
        pass in a function here other than to require both arguments.

    initial_samples : np.ndarray (num_vars, num_initial_samples), optional
        Enforce that the initial samples are chosen (in the order specified)
        before any other candidate samples are chosen. This can lead to
        ill conditioning and the Leja sequence terminating early

    Returns
    -------
    leja_samples : np.ndarray (num_vars, num_leja_samples)
        The samples of the Leja sequence

    data_structures : list
        [L, U, p, pivoted_weights] the truncated LU factors, the pivots
        and the preconditioning weights in pivoted order (None when no
        preconditioning_function was given). Useful for quickly building
        an interpolant from the samples
    """
    candidate_samples = generate_candidate_samples(num_candidate_samples)
    if initial_samples is not None:
        assert candidate_samples.shape[0] == initial_samples.shape[0]
        # Stack initial samples first so pivoting selects them first.
        candidate_samples = np.hstack((initial_samples, candidate_samples))
        num_initial_rows = initial_samples.shape[1]
    else:
        num_initial_rows = 0

    basis_matrix = generate_basis_matrix(candidate_samples)
    assert num_leja_samples <= basis_matrix.shape[1]

    if preconditioning_function is not None:
        # Scale each row (sample) of the basis matrix by sqrt of its weight.
        weights = np.sqrt(
            preconditioning_function(basis_matrix, candidate_samples))
        basis_matrix = (basis_matrix.T * weights).T
    else:
        weights = None

    L, U, p = truncated_pivoted_lu_factorization(
        basis_matrix, num_leja_samples, num_initial_rows)
    # The factorization terminates early if the matrix becomes singular;
    # fail loudly instead of silently returning fewer samples.
    assert p.shape[0] == num_leja_samples, (p.shape, num_leja_samples)
    p = p[:num_leja_samples]
    leja_samples = candidate_samples[:, p]
    # Ignore basis functions (columns) that were not considered during the
    # incomplete LU factorization
    L = L[:, :num_leja_samples]
    U = U[:num_leja_samples, :num_leja_samples]
    # BUG FIX: previously this unconditionally indexed weights[p], which
    # raised TypeError when preconditioning_function was None.
    pivoted_weights = weights[p] if weights is not None else None
    data_structures = [L, U, p, pivoted_weights]
    return leja_samples, data_structures
# Example #3
# 0
    def test_least_interpolation_lu_equivalence_in_1d(self):
        """
        Check that, in 1D, the samples selected by the orthogonal least
        interpolation (OLI) solver coincide with those chosen by
        truncated pivoted LU factorization of the same preconditioned
        basis matrix, and that the two factorizations reproduce the same
        permuted basis matrix.
        """
        num_vars = 1
        # Beta(2, 5) random variable; maps to Jacobi polynomials below.
        alpha_stat = 2; beta_stat  = 5
        max_num_pts = 100

        var_trans = define_iid_random_variable_transformation(
            beta(alpha_stat,beta_stat),num_vars)
        # Jacobi parameters are the statistical parameters shifted by -1
        # (scipy beta vs. Jacobi polynomial convention).
        pce_opts = {'alpha_poly':beta_stat-1,'beta_poly':alpha_stat-1,
                    'var_trans':var_trans,'poly_type':'jacobi',}

        # Set oli options
        oli_opts = {'verbosity':0,
                    'assume_non_degeneracy':False}

        # Returns (num_indices_of_degree, indices) for a total-degree basis.
        basis_generator = \
          lambda num_vars,degree: (degree+1,compute_hyperbolic_level_indices(
              num_vars,degree,1.0))

        pce = PolynomialChaosExpansion()
        pce.configure(pce_opts)

        oli_solver = LeastInterpolationSolver()
        oli_solver.configure(oli_opts)
        oli_solver.set_pce(pce)

        # univariate_beta_pdf = partial(beta.pdf,a=alpha_stat,b=beta_stat)
        # univariate_pdf = lambda x: univariate_beta_pdf(x)
        # preconditioning_function = partial(
        #     tensor_product_pdf,univariate_pdfs=univariate_pdf)
        from pyapprox.indexing import get_total_degree
        max_degree = get_total_degree(num_vars,max_num_pts)
        indices = compute_hyperbolic_indices(num_vars, max_degree, 1.)
        pce.set_indices(indices)

        # Christoffel-function preconditioning (inverse Christoffel weights).
        from pyapprox.polynomial_sampling import christoffel_function
        preconditioning_function = lambda samples: 1./christoffel_function(
            samples,pce.basis_matrix)

        oli_solver.set_preconditioning_function(preconditioning_function)
        oli_solver.set_basis_generator(basis_generator)

        initial_pts = None
        # Dense 1D grid of candidates on [0, 1].
        candidate_samples = np.linspace(0.,1.,1000)[np.newaxis,:]

        oli_solver.factorize(
            candidate_samples, initial_pts,
            num_selected_pts = max_num_pts)

        oli_samples = oli_solver.get_current_points()

        # Recreate the selection with a direct pivoted LU factorization of
        # the same preconditioned basis matrix.
        from pyapprox.utilities import truncated_pivoted_lu_factorization
        pce.set_indices(oli_solver.selected_basis_indices)
        basis_matrix = pce.basis_matrix(candidate_samples)
        weights = np.sqrt(preconditioning_function(candidate_samples))
        basis_matrix = np.dot(np.diag(weights),basis_matrix)
        L,U,p = truncated_pivoted_lu_factorization(
            basis_matrix,max_num_pts)
        assert p.shape[0]==max_num_pts
        lu_samples = candidate_samples[:,p]

        # Both methods must select the same points in the same order.
        assert np.allclose(lu_samples,oli_samples)

        L1,U1,H1 = oli_solver.get_current_LUH_factors()

        # Both factorizations must reproduce the permuted (row-weighted)
        # basis matrix.
        true_permuted_matrix = (pce.basis_matrix(lu_samples).T*weights[p]).T
        assert np.allclose(np.dot(L,U),true_permuted_matrix)
        assert np.allclose(np.dot(L1,np.dot(U1,H1)),true_permuted_matrix)
    def test_hermite_christoffel_leja_sequence_1d(self):
        """
        Check that each point of the gradient-optimized Christoffel-Leja
        sequence (Hermite basis, unbounded domain) is close to the point a
        discrete LU-based Leja selection would choose from a dense
        candidate grid, given the same preceding sequence.
        """
        import warnings
        warnings.filterwarnings('error')
        # for unbounded variables can get overflow warnings because when
        # optimizing interval with one side unbounded and no local minima
        # exists then optimization will move towards inifinity
        max_nsamples = 20
        initial_points = np.array([[0, stats.norm(0, 1).ppf(0.75)]])
        # initial_points = np.array([[stats.norm(0, 1).ppf(0.75)]])
        ab = hermite_recurrence(max_nsamples + 1, 0, False)
        basis_fun = partial(evaluate_orthonormal_polynomial_deriv_1d, ab=ab)

        # plot_degree = np.inf  # max_nsamples-1
        # assert plot_degree < max_nsamples

        # def callback(leja_sequence, coef, new_samples, obj_vals,
        #              initial_guesses):
        #     degree = coef.shape[0]-1
        #     new_basis_degree = degree+1
        #     if new_basis_degree != plot_degree:
        #         return
        #     plt.clf()

        #     def plot_fun(x):
        #         return -christoffel_leja_objective_fun_1d(
        #             partial(basis_fun, nmax=new_basis_degree, deriv_order=0),
        #             coef, x[None, :])
        #     xx = np.linspace(-20, 20, 1001)
        #     plt.plot(xx, plot_fun(xx))
        #     plt.plot(leja_sequence[0, :], plot_fun(leja_sequence[0, :]), 'o')
        #     I = np.argmin(obj_vals)
        #     plt.plot(new_samples[0, I], obj_vals[I], 's')
        #     plt.plot(
        #         initial_guesses[0, :], plot_fun(initial_guesses[0, :]), '*')
        #     #plt.xlim(-10, 10)
        #     print('s', new_samples[0], obj_vals)

        # Gradient-based (continuous) Leja sequence on (-inf, inf).
        leja_sequence = get_christoffel_leja_sequence_1d(max_nsamples,
                                                         initial_points,
                                                         [-np.inf, np.inf],
                                                         basis_fun, {
                                                             'gtol': 1e-8,
                                                             'verbose': False,
                                                             'iprint': 2
                                                         },
                                                         callback=None)

        # compare to lu based leja samples
        # given the same set of initial samples the next sample chosen
        # should be close with the one from the gradient based method having
        # slightly better objective value
        num_candidate_samples = 10001

        def generate_candidate_samples(n):
            # Uniform grid covering the region where the weighted Hermite
            # basis is non-negligible.
            return np.linspace(-20, 20, n)[None, :]

        for ii in range(initial_points.shape[1], max_nsamples):
            degree = ii - 1
            new_basis_degree = degree + 1
            candidate_samples = generate_candidate_samples(
                num_candidate_samples)
            # Prepend the current sequence so LU pivoting keeps it fixed
            # and only chooses the (ii+1)-th point.
            candidate_samples = np.hstack(
                [leja_sequence[:, :ii], candidate_samples])
            bfun = partial(basis_fun, nmax=new_basis_degree, deriv_order=0)
            basis_mat = bfun(candidate_samples[0, :])
            # Row-weight by the inverse sqrt Christoffel function for the
            # current degree.
            basis_mat = sqrt_christoffel_function_inv_1d(
                bfun, candidate_samples)[:, None] * basis_mat
            LU, pivots = truncated_pivoted_lu_factorization(
                basis_mat, ii + 1, ii, False)
            # cannot use get_candidate_based_leja_sequence_1d
            # because it uses christoffel function that is for maximum
            # degree
            discrete_leja_sequence = candidate_samples[:, pivots[:ii + 1]]

            # if new_basis_degree == plot_degree:
            #     # mulitply by LU[ii, ii]**2 to account for LU factorization
            #     # dividing the column by this number
            #     discrete_obj_vals = -LU[:, ii]**2*LU[ii, ii]**2
            #     # account for pivoting of ith column of LU factor
            #     # value of best objective can be found in the iith pivot
            #     discrete_obj_vals[ii] = discrete_obj_vals[pivots[ii]]
            #     discrete_obj_vals[pivots[ii]] = -LU[ii, ii]**2
            #     I = np.argsort(candidate_samples[0, ii:])+ii
            #     plt.plot(candidate_samples[0, I], discrete_obj_vals[I], '--')
            #     plt.plot(candidate_samples[0, pivots[ii]],
            #              -LU[ii, ii]**2, 'k^')
            #     plt.show()

            def objective_value(sequence):
                # Evaluate the Christoffel-Leja objective at the last point
                # of the given sequence.
                tmp = bfun(sequence[0, :])
                basis_mat = tmp[:, :-1]
                new_basis = tmp[:, -1:]
                coef = compute_coefficients_of_christoffel_leja_interpolant_1d(
                    basis_mat, new_basis)
                return christoffel_leja_objective_fun_1d(
                    bfun, coef, sequence[:, -1:])

            # discrete_obj_val = objective_value(
            #     discrete_leja_sequence[:, :ii+1])
            # obj_val = objective_value(leja_sequence[:, :ii+1])

            # Candidate-grid spacing: the allowed discrepancy between the
            # continuous and discrete choices.
            diff = candidate_samples[0, -1] - candidate_samples[0, -2]
            # print(ii, obj_val - discrete_obj_val)
            print(leja_sequence[:, :ii + 1], discrete_leja_sequence)
            # assert obj_val >= discrete_obj_val
            # obj_val will not always be greater than because of optimization
            # tolerance and discretization of candidate samples
            # assert abs(obj_val - discrete_obj_val) < 1e-4
            assert (abs(leja_sequence[0, ii] - discrete_leja_sequence[0, -1]) <
                    diff)
    def update_leja_sequence_fast(self, new_subspace_indices,
                                  num_current_subspaces):
        """
        Incrementally extend the Leja sequence (fast path) after new
        subspace indices have been added.

        On the first call (no samples selected yet) a full sequential LU
        factorization is computed and cached. Subsequent calls append the
        columns for the new basis terms to the cached LU factor, rescale
        it for the updated preconditioning weights, and continue the
        pivoted factorization from where it stopped.

        Parameters
        ----------
        new_subspace_indices : np.ndarray (num_vars, num_new_subspaces)
            The subspace indices added since the last call.
        num_current_subspaces : integer
            Number of subspaces present before the new ones were added.

        Returns
        -------
        np.ndarray (num_vars, num_new_samples)
            The newly selected candidate samples.
        """
        num_samples = self.samples.shape[1]
        if num_samples == 0:
            # First call: factorize from scratch and cache the sequential
            # (untruncated) LU factor for later incremental updates.
            self.pce.set_indices(self.poly_indices)
            max_iters = self.poly_indices.shape[1]
            # keep unconditioned
            self.basis_matrix = self.precond_canonical_basis_matrix(
                self.candidate_samples)[0]
            self.LU_factor,self.seq_pivots = \
                truncated_pivoted_lu_factorization(
                    self.basis_matrix, max_iters, truncate_L_factor=False)
            self.pivots = get_final_pivots_from_sequential_pivots(
                self.seq_pivots.copy())[:max_iters]
            #self.precond_weights = np.sqrt(
            #    self.basis_matrix.shape[1]*christoffel_weights(
            #        self.basis_matrix))[:,np.newaxis]
            self.precond_weights = self.precond_func(
                self.basis_matrix, self.candidate_samples)[:, np.newaxis]
            return self.candidate_samples[:,
                                          self.pivots[num_samples:self.
                                                      poly_indices.shape[1]]]

        # Collect the polynomial indices belonging to the new subspaces.
        num_vars, num_new_subspaces = new_subspace_indices.shape
        unique_poly_indices = np.zeros((num_vars, 0), dtype=int)
        for ii in range(num_new_subspaces):
            I = get_subspace_active_poly_array_indices(
                self, num_current_subspaces + ii)
            unique_poly_indices = np.hstack(
                [unique_poly_indices, self.poly_indices[:, I]])
        self.pce.set_indices(unique_poly_indices)

        # Previous weights, permuted into the row order of the cached
        # sequential LU factor.
        precond_weights_prev = self.precond_weights
        pivoted_precond_weights_prev = pivot_rows(self.seq_pivots,
                                                  precond_weights_prev, False)

        # Append the (unpreconditioned) columns for the new basis terms,
        # then add their preconditioned copies to the LU factorization.
        new_cols = self.pce.canonical_basis_matrix(self.candidate_samples)
        self.basis_matrix = np.hstack([self.basis_matrix, new_cols.copy()])
        new_cols *= precond_weights_prev
        self.LU_factor = add_columns_to_pivoted_lu_factorization(
            self.LU_factor.copy(), new_cols, self.seq_pivots[:num_samples])

        #self.precond_weights = np.sqrt(
        #    self.basis_matrix.shape[1]*christoffel_weights(
        #        self.basis_matrix))[:,np.newaxis]
        # Recompute weights for the enlarged basis and rescale the cached
        # factorization from the old weights to the new ones.
        self.precond_weights = self.precond_func(
            self.basis_matrix, self.candidate_samples)[:, np.newaxis]
        pivoted_precond_weights = pivot_rows(self.seq_pivots,
                                             self.precond_weights, False)
        self.LU_factor = unprecondition_LU_factor(
            self.LU_factor,
            pivoted_precond_weights_prev / pivoted_precond_weights,
            num_samples)

        # NOTE(review): this assignment of ``it`` is dead — it is
        # immediately overwritten by the return value below.
        it = self.poly_indices.shape[1]
        max_iters = self.poly_indices.shape[1]
        # Resume pivoting from the already-factorized rows.
        self.LU_factor, self.seq_pivots, it = continue_pivoted_lu_factorization(
            self.LU_factor.copy(),
            self.seq_pivots,
            self.samples.shape[1],
            max_iters,
            num_initial_rows=0)
        self.pivots = get_final_pivots_from_sequential_pivots(
            self.seq_pivots.copy())[:max_iters]

        self.pce.set_indices(self.poly_indices)
        return self.candidate_samples[:, self.pivots[num_samples:self.
                                                     poly_indices.shape[1]]]