def test_fekete_rosenblatt_interpolation(self):
    """Interpolate a Genz function at Fekete points selected through a
    composed Rosenblatt + uniform variable transformation."""
    np.random.seed(2)
    degree = 3

    # Compose two maps: the Rosenblatt transformation takes the dependent
    # variables to [0, 1]^d, and an iid uniform transformation maps that
    # hypercube onto the canonical polynomial domain [-1, 1]^d.
    __, __, joint_density, limits = rosenblatt_example_2d(num_samples=1)
    num_vars = len(limits)//2
    rosenblatt_opts = {'limits': limits, 'num_quad_samples_1d': 20}
    var_trans_1 = RosenblattTransformation(
        joint_density, num_vars, rosenblatt_opts)
    var_trans_2 = define_iid_random_variable_transformation(
        uniform(), num_vars)
    var_trans = TransformationComposition([var_trans_1, var_trans_2])

    poly = PolynomialChaosExpansion()
    poly.configure({'poly_type': 'jacobi', 'alpha_poly': 0.,
                    'beta_poly': 0., 'var_trans': var_trans})
    poly.set_indices(compute_hyperbolic_indices(num_vars, degree, 1.0))

    # candidates must be generated in the canonical PCE space
    num_candidate_samples = 10000

    def draw_candidates(nn):
        return np.cos(np.random.uniform(0., np.pi, (num_vars, nn)))

    def precond_weights(basis_mat, xx):
        return christoffel_weights(basis_mat)

    canonical_samples, data_structures = get_fekete_samples(
        poly.canonical_basis_matrix, draw_candidates,
        num_candidate_samples, preconditioning_function=precond_weights)
    samples = var_trans.map_from_canonical_space(canonical_samples)
    # the composed map must be invertible on the selected points
    assert np.allclose(
        canonical_samples, var_trans.map_to_canonical_space(samples))
    assert samples.max() <= 1 and samples.min() >= 0.

    # random oscillatory Genz target
    c = np.random.uniform(0., 1., num_vars)
    c *= 20/c.sum()
    w = np.zeros_like(c)
    w[0] = np.random.uniform(0., 1., 1)
    genz_function = GenzFunction('oscillatory', num_vars, c=c, w=w)
    values = genz_function(samples)

    # the computed coefficients must reproduce the data exactly
    coef = interpolate_fekete_samples(
        canonical_samples, values, data_structures)
    poly.set_coefficients(coef)
    assert np.allclose(poly(samples), values)

    # Compare the mean computed using quadrature with the mean computed
    # from the first expansion coefficient. This does not verify the mean
    # is correct: the Rosenblatt transformation introduces large error,
    # which makes it hard to compute an accurate mean from either the
    # PCE or the quadrature rule.
    quad_w = get_quadrature_weights_from_fekete_samples(
        canonical_samples, data_structures)
    values_at_quad_x = values[:, 0]
    assert np.allclose(np.dot(values_at_quad_x, quad_w), poly.mean())
def test_lu_leja_interpolation(self):
    """Interpolate a Genz function on an LU-Leja sequence and check the
    associated quadrature rule integrates it accurately."""
    num_vars = 2
    degree = 15

    poly = PolynomialChaosExpansion()
    var_trans = define_iid_random_variable_transformation(
        stats.uniform(), num_vars)
    poly.configure(
        define_poly_options_from_variable_transformation(var_trans))
    indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
    poly.set_indices(indices)

    # candidates must be generated in the canonical PCE space
    num_candidate_samples = 10000
    num_leja_samples = indices.shape[1]-1

    def draw_candidates(nn):
        return np.cos(np.random.uniform(0., np.pi, (num_vars, nn)))

    def precond_weights(basis_mat, xx):
        return christoffel_weights(basis_mat)

    # must use canonical_basis_matrix to generate the basis matrix
    samples, data_structures = get_lu_leja_samples(
        poly.canonical_basis_matrix, draw_candidates,
        num_candidate_samples, num_leja_samples,
        preconditioning_function=precond_weights)
    samples = var_trans.map_from_canonical_space(samples)
    assert samples.max() <= 1 and samples.min() >= 0.

    # random oscillatory Genz target
    c = np.random.uniform(0., 1., num_vars)
    c *= 20/c.sum()
    w = np.zeros_like(c)
    w[0] = np.random.uniform(0., 1., 1)
    genz_function = GenzFunction('oscillatory', num_vars, c=c, w=w)
    values = genz_function(samples)

    # the coefficients must interpolate the data; ignore basis functions
    # (columns) that were not considered during the incomplete LU
    # factorization
    coef = interpolate_lu_leja_samples(samples, values, data_structures)
    poly.set_indices(poly.indices[:, :num_leja_samples])
    poly.set_coefficients(coef)
    assert np.allclose(poly(samples), values)

    # quadrature agrees with the exact integral to a loose tolerance;
    # agreement improves as degree is increased
    quad_w = get_quadrature_weights_from_lu_leja_samples(
        samples, data_structures)
    values_at_quad_x = values[:, 0]
    assert np.allclose(
        np.dot(values_at_quad_x, quad_w), genz_function.integrate(),
        atol=1e-4)
def test_fekete_interpolation(self):
    """Interpolate a Genz function at Fekete points and check the
    associated quadrature rule integrates it accurately."""
    num_vars = 2
    degree = 15

    poly = PolynomialChaosExpansion()
    var_trans = define_iid_random_variable_transformation(
        uniform(), num_vars)
    poly.configure(
        define_poly_options_from_variable_transformation(var_trans))
    poly.set_indices(compute_hyperbolic_indices(num_vars, degree, 1.0))

    # candidates must be generated in the canonical PCE space
    num_candidate_samples = 10000

    def draw_candidates(nn):
        return np.cos(np.random.uniform(0., np.pi, (num_vars, nn)))

    def precond_weights(basis_mat, xx):
        return christoffel_weights(basis_mat)

    # must use canonical_basis_matrix to generate the basis matrix
    samples, data_structures = get_fekete_samples(
        poly.canonical_basis_matrix, draw_candidates,
        num_candidate_samples, preconditioning_function=precond_weights)
    samples = var_trans.map_from_canonical_space(samples)
    assert samples.max() <= 1 and samples.min() >= 0.

    # random oscillatory Genz target
    c = np.random.uniform(0., 1., num_vars)
    c *= 20/c.sum()
    w = np.zeros_like(c)
    w[0] = np.random.uniform(0., 1., 1)
    genz_function = GenzFunction('oscillatory', num_vars, c=c, w=w)
    values = genz_function(samples)

    # the coefficients must interpolate the data exactly
    coef = interpolate_fekete_samples(samples, values, data_structures)
    poly.set_coefficients(coef)
    assert np.allclose(poly(samples), values)

    # increase degree if a smaller atol is wanted
    quad_w = get_quadrature_weights_from_fekete_samples(
        samples, data_structures)
    values_at_quad_x = values[:, 0]
    assert np.allclose(
        np.dot(values_at_quad_x, quad_w), genz_function.integrate(),
        atol=1e-4)
def help_test_stochastic_dominance(self, solver, nsamples, degree,
                                   disutility=None, plot=False):
    """
    Solve a stochastic-dominance constrained regression problem with
    ``solver`` and assert that the dominance constraints hold.

    disutility is None  : check first-order dominance using the
        empirical CDF
    disutility is True  : check second-order dominance (SSD) in the
        disutility formulation
    disutility is False : check standard second-order dominance (SSD)
    plot : if True also compare against an unconstrained least-squares
        fit and show diagnostic plots
    """
    from pyapprox.multivariate_polynomials import PolynomialChaosExpansion
    from pyapprox.variable_transformations import \
        define_iid_random_variable_transformation
    from pyapprox.indexing import compute_hyperbolic_indices
    num_vars = 1
    mu, sigma = 0, 1
    f, f_cdf, f_pdf, VaR, CVaR, ssd, ssd_disutil = \
        get_lognormal_example_exact_quantities(mu, sigma)
    samples = np.random.normal(0, 1, (1, nsamples))
    samples = np.sort(samples)
    values = f(samples[0, :])[:, np.newaxis]

    pce = PolynomialChaosExpansion()
    var_trans = define_iid_random_variable_transformation(
        normal_rv(mu, sigma), num_vars)
    pce.configure({'poly_type': 'hermite', 'var_trans': var_trans})
    indices = compute_hyperbolic_indices(1, degree, 1.)
    pce.set_indices(indices)

    # eta_indices can be used to restrict the dominance constraints to a
    # subset of the samples; None enforces them at every sample
    eta_indices = None
    coef, sd_opt_problem = solver(samples, values, pce.basis_matrix,
                                  eta_indices=eta_indices)
    pce.set_coefficients(coef[:, np.newaxis])
    pce_values = pce(samples)[:, 0]

    ygrid = pce_values.copy()
    if disutility is not None:
        if disutility:
            ygrid = -ygrid[::-1]
        stat_function = partial(compute_conditional_expectations,
                                ygrid, disutility_formulation=disutility)
        if disutility:
            # Disutility SSD: approximation must dominate the data;
            # eps absorbs round-off in the optimizer solution
            eps = 1e-14
            assert np.all(
                stat_function(values[:, 0]) <=
                stat_function(pce_values)+eps)
        else:
            # SSD
            assert np.all(
                stat_function(pce_values) <= stat_function(values[:, 0]))
    else:
        # FSD: empirical CDF of the approximation must not exceed the
        # empirical CDF of the data
        from pyapprox.density import EmpiricalCDF

        def stat_function(x):
            return EmpiricalCDF(x)(ygrid)
        assert np.all(
            stat_function(pce_values) <= stat_function(values[:, 0]))

    if plot:
        # unconstrained least-squares fit for visual comparison
        lstsq_pce = PolynomialChaosExpansion()
        lstsq_pce.configure({
            'poly_type': 'hermite', 'var_trans': var_trans})
        lstsq_pce.set_indices(indices)
        lstsq_coef = solve_least_squares_regression(
            samples, values, lstsq_pce.basis_matrix)
        lstsq_pce.set_coefficients(lstsq_coef)

        # evaluate the dominance statistic on a denser grid that also
        # contains the approximation values
        ylb, yub = values.min()-abs(values.max())*.1,\
            values.max()+abs(values.max())*.1
        ygrid = np.linspace(ylb, yub, 101)
        ygrid = np.sort(np.concatenate([ygrid, pce_values]))
        if disutility is not None:
            if disutility:
                ygrid = -ygrid[::-1]
            stat_function = partial(
                compute_conditional_expectations, ygrid,
                disutility_formulation=disutility)
        else:
            def stat_function(x):
                assert x.ndim == 1
                return EmpiricalCDF(x)(ygrid)
        fig, axs = plot_1d_functions_and_statistics(
            [f, pce, lstsq_pce], ['Exact', 'SSD', 'Lstsq'], samples,
            values, stat_function, ygrid)
        plt.show()
class AdaptiveInducedPCE(SubSpaceRefinementManager):
    """
    Adaptively refine a polynomial chaos expansion (PCE), drawing new
    training samples with induced sampling each time basis terms are
    added. If ``cond_tol < 1`` induced sampling is disabled and plain
    independent random sampling is used instead.
    """

    def __init__(self, num_vars, cond_tol=1e2):
        """
        Parameters
        ----------
        num_vars : integer
            The number of random variables.

        cond_tol : float
            Condition-number tolerance forwarded to the induced-sampling
            routines. Values below one switch to independent random
            sampling with ``sample_ratio`` samples per new basis term.
        """
        super(AdaptiveInducedPCE, self).__init__(num_vars)
        self.cond_tol = cond_tol
        self.fit_opts = {'omp_tol': 0}
        # NOTE(review): "chistoffel" appears to be the spelling used at the
        # function's definition site elsewhere in the package; renaming it
        # here alone would break the reference — confirm before fixing.
        self.set_preconditioning_function(chistoffel_preconditioning_function)
        self.fit_function = self._fit
        if cond_tol < 1:
            # plain Monte Carlo: constant preconditioning weights and a
            # fixed oversampling ratio per new basis term
            self.induced_sampling = False
            self.set_preconditioning_function(
                precond_func=lambda m, x: np.ones(x.shape[1]))
            self.sample_ratio = 5
        else:
            self.induced_sampling = True
            self.sample_ratio = None

    def set_function(self, function, var_trans=None, pce=None):
        """Set the function to approximate and (optionally) the PCE."""
        super(AdaptiveInducedPCE, self).set_function(function, var_trans)
        self.set_polynomial_chaos_expansion(pce)

    def set_polynomial_chaos_expansion(self, pce=None):
        """Store the given PCE, or build a default one from the stored
        variable transformation when ``pce`` is None."""
        if pce is None:
            poly_opts = define_poly_options_from_variable_transformation(
                self.variable_transformation)
            self.pce = PolynomialChaosExpansion()
            self.pce.configure(poly_opts)
        else:
            self.pce = pce

    def increment_samples(self, current_poly_indices, unique_poly_indices):
        """Return the stored samples augmented with new canonical-space
        samples for the newly added basis terms."""
        if self.induced_sampling:
            samples = increment_induced_samples_migliorati(
                self.pce, self.cond_tol, self.samples,
                current_poly_indices, unique_poly_indices)
        else:
            samples = generate_independent_random_samples(
                self.pce.var_trans.variable,
                self.sample_ratio*unique_poly_indices.shape[1])
            samples = self.pce.var_trans.map_to_canonical_space(samples)
            samples = np.hstack([self.samples, samples])
        return samples

    def allocate_initial_samples(self):
        """Draw the first batch of samples (in canonical space)."""
        if self.induced_sampling:
            return generate_induced_samples_migliorati_tolerance(
                self.pce, self.cond_tol)
        else:
            return generate_independent_random_samples(
                self.pce.var_trans.variable,
                self.sample_ratio*self.pce.num_terms())

    def create_new_subspaces_data(self, new_subspace_indices):
        """Initialize the new subspaces and return the unique samples
        added for them, together with a per-subspace sample count."""
        num_current_subspaces = self.subspace_indices.shape[1]
        self.initialize_subspaces(new_subspace_indices)
        self.pce.set_indices(self.poly_indices)
        if self.samples.shape[1] == 0:
            unique_subspace_samples = self.allocate_initial_samples()
            return unique_subspace_samples, np.array(
                [unique_subspace_samples.shape[1]])

        num_vars, num_new_subspaces = new_subspace_indices.shape
        unique_poly_indices = np.zeros((num_vars, 0), dtype=int)
        for ii in range(num_new_subspaces):
            I = get_subspace_active_poly_array_indices(
                self, num_current_subspaces+ii)
            unique_poly_indices = np.hstack(
                [unique_poly_indices, self.poly_indices[:, I]])
        # current_poly_indices will include active indices not added
        # during this call, i.e. in new_subspace_indices.
        # Thus cannot use
        # I = get_active_poly_array_indices(self)
        # unique_poly_indices = self.poly_indices[:, I]
        # to replace the above loop
        current_poly_indices = self.poly_indices[
            :, :self.unique_poly_indices_idx[num_current_subspaces]]
        num_samples = self.samples.shape[1]
        samples = self.increment_samples(
            current_poly_indices, unique_poly_indices)
        unique_subspace_samples = samples[:, num_samples:]

        # warning: num_new_subspace_samples does not really make sense for
        # induced sampling as new samples are not directly tied to newly
        # added basis terms; distribute the count evenly instead
        num_new_subspace_samples = unique_subspace_samples.shape[1]*np.ones(
            new_subspace_indices.shape[1])//new_subspace_indices.shape[1]
        return unique_subspace_samples, num_new_subspace_samples

    def _fit(self, pce, canonical_basis_matrix, samples, values,
             precond_func=None, omp_tol=0):
        """Solve for the PCE coefficients from the stored data.

        self.samples are in the canonical domain.
        """
        # TODO: just add columns to a stored basis matrix; store the QR
        # factorization of basis_matrix and update the factorization
        # incrementally
        if omp_tol == 0:
            coef = solve_preconditioned_least_squares(
                canonical_basis_matrix, samples, values, precond_func)
        else:
            coef = solve_preconditioned_orthogonal_matching_pursuit(
                canonical_basis_matrix, samples, values, precond_func,
                omp_tol)
        self.pce.set_coefficients(coef)

    def fit(self):
        """Refit the PCE on all stored samples and values."""
        return self.fit_function(self.pce, self.pce.canonical_basis_matrix,
                                 self.samples, self.values, **self.fit_opts)

    def add_new_subspaces(self, new_subspace_indices):
        """Add the subspaces, refit the PCE, and return the number of new
        samples per subspace."""
        # removed unused locals (num_new_subspaces, num_current_subspaces)
        num_new_subspace_samples = super(
            AdaptiveInducedPCE, self).add_new_subspaces(new_subspace_indices)
        self.fit()
        return num_new_subspace_samples

    def __call__(self, samples):
        """Evaluate the current PCE approximation at ``samples``."""
        return self.pce(samples)

    def get_active_unique_poly_indices(self):
        """Return the polynomial indices of the active basis terms."""
        I = get_active_poly_array_indices(self)
        return self.poly_indices[:, I]

    def set_preconditioning_function(self, precond_func):
        """
        precond_func : callable
            Callable function with signature
            precond_func(basis_matrix, samples)
        """
        self.precond_func = precond_func
        self.fit_opts['precond_func'] = self.precond_func
def test_lu_leja_interpolation_with_intial_samples(self):
    """Build an LU-Leja sequence seeded with initial samples and check
    interpolation and quadrature of a Genz function."""
    num_vars = 2
    degree = 15

    poly = PolynomialChaosExpansion()
    var_trans = define_iid_random_variable_transformation(
        uniform(), num_vars)
    poly.configure(
        define_poly_options_from_variable_transformation(var_trans))
    indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
    poly.set_indices(indices)

    # candidates must be generated in the canonical PCE space
    num_candidate_samples = 10000

    def draw_candidates(nn):
        return np.cos(np.random.uniform(0., np.pi, (num_vars, nn)))

    def precond_weights(basis_mat, xx):
        return christoffel_weights(basis_mat)

    # Enforcing LU interpolation of a set of initial points before
    # selecting the best samples from the candidates can cause ill
    # conditioning. To avoid this, build a short Leja sequence first and
    # reuse it as the initial samples when recomputing the sequence with
    # different candidates.
    # must use canonical_basis_matrix to generate the basis matrix
    num_initial_samples = 5
    num_leja_samples = indices.shape[1]-1
    initial_samples, data_structures = get_lu_leja_samples(
        poly.canonical_basis_matrix, draw_candidates,
        num_candidate_samples, num_initial_samples,
        preconditioning_function=precond_weights,
        initial_samples=None)
    samples, data_structures = get_lu_leja_samples(
        poly.canonical_basis_matrix, draw_candidates,
        num_candidate_samples, num_leja_samples,
        preconditioning_function=precond_weights,
        initial_samples=initial_samples)
    # the seeded points must lead the recomputed sequence
    assert np.allclose(samples[:, :num_initial_samples], initial_samples)

    samples = var_trans.map_from_canonical_space(samples)
    assert samples.max() <= 1 and samples.min() >= 0.

    # random oscillatory Genz target
    c = np.random.uniform(0., 1., num_vars)
    c *= 20/c.sum()
    w = np.zeros_like(c)
    w[0] = np.random.uniform(0., 1., 1)
    genz_function = GenzFunction('oscillatory', num_vars, c=c, w=w)
    values = genz_function(samples)

    # the coefficients must interpolate the data; ignore basis functions
    # (columns) that were not considered during the incomplete LU
    # factorization
    coef = interpolate_lu_leja_samples(samples, values, data_structures)
    poly.set_indices(poly.indices[:, :num_leja_samples])
    poly.set_coefficients(coef)
    assert np.allclose(poly(samples), values)

    quad_w = get_quadrature_weights_from_lu_leja_samples(
        samples, data_structures)
    values_at_quad_x = values[:, 0]
    assert np.allclose(
        np.dot(values_at_quad_x, quad_w), genz_function.integrate(),
        atol=1e-4)