def test_least_interpolant( self ): num_dims = 2 func_domain = TensorProductDomain( num_dims, [[-1,1]] ) x = -numpy.cos( numpy.linspace( 0., 1., 10 ) * numpy.pi ); [X,Y] = numpy.meshgrid( x, x ) build_points = numpy.vstack( ( X.reshape(( 1,X.shape[0]*X.shape[1])), Y.reshape(( 1, Y.shape[0]*Y.shape[1])))) build_values = matlab_peaks( build_points ) poly_1d = [ LegendrePolynomial1D() ] basis = TensorProductBasis( num_dims, poly_1d ) linear_solver = LeastInterpolation() predictor = PCE( build_points.shape[0], basis = basis, linear_solver = linear_solver ) predictor.function_domain( func_domain ) predictor.build( build_points, build_values ) result = predictor.evaluate_set( build_points ) if ( result.ndim > 1 ): result = result.reshape( result.shape[0] ) error = result - build_values l2_error = numpy.sqrt( numpy.dot( error.T, error ) / \ build_points.shape[1] ) print 'error at nodes: %1.15e' %l2_error assert l2_error < self.eps poly_1d = [ JacobiPolynomial1D( 0.5, 0.5 ) ] basis = TensorProductBasis( num_dims, poly_1d ) linear_solver = LeastInterpolation() predictor = PCE( build_points.shape[0], basis = basis, linear_solver = linear_solver ) predictor.function_domain( func_domain ) predictor.build( build_points, build_values ) result = predictor.evaluate_set( build_points ) if ( result.ndim > 1 ): result = result.reshape( result.shape[0] )
def test_polynomial_chaos_expansion( self ):
    """
    Check PCE evaluation, mean and variance against analytically derived
    values for second-order expansions with unit coefficients on:
      (1) a 1D bounded domain  ( Jacobi(0,0) == Legendre basis )
      (2) a 2D bounded domain
      (3) a 2D domain unbounded in the second dimension
          ( Jacobi(0,0) x Hermite basis )
    """
    # test 1D bounded domain
    num_dims = 1
    func_domain = TensorProductDomain( num_dims )
    poly_1d = [ JacobiPolynomial1D( 0., 0. ) ]
    basis = TensorProductBasis( num_dims, poly_1d )
    predictor = PCE( num_dims, basis, order = 2 )
    predictor.set_coefficients( numpy.ones( predictor.coeff.shape ) )
    predictor.function_domain( func_domain )
    num_test_points = 20
    test_points = \
        numpy.linspace( 0., 1., num_test_points ).reshape(1,num_test_points)
    pred_vals = predictor.evaluate_set( test_points )
    # map the user domain [0,1] onto the canonical Legendre domain [-1,1]
    x = 2. * test_points - 1.
    # 1 * P_0 + 1 * P_1 + 1 * P_2 evaluated directly
    test_vals = numpy.ones( num_test_points ) + x[0,:] + \
        0.5 * ( 3.*x[0,:]**2 - 1. )
    assert numpy.allclose( test_vals, pred_vals )
    test_mean = 1.
    # variance = sum over non-constant terms of coeff^2 * ||P_n||^2
    test_variance = 1./3. + 1./5.
    assert numpy.allclose( predictor.mean(), test_mean )
    assert numpy.allclose( predictor.variance(), test_variance )

    # test 2D bounded domain
    num_dims = 2
    func_domain = TensorProductDomain( num_dims )
    poly_1d = [ JacobiPolynomial1D( 0., 0. ) ]
    basis = TensorProductBasis( num_dims, poly_1d )
    predictor = PCE( num_dims, basis, order = 2 )
    predictor.set_coefficients( numpy.ones( predictor.coeff.shape ) )
    predictor.function_domain( func_domain )
    num_test_points = 20
    test_points = numpy.random.uniform( 0., 1.,
                                        ( num_dims, num_test_points ) )
    pred_vals = predictor.evaluate_set( test_points )
    x = 2. * test_points - 1.
    # all six total-degree-2 tensor Legendre terms with unit coefficients
    test_vals = numpy.ones( num_test_points ) + x[0,:] + x[1,:] + \
        0.5 * ( 3.*x[0,:]**2 - 1. ) + 0.5 * ( 3.*x[1,:]**2 - 1. ) + \
        x[0,:] * x[1,:]
    assert numpy.allclose( test_vals, pred_vals )
    test_mean = 1.
    # two linear terms, the cross term, and two quadratic terms
    test_variance = 2. * 1./3. + 1./9. + 2. * 1./5.
    assert numpy.allclose( predictor.mean(), test_mean )
    assert numpy.allclose( predictor.variance(), test_variance )

    # test when domain is unbounded in one dimension
    num_dims = 2
    func_domain = TensorProductDomain( num_dims,
                                       ranges = [[0.,1.],
                                                 [-numpy.inf, numpy.inf]] )
    poly_1d = [ JacobiPolynomial1D( 0., 0. ), HermitePolynomial1D() ]
    basis = TensorProductBasis( num_dims, poly_1d )
    predictor = PCE( num_dims, basis, order = 2 )
    predictor.set_coefficients( numpy.ones( predictor.coeff.shape ) )
    predictor.function_domain( func_domain )
    num_test_points = 20
    x_1 = numpy.random.uniform( 0., 1., ( 1, num_test_points ) )
    x_2 = numpy.random.normal( 0., 1., ( 1, num_test_points ) )
    test_points = numpy.vstack( ( x_1, x_2 ) )
    pred_vals = predictor.evaluate_set( test_points )
    # BUG FIX: work on a copy; the original did `x = test_points` and then
    # rescaled row 0 in place, silently mutating test_points and only
    # avoiding a wrong answer because evaluate_set had already run
    x = test_points.copy()
    x[0,:] = 2. * x[0,:] - 1.
    test_vals = numpy.ones( num_test_points ) + x[0,:] + x[1,:] + \
        0.5 * ( 3.*x[0,:]**2 - 1. ) + ( x[1,:]**2 - 1. ) + \
        x[0,:] * x[1,:]
    assert numpy.allclose( test_vals, pred_vals )
    test_mean = 1.
    # Legendre linear 1/3 and quadratic 1/5; Hermite (probabilists')
    # He_1 and He_2 contribute 1 and 2; cross term contributes 1/3 * 3
    test_variance = 2. * 1./3. + 1./5. + 2. + 1.
    assert numpy.allclose( predictor.mean(), test_mean )
    assert numpy.allclose( predictor.variance(), test_variance )
class GridPointAdaptedSparseGrid(object):
    """
    A point-wise adaptive sparse grid interpolant of target_function.

    Grid points move through three states:
      uninitialized -> candidate (prioritized, on a heap) -> in the grid.
    refine() repeatedly promotes the highest-priority candidate into the
    grid and spawns its children as uninitialized points.
    """

    def __init__( self, num_dims, max_level, quadrature_rule,
                  refinement_manager, sparse_grid_data, target_function ):
        # num_dims:            dimension of the function domain
        # max_level:           maximum refinement level allowed
        # quadrature_rule:     provides point coordinates, parent/child
        #                      relations and the hierarchical basis
        # refinement_manager:  admissibility, prioritization and
        #                      refinement policy
        # sparse_grid_data:    storage for function values and
        #                      hierarchical surpluses, indexed by
        #                      grid_point_index.array_index
        # target_function:     callable evaluated at grid point coordinates
        self.num_dims = num_dims
        self.max_level = max_level
        self.quadrature_rule = quadrature_rule
        self.refinement_manager = refinement_manager
        self.target_function = target_function
        # for least interpolant ( used only by evaluate_set1 )
        poly_1d = [ LegendrePolynomial1D() ]
        basis = TensorProductBasis( num_dims, poly_1d )
        linear_solver = LeastInterpolation()
        self.pce = PCE( self.num_dims, basis = basis,
                        linear_solver = linear_solver )
        self.pce.function_domain( self.quadrature_rule.domain )
        # grid points used in the sparse grid
        self.grid_point_indices = set()
        # number of grid points in the grid. This is not equal to
        # len( self.grid_point_indices ). The latter contains all grid
        # points including candidates and those actually in the grid
        self.num_grid_points = 0
        # grid points that are candidates for addition to the sparse grid
        # but have not been initialized and so can not be prioritized
        self.uninit_grid_point_indices = set()
        # grid points that are candidates for addition to the sparse grid.
        # Python note: heap[0] has the smallest value ( inverse of
        # priority ), i.e. the highest priority
        self.candidate_grid_point_indices_queue = []
        # This set contains the same grid point indices as
        # self.candidate_grid_point_indices_queue but is used to ensure a
        # point is only added to the queue once
        self.candidate_grid_point_indices_hash = set()
        # keep track of the number of grid points generated
        # ( self.num_grid_points + self.num_candidate_grid_points +
        #   self.num_uninit_grid_points )
        self.num_grid_point_indices_generated = 0
        self.data = sparse_grid_data

    def build_root( self ):
        """
        Initialize the grid by creating the root subspace
        """
        root_grid_point_index = GridPointIndex( None )
        self.insert_uninit_grid_point( root_grid_point_index )

    def insert_remaining_candidate_points( self ):
        # Flush every remaining candidate into the grid. Only done when
        # candidates were already evaluated at prioritization time,
        # otherwise their function values would be missing.
        if ( self.refinement_manager.candidate_point_needs_func_eval() ):
            while ( len( self.candidate_grid_point_indices_queue ) > 0 ):
                inv_priority, grid_point_index = \
                    heapq.heappop( self.candidate_grid_point_indices_queue )
                self.candidate_grid_point_indices_hash.remove(
                    grid_point_index )
                self.insert_grid_point( grid_point_index )

    def insert_next_grid_point( self ):
        """
        Pop the highest-priority candidate and insert it into the grid.
        Returns the popped grid point index.
        """
        # if two indices have the same priority the indices themselves
        # will be compared ( heap entries are tuples )
        inv_priority, grid_point_index = \
            heapq.heappop( self.candidate_grid_point_indices_queue )
        self.candidate_grid_point_indices_hash.remove( grid_point_index )
        if ( grid_point_index not in self.grid_point_indices ):
            # if candidates are not evaluated at prioritization time,
            # evaluate now, on insertion into the grid
            if ( not
                 self.refinement_manager.candidate_point_needs_func_eval() ):
                self.evaluate_target_function( grid_point_index )
            self.insert_grid_point( grid_point_index )
        else:
            # point was originally a missing parent and was already
            # promoted to the grid by insert_parent_grid_point
            pass
        return grid_point_index

    def insert_uninit_grid_point( self, grid_point_index ):
        # Add a brand-new point to the uninitialized set, assigning it the
        # next storage slot ( array_index ). Silently ignores points that
        # are already known in any state or are not admissible.
        if ( grid_point_index not in self.uninit_grid_point_indices and
             grid_point_index not in
             self.candidate_grid_point_indices_hash and
             grid_point_index not in self.grid_point_indices and
             self.refinement_manager.grid_point_admissible(
                 grid_point_index ) ):
            self.uninit_grid_point_indices.add( grid_point_index )
            grid_point_index.array_index = \
                self.num_grid_point_indices_generated
            self.num_grid_point_indices_generated += 1

    def insert_candidate_grid_point( self, grid_point_index ):
        # Push a prioritized point onto the candidate heap.
        # I think this can be removed if check is made when point is added
        # to uninitialized set; check for existence must be made for
        # uninit, candidate and sparse grid points
        if ( grid_point_index not in
             self.candidate_grid_point_indices_hash ):
            # store inverse priority so the smallest heap entry is the
            # highest-priority point
            heapq.heappush( self.candidate_grid_point_indices_queue,
                            ( 1. / grid_point_index.priority,
                              grid_point_index ) )
            self.candidate_grid_point_indices_hash.add( grid_point_index )
        else:
            msg = 'grid point already exists'
            raise Exception, msg

    def insert_grid_point( self, grid_point_index ):
        # Final promotion: the point becomes part of the interpolant.
        self.grid_point_indices.add( grid_point_index )
        self.num_grid_points += 1

    def refine( self ):
        """
        Perform point-wise adaptive refinement. Extract the grid points
        with the highest priority.
        """
        grid_point_index = self.insert_next_grid_point()
        if self.refinement_manager.grid_point_warrants_refinement(
                grid_point_index ):
            for dim in xrange( self.quadrature_rule.num_dims ):
                # todo would be better not to assume that a left and
                # right child are both available or more are not as well
                child_index = self.quadrature_rule.child_index(
                    grid_point_index, dim, 'left' )
                if child_index is not None:
                    self.insert_uninit_grid_point( child_index )
                child_index = self.quadrature_rule.child_index(
                    grid_point_index, dim, 'right' )
                if child_index is not None:
                    self.insert_uninit_grid_point( child_index )

    def compute_hierarical_surpluses( self, fn_val, coord,
                                      grid_point_index ):
        # ( sic ) method name kept for compatibility with callers.
        # Surplus = function value minus the current interpolant at the
        # point's coordinate; requires all ancestors to be in the grid,
        # so missing parents are inserted first.
        self.insert_missing_parent_grid_points( grid_point_index )
        subspace_index = grid_point_index.subspace_index()
        return fn_val - self.evaluate_set(
            coord.reshape( self.num_dims,1), subspace_index )

    # evaluate all points belonging to subspaces that have indices <=
    # current_subspace_index
    def evaluate_set( self, pts, current_subspace_index = None ):
        # NOTE(review): the subspace filter below is disabled, so
        # current_subspace_index is currently ignored and every grid point
        # contributes.
        result = numpy.zeros( ( pts.shape[1] ), numpy.double )
        for grid_point_index in self.grid_point_indices:
            evaluate_grid_point = True
            # this is very expensive. at least in python
            #grid_point_subspace_index = grid_point_index.subspace_index()
            #if ( current_subspace_index is None ):
            #    evaluate_grid_point = True
            #elif ( grid_point_subspace_index <= current_subspace_index ):
            #    evaluate_grid_point = True
            #else:
            #    evaluate_grid_point = False
            if ( evaluate_grid_point ):
                if ( grid_point_index.data is not None ):
                    #evaluate basis
                    result += \
                        self.data.hier_surplus[grid_point_index.array_index] * \
                        self.quadrature_rule.basis.value(
                            pts, grid_point_index,
                            self.quadrature_rule.domain )
                else:
                    # root-like point with no basis data: constant
                    # contribution
                    result += \
                        self.data.hier_surplus[grid_point_index.array_index] * \
                        numpy.ones( ( result.shape[0] ), numpy.double )
        return result

    def evaluate_set1( self, pts, current_subspace_index = None ):
        # Alternative evaluation: build a least interpolant ( PCE ) from
        # the grid point values and evaluate that instead of the
        # hierarchical basis. current_subspace_index is unused here too.
        if ( len( self.grid_point_indices ) == 0 ):
            return numpy.zeros( ( pts.shape[1] ), numpy.double )
        poly_indices = \
            get_polynomial_indices_of_subspace_indices(
                self.refinement_manager.subspace_indices,
                self.quadrature_rule )
        coords = numpy.empty( ( self.num_dims,
                                len( self.grid_point_indices ) ),
                              numpy.double )
        values = numpy.empty( ( len( self.grid_point_indices ), 1 ),
                              numpy.double )
        for i, grid_point_index in enumerate( self.grid_point_indices ):
            coords[:,i] = \
                self.quadrature_rule.coordinate( grid_point_index )
            values[i,0] = self.data.fn_vals[grid_point_index.array_index]
        self.pce.build( coords, values )
        return self.pce.evaluate_set( pts )

    def grid_point_coordinates( self ):
        # Return the ( num_dims x num_points ) coordinates of every grid
        # point currently in the grid.
        coords = numpy.empty( ( self.num_dims,
                                len( self.grid_point_indices ) ),
                              numpy.double )
        for i, grid_point_index in enumerate( self.grid_point_indices ):
            coords[:,i] = \
                self.quadrature_rule.coordinate( grid_point_index )
        return coords

    def insert_parent_grid_point( self, grid_point_index ):
        # If the parent is missing from self.sparse_grid_indices
        # always promote it to self.sparse_grid_indices.
        # If the parent is in self.candidate_grid_point_indices
        # keep it there so it will be refined. Doing this means that
        # in insert_next_grid_point we must include a check that does not
        # allow points that are already in self.sparse_grid_indices to
        # be added again. We must also evaluate
        # the target function if it has not been evaluated already.
        # If the parent is in self.uninit_grid_point_indices
        # then promote it to the candidate set. We must always calculate
        # the priority and evaluate the function if not done so already.
        self.insert_missing_parent_grid_points( grid_point_index )
        # Check if grid_point_index is a candidate point
        array_index = None
        grid_point_index_ref=find( self.candidate_grid_point_indices_hash,
                                   grid_point_index )
        if ( grid_point_index_ref is not None ):
            # grid_point_index IS a candidate point.
            # grid_point_index_ref contains the original index
            # stored in self.candidate_grid_point_indices_hash;
            # grid_point_index will not have priority or array_index set.
            # only evaluate function if not done so already
            if ( not
                 self.refinement_manager.candidate_point_needs_func_eval() ):
                self.evaluate_target_function( grid_point_index_ref )
            # promote point to the sparse grid ( it stays on the
            # candidate heap so it can still be refined later )
            self.insert_grid_point( grid_point_index_ref )
        else:
            # grid_point_index is not a candidate point so check if it is
            # an uninit point
            grid_point_index_ref = \
                find( self.uninit_grid_point_indices, grid_point_index )
            if ( grid_point_index_ref is not None ):
                # grid_point_index is an uninit point;
                # grid_point_index_ref contains the original index
                # stored in self.uninit_grid_point_indices.
                # NOTE(review): unlike the header comment says, this path
                # does not evaluate or prioritize the point -- confirm.
                self.uninit_grid_point_indices.remove(grid_point_index_ref)
                self.insert_candidate_grid_point( grid_point_index_ref )
            else:
                # grid_point_index is not an uninit point or candidate
                # point: brand new, so assign storage, evaluate and
                # prioritize it
                grid_point_index_ref = grid_point_index
                grid_point_index_ref.array_index = \
                    self.num_grid_point_indices_generated
                self.num_grid_point_indices_generated += 1
                self.evaluate_target_function( grid_point_index_ref )
                self.refinement_manager.prioritize_grid_point(
                    grid_point_index_ref )
                self.insert_candidate_grid_point( grid_point_index_ref )
            # the parent was missing from the grid, so always promote it
            self.insert_grid_point( grid_point_index )

    def insert_missing_parent_grid_points( self, grid_point_index ):
        # Ensure every parent ( in each effective dimension ) of
        # grid_point_index is present in the grid; recurses via
        # insert_parent_grid_point.
        for d in xrange( grid_point_index.num_effect_dims() ):
            dim = grid_point_index.dimension_from_array_index ( d )
            parent_index = self.quadrature_rule.parent_index(
                grid_point_index, dim )
            if ( parent_index not in self.grid_point_indices ):
                self.insert_parent_grid_point( parent_index )

    def evaluate_target_function( self, grid_point_index ):
        # Run the target function at the point's coordinate and store the
        # value and hierarchical surplus in self.data.
        coord = self.quadrature_rule.coordinate( grid_point_index )
        coord = coord.reshape( coord.shape[0], 1 )
        array_index = grid_point_index.array_index
        self.data.set_fn_vals( self.target_function( coord ), array_index )
        hier_surplus = \
            self.compute_hierarical_surpluses(
                self.data.fn_vals[array_index], coord, grid_point_index )
        self.data.set_hier_surplus( hier_surplus, array_index )
        self.refinement_manager.num_model_runs +=1
    def prioritize( self ):
        # Turn every uninitialized point into a prioritized candidate;
        # evaluation happens here only if the refinement policy requires
        # function values to compute priorities.
        for grid_point_index in self.uninit_grid_point_indices:
            if ( self.refinement_manager.candidate_point_needs_func_eval() ):
                self.evaluate_target_function( grid_point_index )
            self.refinement_manager.prioritize_grid_point(
                grid_point_index )
            self.insert_candidate_grid_point( grid_point_index )
        # all uninit points are now candidates
        self.uninit_grid_point_indices = set()
def pce_study( build_pts, build_vals, domain, test_pts, test_vals, results_file = None, cv_file = None, solver_type = 2 ): num_dims = build_pts.shape[0] index_generator = IndexGenerator() poly_1d = [ LegendrePolynomial1D() ] basis = TensorProductBasis( num_dims, poly_1d ) pce = PCE( num_dims, order = 0, basis = basis, func_domain = domain ) if ( solver_type == 1 ): num_folds = build_pts.shape[1] else: num_folds = 20 index_norm_orders = numpy.linspace( 0.4, 1.0, 4 ) #if (solver_tupe == 1): # index_norm_orders = [.4,.5,.6,.7,.8,.9,1.] #solvers = numpy.array( [solver_type], numpy.int32 ) #cv_params_grid_array = cartesian_product( [solvers,orders] ) cv_params_grid = [] for index_norm_order in index_norm_orders: level = 2 # determine what range of orders to consider. # spefically consider any order that results in a pce with terms <= 3003 while ( True ): #index_generator.set_parameters( num_dims, level, # index_norm_order = index_norm_order) indices = index_generator.get_isotropic_indices( num_dims, level, index_norm_order ) num_indices = len( indices ) print level, index_norm_order, len ( indices ) if ( num_indices > 3003 ): break cv_params = {} cv_params['solver'] = solver_type cv_params['order'] = level cv_params['index_norm_order'] = index_norm_order if ( cv_params['solver'] > 1 or num_indices <= build_pts.shape[1] ): # only do least squares on over-determined systems cv_params_grid.append( cv_params ) level += 1 print cv_params_grid # cv_iterator = LeaveOneOutCrossValidationIterator() cv_iterator = KFoldCrossValidationIterator( num_folds = num_folds ) CV = GridSearchCrossValidation( cv_iterator, pce, use_predictor_cross_validation = True, use_fast_predictor_cross_validation = True ) t0 = time.time() CV.run( build_pts, build_vals, cv_params_grid ) time_taken = time.time() - t0 print 'cross validation took ', time_taken, ' seconds' print "################" print "Best cv params: ", CV.best_cv_params print "Best cv score: ", CV.best_score print "################" #for 
i in xrange( len( CV.cv_params_set ) ): # print CV.cv_params_set[i], CV.scores[i] best_order = CV.best_cv_params['order'] best_index_norm_order = CV.best_cv_params['index_norm_order'] best_pce = PCE( num_dims, order = best_order, basis = basis, func_domain = domain, index_norm_order = best_index_norm_order) V = best_pce.vandermonde( build_pts ).T best_pce.set_solver( CV.best_cv_params['solver'] ) if cv_params['solver'] != 1 and cv_params['solver'] != 5: best_res_tol = CV.best_cv_params['norm_residual'] best_pce.linear_solver.residual_tolerance = best_res_tol sols, sol_metrics = best_pce.linear_solver.solve( V, build_vals ) coeff = sols[:,-1] best_pce.set_coefficients( coeff ) error = abs( build_vals - best_pce.evaluate_set( build_pts ) ) print max( error ) print 'Evaluating best pce at test points' num_test_pts = test_pts.shape[1] pce_vals_pred = best_pce.evaluate_set( test_pts ).T print test_vals.shape, pce_vals_pred.shape error = test_vals.squeeze() - pce_vals_pred linf_error = numpy.max( numpy.absolute( error ) ) l2_error = numpy.sqrt( numpy.dot( error.T, error ) / num_test_pts ) mean = numpy.mean( pce_vals_pred ) var = numpy.var( pce_vals_pred ) pce_mean = best_pce.mean() pce_var = best_pce.variance() if results_file is not None: results_file.write( '%1.15e' %linf_error + ',' + '%1.15e' %l2_error + ',' + '%1.15e' %mean + ',' + '%1.15e' %var + ',%1.15e' %pce_mean + ',' + '%1.15e' %pce_var + '\n') print "linf error: ", linf_error print "l2 error: ", l2_error print "mean: ", mean print "var: ", var print "pce mean: ", pce_mean print "pce var: ", pce_var
def cv_vs_error_study( build_pts, build_vals, domain, test_pts, test_vals, results_file = None, cv_file = None, solver_type = 2 ): num_dims = build_pts.shape[0] if ( num_dims == 10 ): max_order = 5 elif ( num_dims == 15 ): max_order = 4 else: max_order = 3 poly_1d = [ LegendrePolynomial1D() ] basis = TensorProductBasis( num_dims, poly_1d ) pce = PCE( num_dims, order = 0, basis = basis, func_domain = domain ) orders = numpy.arange( 1, max_order + 1 ) solvers = numpy.array( [solver_type], numpy.int32 ) cv_params_grid_array = cartesian_product( [solvers,orders] ) cv_params_grid = [] for i in xrange( cv_params_grid_array.shape[0] ): cv_params = {} cv_params['solver'] = numpy.int32( cv_params_grid_array[i,0] ) cv_params['order'] = numpy.int32( cv_params_grid_array[i,1] ) num_pce_terms = polynomial_space_dimension( num_dims, cv_params['order'] ) if ( cv_params['solver'] <= 1 and num_pce_terms >= build_pts.shape[1] ): cv_params['lambda'] = 1.e-12 cv_params_grid.append( cv_params ) # print cv_params_grid # cv_iterator = LeaveOneOutCrossValidationIterator() cv_iterator = KFoldCrossValidationIterator( num_folds = 20 ) CV = GridSearchCrossValidation( cv_iterator, pce, use_predictor_cross_validation = True, use_fast_predictor_cross_validation = True ) t0 = time.time() CV.run( build_pts, build_vals, cv_params_grid ) time_taken = time.time() - t0 print 'cross validation took ', time_taken, ' seconds' print "################" print "Best cv params: ", CV.best_cv_params print "Best cv score: ", CV.best_score print "################" for order in orders: residual_norms = numpy.empty( len( CV.cv_params_set ), numpy.double ) scores = numpy.empty( len( CV.cv_params_set ), numpy.double ) k = 0 for i in xrange( len( CV.cv_params_set ) ): if ( CV.cv_params_set[i]['order'] == order ): residual_norms[k] = CV.cv_params_set[i]['norm_residual'] scores[k] = CV.scores[i] k += 1 residual_norms.resize( k ) scores.resize( k ) pce = PCE( num_dims, order = order, basis = basis, func_domain = domain 
) V = pce.vandermonde( build_pts ).T pce.set_solver( CV.best_cv_params['solver'] ) # pce.linear_solver.max_iterations = 3 sols, sol_metrics = pce.linear_solver.solve( V, build_vals ) from sklearn.linear_model import orthogonal_mp l2_error = numpy.empty( ( sols.shape[1] ), numpy.double ) residuals = numpy.empty( ( sols.shape[1] ), numpy.double ) test_pts = numpy.random.uniform( 0., 1., ( num_dims, 1000 ) ) f = GenzModel( domain, 'oscillatory' ) # f.set_coefficients( 4.5, 'no-decay' ) f.set_coefficients( 4.5, 'quadratic-decay' ) test_vals = f( test_pts ).reshape( ( test_pts.shape[1], 1 ) ) for i in xrange( sols.shape[1] ): coeff = sols[:,i] pce.set_coefficients( coeff ) residuals[i] = numpy.linalg.norm( build_vals - pce.evaluate_set( build_pts ) ) num_test_pts = test_pts.shape[1] pce_vals_pred = pce.evaluate_set( test_pts ).T error = test_vals.squeeze() - pce_vals_pred l2_error[i] = numpy.linalg.norm( error ) / numpy.sqrt( num_test_pts ) import pylab print residuals, l2_error print residual_norms, scores pylab.loglog( residuals, l2_error, label = str( order ) + 'true' ) pylab.loglog( residual_norms, scores, label = str( order )+'-cv' ) pylab.xlim([1e-3,10]) pylab.legend() pylab.show()
def pce_study( build_pts, build_vals, domain, test_pts, test_vals, results_file = None, cv_file = None, solver_type = 2 ): num_dims = build_pts.shape[0] index_generator = IndexGenerator() poly_1d = [ LegendrePolynomial1D() ] basis = TensorProductBasis( num_dims, poly_1d ) pce = PCE( num_dims, order = 0, basis = basis, func_domain = domain ) if ( solver_type == 1 ): num_folds = build_pts.shape[1] else: num_folds = 20 index_norm_orders = numpy.linspace( 0.4, 1.0, 4 ) #solvers = numpy.array( [solver_type], numpy.int32 ) #cv_params_grid_array = cartesian_product( [solvers,orders] ) cv_params_grid = [] for index_norm_order in index_norm_orders: level = 2 # determine what range of orders to consider. # spefically consider any order that results in a pce with terms <= 3003 while ( True ): index_generator.set_parameters( num_dims, level, index_norm_order = index_norm_order ) index_generator.build_isotropic_index_set() print level, index_norm_order, index_generator.num_indices if ( index_generator.num_indices > 3003 ): break cv_params = {} cv_params['solver'] = solver_type cv_params['order'] = level cv_params['index_norm_order'] = index_norm_order if ( cv_params['solver'] > 1 or index_generator.num_indices <= build_pts.shape[1] ): # only do least squares on over-determined systems cv_params_grid.append( cv_params ) else: break level += 1 print cv_params_grid # cv_iterator = LeaveOneOutCrossValidationIterator() cv_iterator = KFoldCrossValidationIterator( num_folds = num_folds ) CV = GridSearchCrossValidation( cv_iterator, pce, use_predictor_cross_validation = True, use_fast_predictor_cross_validation = True ) t0 = time.time() CV.run( build_pts, build_vals, cv_params_grid ) time_taken = time.time() - t0 print 'cross validation took ', time_taken, ' seconds' print "################" print "Best cv params: ", CV.best_cv_params print "Best cv score: ", CV.best_score print "################" #for i in xrange( len( CV.cv_params_set ) ): # print CV.cv_params_set[i], CV.scores[i] 
best_order = CV.best_cv_params['order'] best_index_norm_order = CV.best_cv_params['index_norm_order'] best_pce = PCE( num_dims, order = best_order, basis = basis, func_domain = domain, index_norm_order = best_index_norm_order) V = best_pce.vandermonde( build_pts ).T best_pce.set_solver( CV.best_cv_params['solver'] ) if cv_params['solver'] > 1 : best_res_tol = CV.best_cv_params['norm_residual'] best_pce.linear_solver.residual_tolerance = best_res_tol sols, sol_metrics = best_pce.linear_solver.solve( V, build_vals ) coeff = sols[:,-1] best_pce.set_coefficients( coeff ) error = abs( build_vals - best_pce.evaluate_set( build_pts ) ) print max( error ) print 'Evaluating best pce at test points' num_test_pts = test_pts.shape[1] pce_vals_pred = best_pce.evaluate_set( test_pts ).T print test_vals.shape, pce_vals_pred.shape error = test_vals.squeeze() - pce_vals_pred linf_error = numpy.max( numpy.absolute( error ) ) l2_error = numpy.sqrt( numpy.dot( error.T, error ) / num_test_pts ) mean = numpy.mean( pce_vals_pred ) var = numpy.var( pce_vals_pred ) pce_mean = best_pce.mean() pce_var = best_pce.variance() if results_file is not None: results_file.write( '%1.15e' %linf_error + ',' + '%1.15e' %l2_error + ',' + '%1.15e' %mean + ',' + '%1.15e' %var + ',%1.15e' %pce_mean + ',' + '%1.15e' %pce_var + '\n') print "linf error: ", linf_error print "l2 error: ", l2_error print "mean: ", mean print "var: ", var print "pce mean: ", pce_mean print "pce var: ", pce_var me, te, ie = best_pce.get_sensitivities() interaction_values, interaction_terms = best_pce.get_interactions() show = False fignum = 1 filename = 'oscillator-individual-interactions.png' plot_interaction_values( interaction_values, interaction_terms, title = 'Sobol indices', truncation_pct = 0.95, filename = filename, show = show, fignum = fignum ) fignum += 1 filename = 'oscillator-dimension-interactions.png' plot_interaction_effects( ie, title = 'Dimension-wise joint effects', truncation_pct = 0.95, filename = filename, 
show = show,fignum = fignum ) fignum += 1 filename = 'oscillator-main-effects.png' plot_main_effects( me, truncation_pct = 0.95, title = 'Main effect sensitivity indices', filename = filename, show = show, fignum = fignum ) fignum += 1 filename = 'oscillator-total-effects.png' plot_total_effects( te, truncation_pct = 0.95, title = 'Total effect sensitivity indices', filename = filename, show = show, fignum = fignum ) fignum += 1 from scipy.stats.kde import gaussian_kde pylab.figure( fignum ) pce_kde = gaussian_kde( pce_vals_pred ) pce_kde_x = numpy.linspace( pce_vals_pred.min(), pce_vals_pred.max(), 100 ) pce_kde_y = pce_kde( pce_kde_x ) pylab.plot( pce_kde_x, pce_kde_y,label = 'pdf of surrogate' ) true_kde = gaussian_kde( test_vals ) true_kde_x = numpy.linspace( test_vals.min(), test_vals.max(), 100 ) true_kde_y = true_kde( true_kde_x ) pylab.plot( true_kde_x, true_kde_y, label = 'true pdf' ) pylab.legend(loc=2) pylab.show()