# Todo: must scale x to -1,1 when evaluating pce pce_vals = pce.value( pts ) exact_vals = TestFunctions.matlab_peaks( pts ) error = abs( exact_vals-pce_vals ) print numpy.mean( pce_vals ), numpy.var( pce_vals ) print numpy.linalg.norm( error ) / numpy.sqrt( error.shape[0] ) print numpy.max( error ) error = numpy.reshape( error, ( X.shape[0], X.shape[1]) ) pce_vals = numpy.reshape( pce_vals, ( X.shape[0], X.shape[1]) ) exact_vals = numpy.reshape( exact_vals, ( X.shape[0], X.shape[1]) ) plt.plot_surface( X, Y, exact_vals, show = False, fignum = 1, axislabels = None, title = None ) plt.plot_surface( X, Y, pce_vals, show = False, fignum = 2, axislabels = None, title = None ) plt.plot_surface( X, Y, numpy.log( error ), show = False, fignum = 3, axislabels = None, title = None ) pylab.show()
# --- fragment: tail of a least-interpolation PCE test ---
# NOTE(review): the line below is the closing arguments of a constructor
# call whose opening is above the visible region (presumably building
# `pce_least`); `domain`, `basis`, `basis_indices`, `coeff`, `pts`,
# `vals`, `g` and `plt` are defined above this chunk.
basis = basis, func_domain = domain )#,
#linear_solver = LeastInterpolation() )
pce_least.basis_indices = basis_indices
pce_least.set_coefficients( coeff )
# interpolation error at the build nodes should be ~machine precision
error = abs( vals - pce_least.evaluate_set( pts ) )
print 'Max error at the interpolation nodes is %1.3e'% max( error );
print 'Absolute error in the PCE mean is %1.3e' %( abs( pce_least.mean()-0.0 ) )
# ( 7.+cos(4.) )/16. is presumably the analytic variance of the target
# function g — TODO confirm against g's definition above
print 'Absolute error in the PCE variance is %1.3e' %( abs( pce_least.variance() - ( 7.+numpy.cos( 4. ) )/4./4. ) )
#import pylab
#test_pts = numpy.linspace( -1., 1. , 100 ).reshape( 1, 100 )
#pylab.plot( pts.squeeze(), vals, 'ro' )
#pylab.plot( test_pts.squeeze(), g(test_pts), 'r' )
#pylab.plot( test_pts.squeeze(), pce_least.evaluate_set( test_pts ) )
#pylab.show()
# evaluate on a 100x100 tensor grid over [-1,1]^2; pts is 2 x 10000
# with one point per column
x = numpy.linspace( -1., 1., 100 );
[X,Y] = numpy.meshgrid( x, x )
pts = numpy.vstack( ( X.reshape( ( 1, X.shape[0]*X.shape[1] ) ),
                      Y.reshape( ( 1, Y.shape[0]*Y.shape[1] ) ) ) )
vals = g( pts )
pce_vals = pce_least.evaluate_set( pts )
print 'l2 (RMSE) error in the PCE surface is %1.3e' %( numpy.linalg.norm( vals-pce_vals ) / numpy.sqrt( pts.shape[1] ) )
pce_vals = numpy.reshape( pce_vals, ( X.shape[0], X.shape[1]) )
plt.plot_surface( X, Y, pce_vals, show = True, fignum = 1,
                  axislabels = None, title = None )
# must use numpy.int32 (c++ int) numpy.array([0,1],numpy.int32) ) else: poly_vals = multivariate_lagrange_interpolation( pts, abscissa_1d, fn_vals.reshape( 1, fn_vals.shape[0] ), # cannot use numpy.int (c++ long) for IntVectors # must use numpy.int32 (c++ int) numpy.array([0,1],numpy.int32) ) """ poly_vals = multivariate_lagrange_interpolation( pts.T, abscissa_1d, fn_vals, [0,1] ) """ print numpy.linalg.norm( poly_vals.squeeze() - f( pts ) ) assert -1==1 #poly_vals = f( pts ) poly_vals = poly_vals.reshape( ( X.shape[0], X.shape[1] ) ) plt.plot_surface( X, Y, poly_vals, show = False, fignum = 2, axislabels = None, title = None ) pylab.show()
def xtest_gaussian_process( self ):
    """Disabled visual test of GaussianProcess in 1D and 2D.

    The `xtest_` prefix keeps unittest discovery from running this
    (rename to `test_` to enable).  Part 1 fits a GP to 7 samples of
    self.f_1d on [-1,1] and plots the prediction with a 1.96*sigma band.
    Part 2 fits a GP to 8 hand-picked 2D points mapped from [-8,8]^2
    into [-1,1]^2 and plots the normalized prediction variance with
    contours.  NOTE(review): this method appears to continue past the
    end of the visible region (the last plot uses show=False).
    """
    #try:
    # 1D test
    #build_pts, tmp = clenshaw_curtis( 2 )
    build_pts = numpy.linspace(-.85,.9,7)
    build_pts = numpy.atleast_2d( build_pts )  # 1 x 7: one point per column
    #build_pts = numpy.atleast_2d([-.8, -.4, 0., .2, .4, .6])
    build_fn_vals = self.f_1d( build_pts ).T
    num_dims = 1
    func_domain = TensorProductDomain( num_dims, [[-1,1]] )
    GP = GaussianProcess()
    GP.function_domain( func_domain )
    GP.build( build_pts, build_fn_vals )
    a = -1.0; b = 1.0
    eval_pts = numpy.linspace( a, b, 301 ).reshape(1,301)
    pred_vals = GP.evaluate_set( eval_pts )
    #reshape matrices for easier plotting
    build_pts = build_pts[0,:]
    build_fn_vals = build_fn_vals[:,0]
    pred_var = GP.evaluate_surface_variance( eval_pts )
    eval_pts = eval_pts[0,:]
    fig = plot.pylab.figure()
    plot.pylab.plot( eval_pts, self.f_1d(eval_pts), 'r:',
                     label=u'$f(x) = x\,\sin(x)$' )
    plot.pylab.plot( build_pts, build_fn_vals, 'r.', markersize=10,
                     label=u'Observations' )
    plot.pylab.plot( eval_pts, pred_vals, 'b-', label=u'Prediction' )
    # do I need to multiply pred_var by 1.96 to get 95% confidence interval
    # NOTE(review): 1.96 scaling assumes pred_var is a standard deviation,
    # not a variance — confirm what evaluate_surface_variance returns.
    plot.pylab.fill( numpy.concatenate([eval_pts, eval_pts[::-1]]),
                     numpy.concatenate([pred_vals-1.96*pred_var,
                                        (pred_vals+1.96*pred_var)[::-1]]),
                     alpha=.5, fc='b', ec='None',
                     label='95\% confidence interval')
    plot.pylab.xlabel('$x$')
    plot.pylab.ylabel('$f(x)$')
    plot.pylab.ylim(-10, 20)
    plot.pylab.legend(loc='upper left')
    #plot.pylab.show()

    # 2D test
    level = [3,3]
    nodes_0, tmp = clenshaw_curtis( level[0] )
    nodes_1, tmp = clenshaw_curtis( level[1] )
    build_pts_1d = [nodes_0,nodes_1]
    # NOTE(review): the tensor-grid build points computed above are
    # immediately overwritten by the hard-coded sample below.
    build_pts = cartesian_product( build_pts_1d ).T
    build_pts = numpy.array([[-4.61611719, -6.00099547],
                             [4.10469096, 5.32782448],
                             [0.00000000, -0.50000000],
                             [-6.17289014, -4.6984743],
                             [1.3109306, -6.93271427],
                             [-5.03823144, 3.10584743],
                             [-2.87600388, 6.74310541],
                             [5.21301203, 4.26386883]])
    # map the 8 points from [-8,8]^2 into the GP domain [-1,1]^2
    build_pts = hypercube_map( build_pts.T, [-8,8,-8,8],[-1,1,-1,1])
    build_fn_vals = self.g( build_pts )
    build_fn_vals = build_fn_vals.reshape( ( build_fn_vals.shape[0], 1 ) )
    num_dims = 2
    # NOTE(review): only one interval is passed for a 2D domain —
    # presumably TensorProductDomain replicates [[-1,1]] across
    # dimensions; confirm against its definition.
    func_domain = TensorProductDomain( num_dims, [[-1,1]] )
    GP = GaussianProcess()
    GP.function_domain( func_domain )
    GP.build( build_pts, build_fn_vals )
    a = -1.0; b = 1.0
    # evaluate on a 50x50 grid; eval_pts is 2 x 2500, one point per column
    x = numpy.linspace( a, b, 50 )
    [X,Y] = numpy.meshgrid( x, x )
    eval_pts = numpy.vstack((X.reshape((1,X.shape[0]*X.shape[1] ) ),
                             Y.reshape( ( 1, Y.shape[0]*Y.shape[1]))))
    pred_vals = GP.evaluate_set( eval_pts )
    pred_var = GP.evaluate_surface_variance( eval_pts )
    true_vals = self.g( eval_pts )
    # back onto the meshgrid for image/contour plotting
    pred_vals = pred_vals.reshape( ( X.shape[0], X.shape[1] ) )
    pred_var = pred_var.reshape( ( X.shape[0], X.shape[1] ) )
    true_vals = true_vals.reshape( ( X.shape[0], X.shape[1] ) )
    fig = plot.pylab.figure(2)
    ax = fig.add_subplot(111)
    ax.axes.set_aspect('equal')
    plot.pylab.xticks([])
    plot.pylab.yticks([])
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    plot.pylab.xlabel('$x_1$')
    plot.pylab.ylabel('$x_2$')
    # Standard normal distribution functions
    # NOTE(review): phi/PHI/PHIinv are unused in the visible span.
    phi = stats.distributions.norm().pdf
    PHI = stats.distributions.norm().cdf
    PHIinv = stats.distributions.norm().ppf
    lim = b # set plot limits
    pred_var = numpy.sqrt(pred_var)# * 1.96
    build_fn_vals = build_fn_vals[:,0]
    #normalize prediction variance
    pred_var = pred_var / numpy.max( numpy.max( pred_var ) )
    # grayscale image of the normalized predictive std deviation
    cax = plot.pylab.imshow(numpy.flipud(pred_var), cmap=cm.gray_r,
                            alpha=0.8, extent=(- lim, lim, - lim, lim))
    # NOTE(review): colorbar ticks go to 1.0 but the norm caps at 0.9 —
    # verify this mismatch is intentional.
    norm = plot.pylab.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
    cb = plot.pylab.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.],
                             norm=norm)
    cb.set_label(r'$\varepsilon[f(x)]$')
    # build points colored by the sign of the observed function value
    plot.pylab.plot(build_pts[0, build_fn_vals <= 0],
                    build_pts[1, build_fn_vals <= 0], 'r.', markersize=12)
    plot.pylab.plot(build_pts[0, build_fn_vals > 0],
                    build_pts[1, build_fn_vals > 0 ], 'b.', markersize=12)
    # one contour of the true surface, three of the normalized variance
    cs = plot.pylab.contour(X, Y, true_vals, [0.9], colors='k',
                            linestyles='dashdot')
    cs = plot.pylab.contour(X, Y, pred_var, [0.25], colors='b',
                            linestyles='solid')
    plot.pylab.clabel(cs, fontsize=11)
    cs = plot.pylab.contour(X, Y, pred_var, [0.5], colors='k',
                            linestyles='dashed')
    plot.pylab.clabel(cs, fontsize=11)
    cs = plot.pylab.contour(X, Y, pred_var, [0.75], colors='r',
                            linestyles='solid')
    plot.pylab.clabel(cs, fontsize=11)
    #plot.pylab.show()
    plot.plot_surface( X, Y, true_vals, show = False, fignum = 3,
                       axislabels = None, title = None )