Example #1
if __name__ == '__main__':
    # Demo script: evaluate a 2D test function on a tensor-product grid of
    # Clenshaw-Curtis abscissae.  Imports are local to the guard so that
    # importing this module has no side effects.
    import numpy

    from polynomial_chaos.orthogonal_polynomial import LegendrePolynomial1D
    import polynomial_chaos.polynomial_chaos_expansion as PCE
    import utilities.visualisation as plt
    import examples.test_functions
    from utilities.quadrature_rules import clenshaw_curtis
    from utilities.tensor_product_domain import *

    num_dims = 2

    # Target function: the MATLAB 'peaks' surface with its argument scaled by
    # 3.  x is a (num_dims, num_pts) array of points.
    g = lambda x: examples.test_functions.matlab_peaks( 3*x )
    # Alternative targets kept for experimentation:
    #g = lambda x: numpy.sin( x[0,:] + x[1,:] )
    #g = lambda x: numpy.sin( numpy.pi*x/2+1 ).squeeze()

    domain = TensorProductDomain( num_dims, [[-1,1]] )

    # 1D Clenshaw-Curtis rule of level 4; only the abscissae x are used below.
    x, w = clenshaw_curtis( 4 )

    # Tensorise the 1D abscissae into a (2, n*n) array of 2D points.
    [X, Y] = numpy.meshgrid( x, x )
    pts = numpy.vstack( ( X.reshape( ( 1, X.size ) ),
                          Y.reshape( ( 1, Y.size ) ) ) )

    ig = IndexGenerator()
    #ig.get_isotropic_indices( num_dims, order, 1. )
 def update_precomputed_data( self, level ):
     """Grow the cached quadrature data up to ``level``.

     For every level between the currently cached ``self.level`` (exclusive)
     and ``level`` (inclusive), compute the Clenshaw-Curtis rule and append
     its abscissae and weights to the caches, then record ``level`` as the
     new highest precomputed level.
     """
     missing_levels = xrange( self.level + 1, level + 1 )
     for current in missing_levels:
         abscissa, weights = clenshaw_curtis( current )
         self.abscissa.append( abscissa )
         self.weights.append( weights )
     self.level = level
Example #3
     def xtest_gaussian_process( self ):
          """Disabled visual check: fit a GaussianProcess in 1D and 2D and plot it.

          The leading ``x`` in the name stops the unittest runner from
          discovering this test automatically; rename to ``test_...`` to
          re-enable.  Exercises ``GP.build`` / ``evaluate_set`` /
          ``evaluate_surface_variance`` and builds pylab figures (the
          ``show()`` calls are commented out, so nothing is displayed).
          """
          #try:
          # 1D test
          #build_pts, tmp = clenshaw_curtis( 2 )
          # 7 equally spaced training points in [-0.85, 0.9], shaped (1, 7)
          # -- presumably the GP expects points as (num_dims, num_pts);
          # TODO confirm against GaussianProcess.build.
          build_pts = numpy.linspace(-.85,.9,7)
          build_pts = numpy.atleast_2d( build_pts )
          #build_pts = numpy.atleast_2d([-.8, -.4, 0., .2, .4, .6])
          # training values; self.f_1d is defined elsewhere in this test class
          build_fn_vals = self.f_1d( build_pts ).T

          num_dims = 1
          func_domain = TensorProductDomain( num_dims, [[-1,1]] )
          GP = GaussianProcess()
          GP.function_domain( func_domain )
          GP.build( build_pts, build_fn_vals )

          # dense prediction grid of 301 points on [-1, 1]
          a = -1.0; b = 1.0
          eval_pts = numpy.linspace( a, b, 301 ).reshape(1,301)
          pred_vals = GP.evaluate_set( eval_pts )

          #reshape matrices for easier plotting
          build_pts = build_pts[0,:]
          build_fn_vals = build_fn_vals[:,0]
          pred_var = GP.evaluate_surface_variance( eval_pts )
          eval_pts = eval_pts[0,:]

          # 1D figure: true function, observations, GP prediction, and a
          # shaded confidence band.
          fig = plot.pylab.figure()
          plot.pylab.plot( eval_pts, self.f_1d(eval_pts), 
                           'r:', label=u'$f(x) = x\,\sin(x)$' )
          plot.pylab.plot( build_pts, build_fn_vals, 'r.', 
                           markersize=10, label=u'Observations' )
          plot.pylab.plot( eval_pts, pred_vals, 
                           'b-', label=u'Prediction' )
          
          # do I need to multiply pred_var by 1.96 to get 95% confidence interval
          # NOTE(review): 1.96*pred_var is a 95% band only if pred_var is a
          # standard deviation -- confirm what evaluate_surface_variance
          # returns (the 2D branch below takes numpy.sqrt of it first).
          plot.pylab.fill( numpy.concatenate([eval_pts, eval_pts[::-1]]),
                           numpy.concatenate([pred_vals-1.96*pred_var, 
                                              (pred_vals+1.96*pred_var)[::-1]]), 
                           alpha=.5, fc='b', ec='None', 
                           label='95\% confidence interval')

          plot.pylab.xlabel('$x$')
          plot.pylab.ylabel('$f(x)$')
          plot.pylab.ylim(-10, 20)
          plot.pylab.legend(loc='upper left')
          #plot.pylab.show()
          
          # 2D test
          level = [3,3]
          nodes_0, tmp = clenshaw_curtis( level[0] )
          nodes_1, tmp = clenshaw_curtis( level[1] )
          build_pts_1d = [nodes_0,nodes_1]
          build_pts = cartesian_product( build_pts_1d ).T

          # NOTE(review): the tensor grid built just above is immediately
          # overwritten by this hard-coded point set (points in [-8,8]^2,
          # mapped onto [-1,1]^2 below) -- remove one of the two if this was
          # not intentional.
          build_pts = numpy.array([[-4.61611719, -6.00099547],
                        [4.10469096, 5.32782448],
                        [0.00000000, -0.50000000],
                        [-6.17289014, -4.6984743],
                        [1.3109306, -6.93271427],
                        [-5.03823144, 3.10584743],
                        [-2.87600388, 6.74310541],
                        [5.21301203, 4.26386883]])
          build_pts = hypercube_map( build_pts.T, [-8,8,-8,8],[-1,1,-1,1])

          # self.g is defined elsewhere in this test class
          build_fn_vals = self.g( build_pts )

          # column vector of training values
          build_fn_vals = build_fn_vals.reshape( ( build_fn_vals.shape[0], 1 ) )

          num_dims = 2
          func_domain = TensorProductDomain( num_dims, [[-1,1]] )
          GP = GaussianProcess()
          GP.function_domain( func_domain )
          GP.build( build_pts, build_fn_vals )
          
          # evaluate prediction, predictive variance and the true function on
          # a 50x50 grid over [-1,1]^2, flattened to (2, 2500) points
          a = -1.0; b = 1.0
          x = numpy.linspace( a, b, 50 )
          [X,Y] = numpy.meshgrid( x, x )
          eval_pts = numpy.vstack((X.reshape((1,X.shape[0]*X.shape[1] ) ), 
                                   Y.reshape( ( 1, Y.shape[0]*Y.shape[1]))))
          pred_vals = GP.evaluate_set( eval_pts )
          pred_var = GP.evaluate_surface_variance( eval_pts )
          true_vals = self.g( eval_pts )
 
          # back to 50x50 grid shape for imshow/contour
          pred_vals = pred_vals.reshape( ( X.shape[0], X.shape[1] ) )
          pred_var = pred_var.reshape( ( X.shape[0], X.shape[1] ) )
          true_vals = true_vals.reshape( ( X.shape[0], X.shape[1] ) )

          # 2D figure: predictive uncertainty as a grayscale image with the
          # training points and selected contour lines overlaid.
          fig = plot.pylab.figure(2)
          ax = fig.add_subplot(111)
          ax.axes.set_aspect('equal')
          plot.pylab.xticks([])
          plot.pylab.yticks([])
          ax.set_xticklabels([])
          ax.set_yticklabels([])
          plot.pylab.xlabel('$x_1$')
          plot.pylab.ylabel('$x_2$')

          # Standard normal distribution functions
          # (phi/PHI/PHIinv are bound but not used in the visible code)
          phi = stats.distributions.norm().pdf
          PHI = stats.distributions.norm().cdf
          PHIinv = stats.distributions.norm().ppf
          lim = b # set plot limits
          # convert variance to standard deviation for plotting
          pred_var = numpy.sqrt(pred_var)# * 1.96

          build_fn_vals = build_fn_vals[:,0]
          #normalize prediction variance
          pred_var = pred_var / numpy.max( numpy.max( pred_var ) )

          cax = plot.pylab.imshow(numpy.flipud(pred_var), 
                                  cmap=cm.gray_r, alpha=0.8,
                                  extent=(- lim, lim, - lim, lim))
          norm = plot.pylab.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
          cb = plot.pylab.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
          cb.set_label(r'$\varepsilon[f(x)]$')
          
          # training points coloured by the sign of the observed value
          plot.pylab.plot(build_pts[0, build_fn_vals <= 0], build_pts[1, build_fn_vals <= 0], 'r.', markersize=12)

          plot.pylab.plot(build_pts[0, build_fn_vals > 0], build_pts[1, build_fn_vals > 0 ], 'b.', markersize=12)
          
          # 0.9 level set of the true function ...
          cs = plot.pylab.contour(X, Y, true_vals, [0.9], colors='k', linestyles='dashdot')
          
          # ... and 0.25 / 0.5 / 0.75 contours of the normalised uncertainty
          cs = plot.pylab.contour(X, Y, pred_var, [0.25], colors='b',
                          linestyles='solid')
          plot.pylab.clabel(cs, fontsize=11)

          cs = plot.pylab.contour(X, Y, pred_var, [0.5], colors='k',
                          linestyles='dashed')
          plot.pylab.clabel(cs, fontsize=11)

          cs = plot.pylab.contour(X, Y, pred_var, [0.75], colors='r',
                          linestyles='solid')
          plot.pylab.clabel(cs, fontsize=11)
          
          #plot.pylab.show()
          
          # surface plot of the true function on its own figure (3)
          plot.plot_surface( X, Y, true_vals,
                            show = False, fignum = 3,
                            axislabels = None, 
                            title = None )