Code Example #1
File: sparse_grids.py Project: jjakeman/pyheat
def get_polynomial_indices_of_subspace_index( subspace_index,
                                              quad_rule ):
    num_dims = quad_rule.num_dims

    if subspace_index.is_root():
        return [PolynomialIndex(None)]

    indices_1d = []
    for d in xrange( subspace_index.num_effect_dims() ):
        dim = subspace_index.dimension_from_array_index( d )
        level = subspace_index.level_from_array_index( d )
        # nodal point indices will only work for nested rules;
        # if a non-nested rule is used we would have to find the
        # number of unique points in all subspaces with level l < level
        num_unique_points = \
            quad_rule.quadrature_rule_objects[quad_rule.indices[dim]].num_nodal_points( level )
        indices_1d.append( numpy.hstack(
            ( dim * numpy.ones( ( num_unique_points, 1 ), numpy.int32 ),
              numpy.arange( num_unique_points,
                            dtype=numpy.int32 ).reshape( num_unique_points, 1 ) ) ) )
        indices_1d[d] = \
            indices_1d[d].reshape(indices_1d[d].shape[0]*indices_1d[d].shape[1])

    poly_indices_data = cartesian_product( indices_1d, 2 )

    poly_indices = []
    num_poly_indices = poly_indices_data.shape[0]
    for i in xrange( num_poly_indices ):
        # each row stores flattened (dim, point-index) pairs;
        # reshape it back to shape (num_pairs, 2)
        poly_indices.append( PolynomialIndex(
            poly_indices_data[i,:].reshape(
                poly_indices_data[i,:].shape[0] // 2, 2 ) ) )
    return poly_indices
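
A quick illustration of the (dim, point-index) bookkeeping above (a standalone sketch, not part of pyheat): for a dimension dim with n unique points, the hstack builds the rows [dim, 0], [dim, 1], ..., [dim, n-1], and the reshape flattens them to [dim, 0, dim, 1, ...] ready for the cartesian product.

import numpy

dim, n = 1, 3  # hypothetical dimension index and number of unique points
pairs = numpy.hstack( ( dim * numpy.ones( ( n, 1 ), numpy.int32 ),
                        numpy.arange( n, dtype=numpy.int32 ).reshape( n, 1 ) ) )
flat = pairs.reshape( pairs.shape[0] * pairs.shape[1] )
print( flat )  # [1 0 1 1 1 2]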
Code Example #2
File: test_utilities.py Project: jjakeman/pyheat
     def test_cartesian_product( self ):

         # test when num elems = 1
         s1 = numpy.arange( 0, 3 )
         s2 = numpy.arange( 3, 5 )

         sets = numpy.array( [[0,3], [1,3], [2,3], [0,4], 
                              [1,4], [2,4]], numpy.int )
         output_sets = cartesian_product( [s1,s2], 1 )
         assert numpy.array_equal( output_sets, sets )

         # test when num elems > 1
         s1 = numpy.arange( 0, 6 )
         s2 = numpy.arange( 6, 10 )

         sets = numpy.array( [[ 0, 1, 6, 7], [ 2, 3, 6, 7],
                              [ 4, 5, 6, 7], [ 0, 1, 8, 9],
                              [ 2, 3, 8, 9], [ 4, 5, 8, 9]], numpy.int )
         output_sets = cartesian_product( [s1,s2], 2 )
         assert numpy.array_equal( output_sets, sets )
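
These tests pin down the convention: entries of the first 1d set vary fastest, and num_elems consecutive entries of each set travel together as one group. A minimal reimplementation consistent with both tests (an illustrative sketch; the project's actual cartesian_product in utilities.math_utils may differ):

import itertools
import numpy

def cartesian_product_sketch( input_sets, num_elems=1 ):
    # split each 1d set into consecutive groups of num_elems entries
    groups = [ numpy.asarray( s ).reshape( -1, num_elems ) for s in input_sets ]
    counts = [ g.shape[0] for g in groups ]
    rows = []
    # iterate with the last set varying slowest, i.e. the first set fastest
    for idx in itertools.product( *[ range( n ) for n in reversed( counts ) ] ):
        idx = tuple( reversed( idx ) )
        rows.append( numpy.hstack( [ groups[d][i] for d, i in enumerate( idx ) ] ) )
    return numpy.array( rows )

With num_elems=2 this is exactly how Code Example #1 recombines its flattened (dim, point-index) pairs into multivariate polynomial indices.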
Code Example #3
     def xtest_grid_search_cross_validation( self ):

          f_1d = lambda x: x**10

          build_pts = numpy.linspace(-.85,.9,14)
          build_pts = numpy.atleast_2d( build_pts )
          build_vals = f_1d( build_pts ).T

          # Test grid search cross validation when applied to Gaussian Process
          num_dims = 1
          func_domain = TensorProductDomain( num_dims, [[-1,1]] )
          GP = GaussianProcess()
          GP.set_verbosity( 0 )
          GP.function_domain( func_domain )

          loo_cv_iterator = LeaveOneOutCrossValidationIterator()
          CV = GridSearchCrossValidation( loo_cv_iterator, GP )
          CV.run( build_pts, build_vals )         
          
          I = numpy.arange( build_pts.shape[1] )
          for i in xrange( build_pts.shape[1] ):
               if i == 0 : J = I[1:]
               elif i == build_pts.shape[1]-1 : J = I[:-1]
               else: J = numpy.hstack( ( I[:i], I[i+1:] ) )
               train_pts = build_pts[:,J]
               train_vals = build_vals[J,:]
               GP.build( train_pts, train_vals )
               pred_vals = GP.evaluate_set( build_pts )
               assert numpy.allclose( build_vals[i,0]-pred_vals[i],
                                      CV.residuals[0][i] )

          # Test grid search cross validation when applied to polynomial chaos
          # expansions that are built using ridge regression
          # The vandermonde matrix is built from scratch every time by the pce
          num_dims = 1
          order = 3
          build_vals = f_1d( build_pts ).T
          poly_1d = [ LegendrePolynomial1D() ]
          basis = TensorProductBasis( num_dims, poly_1d )
          pce = PCE( num_dims, basis, order, func_domain )

          loo_cv_iterator = LeaveOneOutCrossValidationIterator()
          CV = GridSearchCrossValidation( loo_cv_iterator, pce )
          CV.run( build_pts, build_vals )

          I = numpy.arange( build_pts.shape[1] )
          V = pce.vandermonde( build_pts ).T
          for i in xrange( V.shape[0] ):
               if i == 0 : J = I[1:]
               elif i == build_pts.shape[1]-1 : J = I[:-1]
               else: J = numpy.hstack( ( I[:i], I[i+1:] ) )
               A =  V[J,:]
               b = build_vals[J,:]
               x = ridge_regression( A, b )
               assert numpy.allclose( (build_vals[i,0]-numpy.dot( V, x ))[i],
                                      CV.residuals[0][i] )

          # Test grid search cross validation when applied to polynomial chaos
          # expansions that are built using ridge regression
          # Specifying use_predictor_cross_validation = True will ensure that
          # the vandermonde matrix is not built from scratch every time by
          # the pce
          num_dims = 1
          order = 3
          build_vals = f_1d( build_pts ).T
          poly_1d = [ LegendrePolynomial1D() ]
          basis = TensorProductBasis( num_dims, poly_1d )
          pce = PCE( num_dims, basis, order, func_domain )

          loo_cv_iterator = LeaveOneOutCrossValidationIterator()
          CV = GridSearchCrossValidation( loo_cv_iterator, pce, 
                                          use_predictor_cross_validation = True)
          CV.run( build_pts, build_vals )

          I = numpy.arange( build_pts.shape[1] )
          V = pce.vandermonde( build_pts ).T
          for i in xrange( V.shape[0] ):
               if i == 0 : J = I[1:]
               elif i == build_pts.shape[1]-1 : J = I[:-1]
               else: J = numpy.hstack( ( I[:i], I[i+1:] ) )
               A =  V[J,:]
               b = build_vals[J,:]
               x = ridge_regression( A, b )
               assert numpy.allclose( (build_vals[i,0]-numpy.dot( V, x ))[i],
                                      CV.residuals[0][i] )

          # Test grid search cross validation when applied to polynomial chaos
          # expansions that are built using ridge regression
          # A closed form for the cross validation residual is used
          num_dims = 1
          order = 3
          build_vals = f_1d( build_pts ).T
          poly_1d = [ LegendrePolynomial1D() ]
          basis = TensorProductBasis( num_dims, poly_1d )
          pce = PCE( num_dims, basis, order, func_domain )

          loo_cv_iterator = LeaveOneOutCrossValidationIterator()
          CV = GridSearchCrossValidation( loo_cv_iterator, pce, 
                                     use_predictor_cross_validation = True,
                                     use_fast_predictor_cross_validation = True )
          CV.run( build_pts, build_vals )

          I = numpy.arange( build_pts.shape[1] )
          V = pce.vandermonde( build_pts ).T
          for i in xrange( V.shape[0] ):
               if i == 0 : J = I[1:]
               elif i == build_pts.shape[1]-1 : J = I[:-1]
               else: J = numpy.hstack( ( I[:i], I[i+1:] ) )
               A =  V[J,:]
               b = build_vals[J,:]
               x = ridge_regression( A, b )
               assert numpy.allclose( (build_vals[i,0]-numpy.dot( V, x ))[i],
                                      CV.residuals[0][i] )

          # Test grid search cross validation when applied to polynomial chaos
          # expansions that are built using ridge regression
          num_dims = 1
          order = 3
          build_vals = f_1d( build_pts ).T
          poly_1d = [ LegendrePolynomial1D() ]
          basis = TensorProductBasis( num_dims, poly_1d )
          pce = PCE( num_dims, basis, order, func_domain )

          max_order = build_pts.shape[1]
          orders = numpy.arange( 1, max_order )
          lamda = numpy.array( [0.,1e-3,1e-2,1e-1] )
          # note: cartesian_product takes its dtype from the first array in
          # the 1d sets, so if orders came first lamda would be rounded to 0
          cv_params_grid_array = cartesian_product( [lamda,orders] )

          cv_params_grid = []
          for i in xrange( cv_params_grid_array.shape[0] ):
               cv_params = {}
               cv_params['lambda'] = cv_params_grid_array[i,0]
               cv_params['order'] = numpy.int32( cv_params_grid_array[i,1] )
               cv_params_grid.append( cv_params )

          loo_cv_iterator = LeaveOneOutCrossValidationIterator()
          CV = GridSearchCrossValidation( loo_cv_iterator, pce, 
                                    use_predictor_cross_validation = True,
                                    use_fast_predictor_cross_validation = False )
          CV.run( build_pts, build_vals, cv_params_grid )

          k = 0
          I = numpy.arange( build_pts.shape[1] )
          for cv_params in cv_params_grid:
               order = cv_params['order']
               lamda = cv_params['lambda']
               pce.set_order( order )
               V = pce.vandermonde( build_pts ).T
               for i in xrange( V.shape[0] ):
                    if i == 0 : J = I[1:]
                    elif i == build_pts.shape[1]-1 : J = I[:-1]
                    else: J = numpy.hstack( ( I[:i], I[i+1:] ) )
                    A =  V[J,:]
                    b = build_vals[J,:]
                    x = ridge_regression( A, b, lamda = lamda )
                    assert numpy.allclose( ( build_vals[i,0]-
                                             numpy.dot( V, x ) )[i],
                                           CV.residuals[k][i] )
               k += 1

          print 'best', CV.best_cv_params

          # Test grid search cross validation when applied to 
          # expansions that are built using a step based method
          # ( LARS )
          num_dims = 1
          order = 3
          build_vals = f_1d( build_pts ).T
          poly_1d = [ LegendrePolynomial1D() ]
          basis = TensorProductBasis( num_dims, poly_1d )
          pce = PCE( num_dims, basis, order, func_domain )

          max_order = build_pts.shape[1]
          orders = numpy.arange( 1, max_order )
          lamda = numpy.array( [0.,1e-3,1e-2,1e-1] )
          # note: cartesian_product takes its dtype from the first array in
          # the 1d sets, so if orders came first lamda would be rounded to 0
          cv_params_grid_array = cartesian_product( [lamda,orders] )

          cv_params_grid = []
          for i in xrange( cv_params_grid_array.shape[0] ):
               cv_params = {}
               cv_params['solver'] = 4 # LARS
               cv_params['order'] = numpy.int32( cv_params_grid_array[i,1] )
               cv_params_grid.append( cv_params )

          print cv_params_grid

          loo_cv_iterator = LeaveOneOutCrossValidationIterator()
          #loo_cv_iterator = KFoldCrossValidationIterator( 3 )
          CV = GridSearchCrossValidation( loo_cv_iterator, pce, 
                                    use_predictor_cross_validation = True,
                                    use_fast_predictor_cross_validation = False )
          CV.run( build_pts, build_vals, cv_params_grid )

          k = 0
          I = numpy.arange( build_pts.shape[1] )
          for cv_params in cv_params_grid:
               order = cv_params['order']
               pce.set_order( order )
               V = pce.vandermonde( build_pts ).T
               for i in xrange( V.shape[0] ):
                    if i == 0 : J = I[1:]
                    elif i == build_pts.shape[1]-1 : J = I[:-1]
                    else: J = numpy.hstack( ( I[:i], I[i+1:] ) )
                    A =  V[J,:]
                    b = build_vals[J,:]
                    b = b.reshape( b.shape[0] )
                    x, metrics = least_angle_regression( A, b, 0., 4, 0., 1000, 
                                                         0 )
                    assert numpy.allclose( ( build_vals[i,0]-
                                             numpy.dot( V, x ) )[i],
                                           CV.residuals[k][i] )
               k += 1

          #for i in xrange( len( CV.cv_params_set ) ):
          #     print CV.cv_params_set[i], CV.scores[i]

          print 'best param', CV.best_cv_params
          print 'best score', CV.best_score
          print build_pts.shape[1]

          # ( OMP )
          num_dims = 1
          order = 3
          build_vals = f_1d( build_pts ).T
          poly_1d = [ LegendrePolynomial1D() ]
          basis = TensorProductBasis( num_dims, poly_1d )
          pce = PCE( num_dims, basis, order, func_domain )

          max_order = build_pts.shape[1]
          orders = numpy.arange( 1, max_order )
          lamda = numpy.array( [0.,1e-3,1e-2,1e-1] )
          # note: cartesian_product takes its dtype from the first array in
          # the 1d sets, so if orders came first lamda would be rounded to 0
          cv_params_grid_array = cartesian_product( [lamda,orders] )

          cv_params_grid = []
          for i in xrange( cv_params_grid_array.shape[0] ):
               cv_params = {}
               cv_params['solver'] = 2 # OMP
               cv_params['order'] = numpy.int32( cv_params_grid_array[i,1] )
               cv_params_grid.append( cv_params )

          print cv_params_grid

          loo_cv_iterator = LeaveOneOutCrossValidationIterator()
          #loo_cv_iterator = KFoldCrossValidationIterator( 3 )
          CV = GridSearchCrossValidation( loo_cv_iterator, pce, 
                                    use_predictor_cross_validation = True,
                                    use_fast_predictor_cross_validation = False )
          CV.run( build_pts, build_vals, cv_params_grid )

          k = 0
          I = numpy.arange( build_pts.shape[1] )
          for cv_params in cv_params_grid:
               order = cv_params['order']
               pce.set_order( order )
               V = pce.vandermonde( build_pts ).T
               for i in xrange( V.shape[0] ):
                    if i == 0 : J = I[1:]
                    elif i == build_pts.shape[1]-1 : J = I[:-1]
                    else: J = numpy.hstack( ( I[:i], I[i+1:] ) )
                    A =  V[J,:]
                    b = build_vals[J,:]
                    b = b.reshape( b.shape[0] )
                    x, metrics = orthogonal_matching_pursuit( A, b, 0., 1000, 0 )
                    assert numpy.allclose( ( build_vals[i,0]-
                                             numpy.dot( V, x ) )[i],
                                           CV.residuals[k][i] )
               k += 1

          #for i in xrange( len( CV.cv_params_set ) ):
          #     print CV.cv_params_set[i], CV.scores[i]

          print 'best param', CV.best_cv_params
          print 'best score', CV.best_score
          print build_pts.shape[1]
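
The three-branch construction of the training index set J, repeated throughout this test, simply removes the i-th entry from I; a compact equivalent (illustrative only):

import numpy

I = numpy.arange( 7 )  # hypothetical number of build points
for i in range( I.shape[0] ):
    if i == 0: J = I[1:]
    elif i == I.shape[0] - 1: J = I[:-1]
    else: J = numpy.hstack( ( I[:i], I[i+1:] ) )
    assert numpy.array_equal( J, numpy.delete( I, i ) )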
Code Example #4

    print pts.shape
                          

    level = [7,6]
    abscissa_1d = []
    barycentric_weights_1d = []
    for l in level:
        nodes, tmp = clenshaw_curtis( l )
        nodes = hypercube_map_1d( nodes, -1., 1., a, b )
        abscissa_1d.append( nodes.copy() )
        barycentric_weights_1d.append(
            clenshaw_curtis_barycentric_weights( l ).copy() )

    from utilities.math_utils import cartesian_product
    abscissa = cartesian_product( abscissa_1d ).T
    fn_vals = f( abscissa )
    print fn_vals.shape
    print len( barycentric_weights_1d )
    print len( abscissa_1d )

    a = True
    #a = False
    if ( a ):
        #poly_vals = multivariate_lagrange_interpolation( 
        from interpolation_cpp import \
            multivariate_barycentric_lagrange_interpolation as \
            multivariate_barycentric_lagrange_interpolation_cpp
        poly_vals = multivariate_barycentric_lagrange_interpolation_cpp(
            pts, 
            abscissa_1d,
            barycentric_weights_1d,
            fn_vals.reshape( 1, fn_vals.shape[0] ),
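
The snippet is cut off mid-call, but the data it assembles (abscissa_1d, barycentric_weights_1d) feeds the standard barycentric Lagrange formula. A self-contained 1D sketch of that formula (a hypothetical helper, not the project's interpolation_cpp routine):

import numpy

def barycentric_interpolate_1d( x, abscissa, weights, fn_vals ):
    # second (true) form of the barycentric formula:
    # p(x) = sum_i w_i f_i / (x - x_i)  /  sum_i w_i / (x - x_i)
    x = numpy.atleast_1d( x ).astype( float )
    result = numpy.empty( x.shape[0] )
    for k in range( x.shape[0] ):
        diff = x[k] - abscissa
        on_node = numpy.where( diff == 0. )[0]
        if on_node.size > 0:
            result[k] = fn_vals[on_node[0]]  # x[k] coincides with a node
        else:
            terms = weights / diff
            result[k] = numpy.dot( terms, fn_vals ) / numpy.sum( terms )
    return result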
Code Example #5
File: test_regression.py Project: jjakeman/pyheat
     def xtest_gaussian_process( self ):
          #try:
          # 1D test
          #build_pts, tmp = clenshaw_curtis( 2 )
          build_pts = numpy.linspace(-.85,.9,7)
          build_pts = numpy.atleast_2d( build_pts )
          #build_pts = numpy.atleast_2d([-.8, -.4, 0., .2, .4, .6])
          build_fn_vals = self.f_1d( build_pts ).T

          num_dims = 1
          func_domain = TensorProductDomain( num_dims, [[-1,1]] )
          GP = GaussianProcess()
          GP.function_domain( func_domain )
          GP.build( build_pts, build_fn_vals )

          a = -1.0; b = 1.0
          eval_pts = numpy.linspace( a, b, 301 ).reshape(1,301)
          pred_vals = GP.evaluate_set( eval_pts )

          #reshape matrices for easier plotting
          build_pts = build_pts[0,:]
          build_fn_vals = build_fn_vals[:,0]
          pred_var = GP.evaluate_surface_variance( eval_pts )
          eval_pts = eval_pts[0,:]

          fig = plot.pylab.figure()
          plot.pylab.plot( eval_pts, self.f_1d(eval_pts), 
                           'r:', label=u'$f(x) = x\,\sin(x)$' )
          plot.pylab.plot( build_pts, build_fn_vals, 'r.', 
                           markersize=10, label=u'Observations' )
          plot.pylab.plot( eval_pts, pred_vals, 
                           'b-', label=u'Prediction' )
          
          # 1.96 is the two-sided 95% z-value and scales the standard
          # deviation; if evaluate_surface_variance returns a variance it
          # should be square-rooted first (cf. the sqrt in the 2D test below)
          plot.pylab.fill( numpy.concatenate([eval_pts, eval_pts[::-1]]),
                           numpy.concatenate([pred_vals-1.96*pred_var, 
                                              (pred_vals+1.96*pred_var)[::-1]]), 
                           alpha=.5, fc='b', ec='None', 
                           label='95\% confidence interval')

          plot.pylab.xlabel('$x$')
          plot.pylab.ylabel('$f(x)$')
          plot.pylab.ylim(-10, 20)
          plot.pylab.legend(loc='upper left')
          #plot.pylab.show()
          
          # 2D test
          level = [3,3]
          nodes_0, tmp = clenshaw_curtis( level[0] )
          nodes_1, tmp = clenshaw_curtis( level[1] )
          build_pts_1d = [nodes_0,nodes_1]
          build_pts = cartesian_product( build_pts_1d ).T

          build_pts = numpy.array([[-4.61611719, -6.00099547],
                        [4.10469096, 5.32782448],
                        [0.00000000, -0.50000000],
                        [-6.17289014, -4.6984743],
                        [1.3109306, -6.93271427],
                        [-5.03823144, 3.10584743],
                        [-2.87600388, 6.74310541],
                        [5.21301203, 4.26386883]])
          build_pts = hypercube_map( build_pts.T, [-8,8,-8,8],[-1,1,-1,1])

          build_fn_vals = self.g( build_pts )

          build_fn_vals = build_fn_vals.reshape( ( build_fn_vals.shape[0], 1 ) )

          num_dims = 2
          func_domain = TensorProductDomain( num_dims, [[-1,1]] )
          GP = GaussianProcess()
          GP.function_domain( func_domain )
          GP.build( build_pts, build_fn_vals )
          
          a = -1.0; b = 1.0
          x = numpy.linspace( a, b, 50 )
          [X,Y] = numpy.meshgrid( x, x )
          eval_pts = numpy.vstack((X.reshape((1,X.shape[0]*X.shape[1] ) ), 
                                   Y.reshape( ( 1, Y.shape[0]*Y.shape[1]))))
          pred_vals = GP.evaluate_set( eval_pts )
          pred_var = GP.evaluate_surface_variance( eval_pts )
          true_vals = self.g( eval_pts )
 
          pred_vals = pred_vals.reshape( ( X.shape[0], X.shape[1] ) )
          pred_var = pred_var.reshape( ( X.shape[0], X.shape[1] ) )
          true_vals = true_vals.reshape( ( X.shape[0], X.shape[1] ) )

          fig = plot.pylab.figure(2)
          ax = fig.add_subplot(111)
          ax.axes.set_aspect('equal')
          plot.pylab.xticks([])
          plot.pylab.yticks([])
          ax.set_xticklabels([])
          ax.set_yticklabels([])
          plot.pylab.xlabel('$x_1$')
          plot.pylab.ylabel('$x_2$')

          # Standard normal distribution functions
          phi = stats.distributions.norm().pdf
          PHI = stats.distributions.norm().cdf
          PHIinv = stats.distributions.norm().ppf
          lim = b # set plot limits
          pred_var = numpy.sqrt(pred_var)  # * 1.96

          build_fn_vals = build_fn_vals[:,0]
          # normalize prediction variance
          pred_var = pred_var / numpy.max( pred_var )

          cax = plot.pylab.imshow(numpy.flipud(pred_var), 
                                  cmap=cm.gray_r, alpha=0.8,
                                  extent=(- lim, lim, - lim, lim))
          norm = plot.pylab.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
          cb = plot.pylab.colorbar( cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.],
                                    norm=norm )
          cb.set_label(r'$\varepsilon[f(x)]$')
          
          plot.pylab.plot( build_pts[0, build_fn_vals <= 0],
                           build_pts[1, build_fn_vals <= 0],
                           'r.', markersize=12 )

          plot.pylab.plot( build_pts[0, build_fn_vals > 0],
                           build_pts[1, build_fn_vals > 0],
                           'b.', markersize=12 )
          
          
          cs = plot.pylab.contour(X, Y, true_vals, [0.9], colors='k', linestyles='dashdot')
          
          cs = plot.pylab.contour(X, Y, pred_var, [0.25], colors='b',
                          linestyles='solid')
          plot.pylab.clabel(cs, fontsize=11)

          cs = plot.pylab.contour(X, Y, pred_var, [0.5], colors='k',
                          linestyles='dashed')
          plot.pylab.clabel(cs, fontsize=11)

          cs = plot.pylab.contour(X, Y, pred_var, [0.75], colors='r',
                          linestyles='solid')
          plot.pylab.clabel(cs, fontsize=11)
          
          #plot.pylab.show()
          
          plot.plot_surface( X, Y, true_vals,
                            show = False, fignum = 3,
                            axislabels = None, 
                            title = None )
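
For reference, the 95% band plotted in the 1D test above is the Gaussian predictive mean plus or minus 1.96 standard deviations; a tiny standalone illustration with made-up numbers (not GP output):

import numpy

pred_mean = numpy.array( [0., 1., 2.] )            # hypothetical predictions
pred_variance = numpy.array( [0.04, 0.09, 0.16] )  # hypothetical variances
half_width = 1.96 * numpy.sqrt( pred_variance )    # 95% two-sided z-value
lower, upper = pred_mean - half_width, pred_mean + half_width
print( lower )  # [-0.392  0.412  1.216]
print( upper )  # [ 0.392  1.588  2.784]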