Example #1
    # assumes numpy as np and the project's vector_distance helper are in scope
    def setup_learning(self, problem):
        ''' set up hyperparameter variables, bounds and noise-ratio
            constraints for the hyperparameter learning problem '''

        Train = self.Train
        Hypers = self.Hypers

        # variables and bound constraints
        X = Train.X
        Y = Train.Y
        DY = Train.DY

        # noise constraints
        sig_ny = Hypers['sig_ny']
        sig_ndy = Hypers['sig_ndy']
        bound_buffer = 2.0

        # feature and target ranges
        DX_min, DX_max, _ = vector_distance(X)
        DY_min, DY_max, _ = vector_distance(Y)

        if DX_min < 1e-6: DX_min = 1e-6
        if DY_min < 1e-6: DY_min = 1e-6

        sig_lo = np.log10(DY_min) - bound_buffer
        sig_hi = np.log10(DY_max) + bound_buffer
        len_lo = np.log10(DX_min) - bound_buffer
        len_hi = np.log10(DX_max) + bound_buffer

        # noise ranges
        sny_lo = sig_ny - bound_buffer
        sny_hi = sig_ny + bound_buffer
        sndy_lo = sig_ndy - bound_buffer
        sndy_hi = sig_ndy + bound_buffer

        # some noise limits
        max_noise_ratio = -1.0
        min_noise_ratio = -8.0
        Kcond_limit = -12.0

        # set variables and bound constraints
        problem.variables = [
            #   ['tag'     , x0               , (lb,ub)          , scl ] ,
            ['sig_f', Hypers['sig_f'], (sig_lo, sig_hi), 1.0],
            ['len_s', Hypers['len_s'], (len_lo, len_hi), 1.0],
            ['sig_ny', Hypers['sig_ny'], (sny_lo, sny_hi), 1.0],
            ['sig_ndy', Hypers['sig_ndy'], (sndy_lo, sndy_hi), 1.0],
        ]

        problem.constraints = [
            #   [ function_handle     , ('output'    ,'><=',  val), scl] ,
            [self.learning_cons, ('nze_rat_y', '<', max_noise_ratio), 1.],
            [self.learning_cons, ('nze_rat_y', '>', min_noise_ratio), 1.],
            [self.learning_cons, ('nze_rat_dy', '<', max_noise_ratio), 1.],
            [self.learning_cons, ('nze_rat_dy', '>', min_noise_ratio), 1.],
            [self.learning_cons, ('rel_nze', '<', 0.0), 1.],
            #[ self.learning_cons , ('nze_dev'   ,'<', 1.0            ), 1. ] ,
            #[ self.likelihood_cons , ('k_cond'    ,'>', Kcond_limit    ), 1. ] ,
        ]

        return problem
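
Both the method above and the sampling routines below call a vector_distance helper that this page does not show. From the call sites it evidently returns the minimum and maximum pairwise L2 distances among the rows of its argument, plus a third value the examples discard. A minimal sketch under that assumption (name and return order inferred from usage, not from the original source):

import numpy as np
from scipy.spatial.distance import pdist

def vector_distance(X):
    ''' hypothetical stand-in: minimum and maximum pairwise L2 distance
        among the rows of X, plus the full condensed distance vector '''
    X = np.asarray(X, dtype=float)
    if X.ndim == 1:
        X = X[:, None]          # treat a flat array as a column of scalars
    D = pdist(X)                # condensed pairwise Euclidean distances
    return D.min(), D.max(), D  # assumes X has at least two rows

Note that setup_learning places its bounds in log10 space with a two-decade buffer (bound_buffer) on each side of the observed feature and target ranges; DX_min and DY_min are floored at 1e-6 beforehand so the logarithm stays finite.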
Example #2
import numpy as np

def lhc_uniform(XB, NI, XI=None, maxits=100):
    ''' Latin Hypercube Sampling with uniform density
        iterates to maximize minimum L2 distance
    '''

    print "Latin Hypercube Sampling ... "

    # dimension
    ND = XB.shape[0]

    # initial points to respect
    if XI is None:
        XI = np.empty([0, ND])

    # output points
    XO = []

    # initialize
    mindiff = 0

    # maximize minimum distance
    for it in range(maxits):

        # samples
        S = np.zeros([NI, ND])

        # populate samples
        for i_d in range(ND):

            # uniform distribution [0,1], latin hypercube binning
            # (1-D draw; the original's [1, NI] shape cannot be
            #  assigned into this 1-D slice)
            S[:, i_d] = (np.random.random(NI) +
                         np.random.permutation(NI)) / NI

        # scale to hypercube bounds
        XS = S * (XB[:, 1] - XB[:, 0]) + XB[:, 0]

        # add initial points
        XX = np.vstack([XI, XS])

        if maxits > 1:

            # calc distances
            vecdiff = vector_distance(XX)[0]

            # update
            if vecdiff > mindiff:
                mindiff = vecdiff
                XO = XX

        else:
            XO = XX

    #: for iterate

    if maxits > 1:
        print('  Minimum Distance = %.4g' % mindiff)

    return XO
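
A quick usage sketch for lhc_uniform (the bounds array has one (lb, ub) row per dimension; all values here are illustrative):

import numpy as np

# sample 20 points in the box [0, 1] x [-5, 5]
XB = np.array([[0.0, 1.0],
               [-5.0, 5.0]])
X = lhc_uniform(XB, NI=20, maxits=50)
print(X.shape)   # (20, 2)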

def sub_sample(XS, NI, II=None, maxits=100):
    ''' sub sample an existing dataset
        iterates to maximize minimum L2 distance
    '''

    print("Monte Carlo SubSampling ... ")

    # dimension
    NX, ND = XS.shape

    # initial points to respect
    if II is None:
        II = np.empty(0, dtype=int)
    else:
        II = np.array(II, dtype=int)   # integer dtype so the indexing below works

    # output points
    XO = []
    IO = []

    # initialize
    mindiff = 0

    # maximize minimum distance
    for it in range(maxits):

        # random candidate indices, excluding the respected ones
        i_d = np.random.permutation(NX)
        for i in II:
            i_d = i_d[i_d != i]

        # keep the first NI candidates (the original sliced [1:NI+1],
        # silently dropping one index)
        i_d = i_d[:NI]

        # prepend the respected indices
        i_d = np.hstack([II, i_d])

        # samples
        XX = XS[i_d, :]

        # calc distances
        vecdiff = vector_distance(XX)[0]

        # update
        if vecdiff > mindiff:
            mindiff = vecdiff
            XO = XX
            IO = i_d

    #: for iterate

    print('  Minimum Distance = %.4g' % mindiff)

    return XO, IO
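
And a corresponding sketch for sub_sample, again with illustrative values; note that the respected indices in II are returned on top of the NI newly chosen ones:

import numpy as np

# down-select 10 well-spread rows from a 200-point dataset,
# forcing rows 0 and 3 into the result
XS = np.random.random([200, 4])
XO, IO = sub_sample(XS, NI=10, II=[0, 3], maxits=50)
print(XO.shape)   # (12, 4): 2 respected + 10 sampled rows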