Example #1
def gp_train(loghyper, covfunc, X, y, R=None, w=None):
    ''' gp_train() returns the learnt hyperparameters.
    Following chapter 5.4.1 in Rasmussen and Williams: GPs for ML (2006).
    The original MATLAB implementation of the optimizer used here, minimize.m,
    is copyright (C) 1999 - 2006, Carl Edward Rasmussen.
    The Python adaptation used here is by Roland Memisevic (2008).
    Inputs R and w are needed for XGP regression. '''

    [logtheta, fvals, iter, nml] = minimize.run(loghyper, nlml, dnlml, [covfunc, X, y, R, w], maxnumfuneval=100)
    return logtheta, nml
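
A minimal usage sketch for the snippet above. The toy data, the initial log-hyperparameters, and the covariance identifier 'covSEiso' are placeholders, and the call assumes gp_train's module already provides minimize, nlml and dnlml as shown:

import numpy as np

# Toy 1-D regression data (placeholders, not part of the original snippet).
X = np.linspace(0.0, 10.0, 30).reshape(-1, 1)
y = np.sin(X) + 0.1 * np.random.randn(30, 1)

# Initial log-hyperparameters, e.g. [log(lengthscale), log(signal std), log(noise std)].
loghyper = np.log(np.array([1.0, 1.0, 0.1]))

# 'covSEiso' stands in for whatever covariance specification the module expects.
logtheta, nml = gp_train(loghyper, 'covSEiso', X, y)
print('optimized log-hyperparameters: %s' % logtheta)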
Example #2
def min_wrapper(hyp, F, Flag, *varargin):
    # Utilize scipy.optimize functions, scg.py, or minimize.py to
    # minimize the negative log marginal likelihood.
    
    x = convert_to_array(hyp)   # convert the hyperparameter class to an array

    if Flag == 'CG':
        aa = cg(nlml, x, dnlml, (F,hyp,varargin), maxiter=100, disp=False, full_output=True)
        x = aa[0]; fopt = aa[1]; funcCalls = aa[2]; gradcalls = aa[3]
        if aa[4] == 1:
            print("Maximum number of iterations exceeded.")
        elif aa[4] == 2:
            print("Gradient and/or function calls not changing.")
        gopt = dnlml(x,F,hyp,varargin)
        return convert_to_class(x,hyp), fopt, gopt, funcCalls

    elif Flag == 'BFGS':
        # Use BFGS
        aa = bfgs(nlml, x, dnlml, (F,hyp,varargin), maxiter=100, disp=False, full_output=True)
        x = aa[0]; fopt = aa[1]; gopt = aa[2]; Bopt = aa[3]; funcCalls = aa[4]; gradcalls = aa[5]
        if aa[6] == 1:
            print("Maximum number of iterations exceeded.")
        elif aa[6] == 2:
            print("Gradient and/or function calls not changing.")
        if isinstance(fopt, ndarray):
            fopt = fopt[0]
        return convert_to_class(x,hyp), fopt, gopt, funcCalls

    elif Flag == 'SCG':
        # use scg.py
        aa   = scg(x, nlml, dnlml, (F,hyp,varargin), niters = 100)
        hyp  = convert_to_class(aa[0],hyp)
        fopt = aa[1][-1]
        gopt = dnlml(aa[0],F,hyp,varargin)
        return hyp, fopt, gopt, len(aa[1])

    elif Flag == 'Minimize':
        # use minimize.py
        aa   = run(x, nlml, dnlml, (F,hyp,varargin), maxnumfuneval=-100)
        hyp  = convert_to_class(aa[0],hyp)
        fopt = aa[1][-1]
        gopt = dnlml(aa[0],F,hyp,varargin)
        return hyp, fopt, gopt, len(aa[1])

    else:
        raise Exception('Incorrect usage of optimization flag in min_wrapper')
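
A hypothetical invocation of min_wrapper. The hyperparameter object hyp and the bundle F (whatever nlml and dnlml expect alongside the data) come from the surrounding module and are assumptions, not shown in the snippet:

# hyp: hyperparameter object understood by convert_to_array/convert_to_class
# F:   model/data bundle passed through to nlml and dnlml
hyp_opt, fopt, gopt, n_calls = min_wrapper(hyp, F, 'SCG')
print('negative log marginal likelihood after optimization: %s' % fopt)

# Flag='CG', 'BFGS' or 'Minimize' selects a different optimizer while keeping
# the same return signature (hyperparameters, fopt, gopt, number of evaluations).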
Example #3
def gp_train(loghyper, covfunc, X, y, R=None, w=None):
    ''' gp_train() returns the learnt hyperparameters.
    Following chapter 5.4.1 in Rasmussen and Williams: GPs for ML (2006).
    The original MATLAB implementation of the optimizer used here, minimize.m,
    is copyright (C) 1999 - 2006, Carl Edward Rasmussen.
    The Python adaptation used here is by Roland Memisevic (2008).

    Inputs R and w are needed for XGP regression. '''
    if R is None:
        print('        gp_train()')
    else:
        print('        xgp_train()')

    [logtheta, fvals, iter] = minimize.run(loghyper,
                                           nlml,
                                           dnlml, [covfunc, X, y, R, w],
                                           maxnumfuneval=100)

    return logtheta
Example #4
    def train_by_optimizer(self,
                           x_val,
                           y_val,
                           number_epoch=10,
                           batch_size=None):
        if self.meanfunc == 'zero':
            params = {
                'sigma_n': self.sigma_n,
                'sigma_f': self.sigma_f,
                'l_k': self.l_k
            }
        else:
            params = {
                'mean': self.mean,
                'sigma_n': self.sigma_n,
                'sigma_f': self.sigma_f,
                'l_k': self.l_k
            }
        import minimize
        from copy import deepcopy  # deepcopy is used below on the optimizer result
        if batch_size is None:
            self.batch_size = len(x_val)
        else:
            self.batch_size = batch_size
        self.x_val = x_val
        self.y_val = y_val

        print('start to optimize')
        likelihood = self.get_likelihood(x_val, y_val)
        print('BEGIN Training, Log Likelihood = %.2f' % likelihood)

        opt_results = minimize.run(self._optimizer_f,
                                   self._get_hypArray(params),
                                   length=number_epoch,
                                   verbose=True)
        optimalHyp = deepcopy(opt_results[0])
        self._apply_hyp(optimalHyp)

        likelihood = self.get_likelihood(x_val, y_val)
        print('END Training, Log Likelihood = %.2f' % likelihood)
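
A hypothetical usage sketch for the method above. The class name GPRegressor and its constructor are placeholders for whichever class defines train_by_optimizer; only the call pattern comes from the snippet:

import numpy as np

# Toy validation data (placeholders).
x_val = np.linspace(0.0, 10.0, 50).reshape(-1, 1)
y_val = np.sin(x_val).ravel() + 0.1 * np.random.randn(50)

model = GPRegressor(meanfunc='zero')       # hypothetical constructor
model.train_by_optimizer(x_val, y_val,
                         number_epoch=20,  # forwarded to minimize.run as `length`
                         batch_size=None)  # None -> use the full data set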
Example #5
 def train_by_optimizer(self, x_val, y_val,
         number_epoch=10, batch_size=None):
     if self.meanfunc == 'zero':
         params  = {'sigma_n':self.sigma_n, 'sigma_f':self.sigma_f, 'l_k':self.l_k}
     else:
         params  = {'mean':self.mean,'sigma_n':self.sigma_n, 'sigma_f':self.sigma_f, 'l_k':self.l_k}
     import minimize
     from copy import deepcopy  # deepcopy is used below on the optimizer result
     if batch_size is None:
         self.batch_size = len(x_val)
     else:
         self.batch_size = batch_size
     self.x_val = x_val
     self.y_val = y_val
     
     print('start to optimize')
     likelihood = self.get_likelihood(x_val, y_val)
     print('BEGIN Training, Log Likelihood = %.2f' % likelihood)
     
     opt_results = minimize.run(self._optimizer_f, self._get_hypArray(params),length=number_epoch,verbose=True)
     optimalHyp = deepcopy(opt_results[0])
     self._apply_hyp(optimalHyp)
     
     likelihood = self.get_likelihood(x_val, y_val)
     print('END Training, Log Likelihood = %.2f' % likelihood)
Example #6
def min_wrapper(hyp, F, Flag, *varargin):
    # Utilize scipy.optimize functions, scg.py, or minimize.py to
    # minimize the negative log marginal likelihood.

    x = convert_to_array(hyp)  # convert the hyperparameter class to an array

    if Flag == 'CG':
        aa = cg(nlml,
                x,
                dnlml, (F, hyp, varargin),
                maxiter=100,
                disp=False,
                full_output=True)
        x = aa[0]
        fopt = aa[1]
        funcCalls = aa[2]
        gradcalls = aa[3]
        if aa[4] == 1:
            print("Maximum number of iterations exceeded.")
        elif aa[4] == 2:
            print("Gradient and/or function calls not changing.")
        gopt = dnlml(x, F, hyp, varargin)
        return convert_to_class(x, hyp), fopt, gopt, funcCalls

    elif Flag == 'BFGS':
        # Use BFGS
        aa = bfgs(nlml,
                  x,
                  dnlml, (F, hyp, varargin),
                  maxiter=100,
                  disp=False,
                  full_output=True)
        x = aa[0]
        fopt = aa[1]
        gopt = aa[2]
        Bopt = aa[3]
        funcCalls = aa[4]
        gradcalls = aa[5]
        if aa[6] == 1:
            print("Maximum number of iterations exceeded.")
        elif aa[6] == 2:
            print("Gradient and/or function calls not changing.")
        if isinstance(fopt, ndarray):
            fopt = fopt[0]
        return convert_to_class(x, hyp), fopt, gopt, funcCalls

    elif Flag == 'SCG':
        # use scg.py
        aa = scg(x, nlml, dnlml, (F, hyp, varargin), niters=100)
        hyp = convert_to_class(aa[0], hyp)
        fopt = aa[1][-1]
        gopt = dnlml(aa[0], F, hyp, varargin)
        return hyp, fopt, gopt, len(aa[1])

    elif Flag == 'Minimize':
        # use minimize.py
        aa = run(x, nlml, dnlml, (F, hyp, varargin), maxnumfuneval=-100)
        hyp = convert_to_class(aa[0], hyp)
        fopt = aa[1][-1]
        gopt = dnlml(aa[0], F, hyp, varargin)
        return hyp, fopt, gopt, len(aa[1])

    else:
        raise Exception('Incorrect usage of optimization flag in min_wrapper')