Example #1
   def train(self, lambda_, reguess=True):
   #{
      self.lambd = lambda_
      if self.lambd < 0.0:
         # a negative penalty makes no sense for the L1 term, so fail fast
         raise ValueError("regularization parameter, lambda, in LASSO cannot be negative")

      if reguess:
         self.obtain_guess()

      # solver options are passed positionally; see l_bfgs for their meaning
      optimizer = l_bfgs(self, 40, 0, 0.05, 0)
      max_iter = 5000
      converged = False
      cur_iter = 0
      print_iter = True
      print('beginning minimization of LASSO objective function with lambda = ' + str(self.lambd))
      for i in range(max_iter):
         cur_iter += 1
         converged = optimizer.next_step()
         if converged:
            break
         if print_iter:
            print("  " + str(cur_iter) + "  " + "{:.12f}".format(optimizer.value) + "  " + str(optimizer.error) + "  " + optimizer.comment)

      if converged:
         print("  " + str(cur_iter) + "  " + "{:.12f}".format(optimizer.value) + "  " + str(optimizer.error) + "  Optimization Converged")
      else:
         print("  " + str(cur_iter) + "  " + "{:.12f}".format(optimizer.value) + "  " + str(optimizer.error) + "  Optimization Failed")

      return converged
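
The LASSO objective that this train method minimizes is not shown in the snippet; for reference, a minimal sketch of the standard form, squared residual plus an L1 penalty (the function and argument names here are assumptions, not this class's API):

import numpy as np

def lasso_objective(w, X, y, lambd):
   # 0.5 * ||Xw - y||^2 + lambda * ||w||_1; the 0.5 factor is a common
   # convention and an assumption about this class's actual objective
   residual = X.dot(w) - y
   return 0.5 * np.dot(residual, residual) + lambd * np.sum(np.abs(w))
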
   def solve(self, max_iter):
   #{
      self.obtain_guess()
      optimizer = l_bfgs(self)

      converged = False
      cur_iter = 0
      for i in range(max_iter):
         cur_iter += 1
         converged = optimizer.next_step()
         if converged:
            break
         print("  " + str(cur_iter) + "  " + str(optimizer.value) + "  " + str(optimizer.error) + "  " + optimizer.comment)

      if converged:
         print("  " + str(cur_iter) + "  " + str(optimizer.value) + "  " + str(optimizer.error) + "  Optimization Converged")
      else:
         print('optimization failed to converge')
Example #3
   def train(self, z_, guess_phi, guess_theta, guess_sigma_a_sq):
   #{
   #  z  -  training data
   #  w  -  differenced training data with zero mean
   #  mu -  mean of differenced training data
      self.z = z_
      # apply first differencing d times to remove trend
      self.w = np.array(self.z)
      for i in range(self.d):
         differenced = np.zeros(len(self.w) - 1)
         for j in range(len(differenced)):
            differenced[j] = self.w[j+1] - self.w[j]
         self.w = np.array(differenced)
      # center the differenced series on its sample mean
      self.mu = float(np.sum(self.w)) / float(len(self.w))
      self.w = self.w - self.mu

      self.obtain_guess(guess_phi, guess_theta, guess_sigma_a_sq)

      #self.coord_scan(1,np.arange(0.1,0.4,0.005))

      optimizer = l_bfgs(self, 20, 0, 0.05, 0)

      # evaluate the objective once at the initial guess
      self.value()
      max_iter = 50
      converged = False
      cur_iter = 0
      print('beginning minimization of negative of log-likelihood function')
      for i in range(max_iter):
         cur_iter += 1
         converged = optimizer.next_step()
         if converged:
            break
         print("  " + str(cur_iter) + "  " + "{:.12f}".format(optimizer.value) + "  " + str(optimizer.error) + "  " + optimizer.comment)

      if converged:
         print("  " + str(cur_iter) + "  " + "{:.12f}".format(optimizer.value) + "  " + str(optimizer.error) + "  Optimization Converged")
      else:
         print('optimization failed to converge')

      #S = self.compute_S()
      #print 'this is the final S ',S
      #print 'this is the optimal sigma_a_sq ',S/float(len(self.w))
      return converged
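
The nested loop above applies first differencing d times; a small self-contained check (NumPy only, with d = 2 chosen for illustration) confirming it matches NumPy's built-in np.diff:

import numpy as np

z = np.array([1.0, 4.0, 9.0, 16.0, 25.0])   # toy series standing in for self.z
w = np.array(z)
for _ in range(2):                           # d = 2 for this illustration
   diffed = np.zeros(len(w) - 1)
   for j in range(len(diffed)):
      diffed[j] = w[j+1] - w[j]
   w = np.array(diffed)
assert np.allclose(w, np.diff(z, n=2))       # identical to the library routine
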
   def train(self, tol=1.0E-7, print_iter=False):
   #{
      self.tolerance = tol
      optimizer = l_bfgs(self, 40, 0, 0.05, 0)
      max_iter = 5000
      converged = False
      cur_iter = 0
      print('beginning minimization of negative log likelihood for logistic regression')
      for i in range(max_iter):
         cur_iter += 1
         converged = optimizer.next_step()
         if converged:
            break
         if print_iter:
            print("  " + str(cur_iter) + "  " + "{:.12f}".format(optimizer.value) + "  " + str(optimizer.error) + "  " + optimizer.comment)

      if converged:
         print("  " + str(cur_iter) + "  " + "{:.12f}".format(optimizer.value) + "  " + str(optimizer.error) + "  Optimization Converged")
      else:
         print("  " + str(cur_iter) + "  " + "{:.12f}".format(optimizer.value) + "  " + str(optimizer.error) + "  Optimization Failed")
      return converged
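
The logistic-regression objective itself is not shown in this snippet; for reference, a minimal sketch of the binary negative log likelihood it presumably minimizes (the function name, argument names, and shapes here are assumptions):

import numpy as np

def logistic_nll(w, X, y):
   # y holds 0/1 labels; the per-sample loss is log(1 + exp(z)) - y*z,
   # written with np.logaddexp for numerical stability at large |z|
   z = X.dot(w)
   return np.sum(np.logaddexp(0.0, z) - y * z)
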
 def train(self, X_train, Y_train, tol=1.0E-7, algo=1, print_iter=False):
     # TODO reexpression of class labels as vectors
     self.X_train = X_train
     if self.is_classification:
         # we assume we have been passed a column vector of integer labels;
         # expand it into a one-hot matrix with one column per class
         self.Y_train = np.zeros((Y_train.shape[0], np.amax(Y_train)+1), dtype=int)
         for i in range(Y_train.shape[0]):
             self.Y_train[i, Y_train[i,0]] = 1
     else:
         self.Y_train = Y_train
     self.tolerance = tol
     if algo == 0:
         optimizer = steepest_descent(self)
     elif algo == 1:
         optimizer = l_bfgs(self, 20, 0, 0.5, 0)
     else:
         # fail fast rather than hit a NameError on an unbound optimizer below
         raise ValueError('optimizer not recognized')
     max_iter = 5000
     converged = False
     cur_iter = 0
     print('beginning optimization of neural network')
     for i in range(max_iter):
         cur_iter += 1
         converged = optimizer.next_step()
         if converged:
             break
         if print_iter:
             print("  " + str(cur_iter) + "  " + "{:.12f}".format(optimizer.value) + "  " +
                   str(optimizer.error) + "  " + optimizer.comment)
     if converged:
         print("  " + str(cur_iter) + "  " + "{:.12f}".format(optimizer.value) + "  " +
               str(optimizer.error) + "  Optimization Converged")
     else:
         print("  " + str(cur_iter) + "  " + "{:.12f}".format(optimizer.value) + "  " +
               str(optimizer.error) + "  Optimization Failed")
     return converged
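
The label-expansion loop above builds a one-hot matrix row by row; an equivalent vectorized form (assuming, as the loop does, a column vector of non-negative integer labels) is:

import numpy as np

labels = np.array([[0], [2], [1]])            # column vector of integer labels
n_classes = np.amax(labels) + 1
one_hot = np.eye(n_classes, dtype=int)[labels[:, 0]]
# one_hot[i, labels[i, 0]] == 1 and every other entry is 0
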
#! /usr/bin/env python
import sys
import os
sys.path.append(os.getcwd() + '/nonlinear_solvers/')
from quadratic_test import quadratic_test
from steepest_descent import steepest_descent
from l_bfgs import l_bfgs

max_iter = 4000
problem_size = 10

problem = quadratic_test(problem_size)
problem.obtain_guess()
#optimizer = steepest_descent(problem)
optimizer = l_bfgs(problem)

converged = False
cur_iter = 0
for i in range(max_iter):
   cur_iter += 1
   converged = optimizer.next_step()
   if converged:
      break
   print("  " + str(cur_iter) + "  " + str(optimizer.value) + "  " + str(optimizer.error) + "  " + optimizer.comment)

if converged:
   print("  " + str(cur_iter) + "  " + str(optimizer.value) + "  " + str(optimizer.error) + "  Optimization Converged")
else:
   print('optimization failed to converge')

print('doing the same with quadratic_test internal solve function')
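
The iterate/print/check-convergence loop is repeated nearly verbatim in every train method above and in this driver; a hypothetical helper (run_optimizer is an illustration, not part of this codebase) shows how it could be factored out for any object exposing next_step(), value, error, and comment:

def run_optimizer(optimizer, max_iter, print_iter=True):
   # drive the optimizer until next_step() reports convergence or the
   # iteration budget runs out, mirroring the loops used above
   for cur_iter in range(1, max_iter + 1):
      if optimizer.next_step():
         print("  " + str(cur_iter) + "  " + str(optimizer.value) + "  " +
               str(optimizer.error) + "  Optimization Converged")
         return True
      if print_iter:
         print("  " + str(cur_iter) + "  " + str(optimizer.value) + "  " +
               str(optimizer.error) + "  " + optimizer.comment)
   print('optimization failed to converge')
   return False

# e.g. converged = run_optimizer(l_bfgs(problem), max_iter)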