Example #1
    def step(self, *args):
        """First update the step size, then take a step along the gradient."""
        g = self.model.grad(*args)

        # Update the exponentially weighted average of the squared gradient,
        # then clamp it away from zero so the division below stays finite.
        self.sqExpAvgGrad *= self.exponentAvgM
        self.sqExpAvgGrad += (1 - self.exponentAvgM) * g**2
        self.sqExpAvgGrad[:] = where(self.sqExpAvgGrad < EPSILON, EPSILON,
                                     self.sqExpAvgGrad)

        # Update the per-parameter step sizes (muVect): grow them where the
        # current gradient agrees with the running average, shrink them
        # where it disagrees.
        possUpdate = 1 + self.qLearningRate * g * self.expAvgGrad / self.sqExpAvgGrad
        # Keep the multiplicative update from going negative.
        possUpdate = where(possUpdate < 0.001, 0.001, possUpdate)
        self.muVect *= possUpdate

        # Cap the step sizes; otherwise the step rate can overpower the
        # decay completely.
        self.muVect = where(self.muVect > self.maxMuVect, self.maxMuVect,
                            self.muVect)

        # Then update the exponential average of the gradient itself.
        self.expAvgGrad *= self.exponentAvgM
        self.expAvgGrad += (1 - self.exponentAvgM) * g

        self.model.params -= self.muVect * g
        Trainer.step(self, *args)
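This step method relies on state initialized elsewhere in the trainer. Below is a minimal sketch of a compatible constructor, assuming NumPy throughout; the class name, constructor signature, and every default value are illustrative guesses, not taken from the original repository:

    import numpy as np
    from numpy import where

    EPSILON = 1e-8  # assumed floor for the squared-gradient average

    class AdaptiveTrainer(Trainer):  # Trainer is the base class from the surrounding repo
        def __init__(self, model, qLearningRate=0.01, exponentAvgM=0.9,
                     initialMu=0.001, maxMuVect=0.1):
            Trainer.__init__(self, model)  # assumed base-class signature
            self.model = model
            self.qLearningRate = qLearningRate
            self.exponentAvgM = exponentAvgM          # decay for both running averages
            n = len(self.model.params)
            self.muVect = initialMu * np.ones(n)      # per-parameter step sizes
            self.maxMuVect = maxMuVect                # cap applied after each update
            self.expAvgGrad = np.zeros(n)             # running average of g
            self.sqExpAvgGrad = EPSILON * np.ones(n)  # running average of g**2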
Example #2
    def step(self, *args):
        """Take one momentum-smoothed step along the gradient."""
        self.inc = (self.momentum * self.inc
                    - self.stepsize * self.model.grad(*args))
        # If the gradient diverged, reset the increment instead of
        # propagating NaNs into the parameters.
        if isnan(sum(self.inc)):
            print('nan!')
            self.inc = zeros(self.inc.shape, dtype=float)
        self.model.params += self.inc
        Trainer.step(self, *args)
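The same classical momentum rule, inc <- m*inc - lr*g followed by params <- params + inc, as a self-contained function; the function name, constants, and toy gradient are illustrative only:

    import numpy as np

    def momentum_step(params, inc, g, momentum=0.9, stepsize=0.01):
        """One classical momentum update on plain NumPy arrays."""
        inc = momentum * inc - stepsize * g
        if np.isnan(inc).any():      # same NaN guard as above
            inc = np.zeros_like(inc)
        return params + inc, inc

    # Usage: carry `inc` across iterations so the velocity accumulates.
    params, inc = np.zeros(3), np.zeros(3)
    for _ in range(10):
        params, inc = momentum_step(params, inc, np.array([1.0, -2.0, 0.5]))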
Example #3
    def step(self, *args):
        """Run up to cgiterations of conjugate-gradient minimization, then
        install the optimized parameters back into the model."""
        newParams = fmin_cg(self.cost, self.model.params.copy(), self.grad,
                            args=args, maxiter=self.cgiterations, disp=1)
        updateparams(self.model, newParams.copy())
        Trainer.step(self, *args)
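fmin_cg here is scipy.optimize.fmin_cg, which minimizes a scalar function of the flat parameter vector given its gradient. Below is a sketch of what the self.cost and self.grad callbacks might look like; the write-params-then-evaluate pattern and the model.cost method are assumptions about the model interface, not taken from the original code:

    from scipy.optimize import fmin_cg

    def cost(self, params, *args):
        """Scalar objective; fmin_cg passes the flat parameter vector first."""
        self.model.params[:] = params   # assumed: params is a flat array
        return self.model.cost(*args)   # assumed model method

    def grad(self, params, *args):
        """Gradient of the objective, same shape as `params`."""
        self.model.params[:] = params
        return self.model.grad(*args)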
Example #4
    def step(self, *args):
        """Take one plain gradient-descent step with a fixed step size."""
        g = self.model.grad(*args)
        self.model.params -= self.stepsize * g
        Trainer.step(self, *args)
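All of the trainers above share one interface: compute an update from model.grad(*args), apply it to model.params, then delegate to Trainer.step for shared bookkeeping. A hypothetical driver loop; the class name, model, and batches are placeholders for whatever the surrounding repository provides:

    # `model` must expose .params (a flat NumPy array) and .grad(*args),
    # which is the contract every step() above relies on.
    trainer = GradientDescentTrainer(model, stepsize=0.01)
    for epoch in range(100):
        for batch in batches:
            trainer.step(batch)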