def step(self, *args):
    """First update the step size, then actually take a step along the gradient.

    Delta-bar-delta style update: every model parameter carries its own
    learning rate in self.muVect, adapted multiplicatively from the agreement
    between the fresh gradient and an exponential average of past gradients.
    """
    g = self.model.grad(*args);
    # Update the weighted Exponential sq avg (running mean of g**2).
    self.sqExpAvgGrad *= self.exponentAvgM;
    self.sqExpAvgGrad += (1-self.exponentAvgM) * g**2;
    # Clamp the denominator away from zero so the division below is safe.
    self.sqExpAvgGrad[:] = where(self.sqExpAvgGrad < EPSILON, EPSILON, self.sqExpAvgGrad);
    # Update the muVect.  Rates grow where g agrees in sign with the running
    # average and shrink where it disagrees.  NOTE: expAvgGrad is still the
    # PREVIOUS average at this point -- it is refreshed further below.
    possUpdate = 1 + self.qLearningRate * g * self.expAvgGrad / self.sqExpAvgGrad
    #log.debug('max(possUpdate): %.4f, min(possUpdate): %.4f' % (max(possUpdate), min(possUpdate)))
    ## Keep this from going negative.
    possUpdate = where(possUpdate < 0.001, 0.001, possUpdate);
    self.muVect *= possUpdate
    # Do something to cap the update rate. This is allowing the step rate to
    # overpower the decay completely.
    # NOTE(review): __init__ sets self.maxMuVect = None; comparing an array
    # against None here would misbehave -- presumably it is assigned a real
    # cap before step() is called.  TODO confirm against callers.
    self.muVect = where(self.muVect > self.maxMuVect, self.maxMuVect, self.muVect);
    # Then update the exponential average (after its old value was consumed).
    self.expAvgGrad *= self.exponentAvgM;
    self.expAvgGrad += (1-self.exponentAvgM) * g;
    # Take the step using per-parameter learning rates.
    self.model.params -= self.muVect * g
    Trainer.step(self,*args)
def __init__(self, model, onlineChunkSize=10000, initialMu=1, exponentAvgM=0.5, qLearningRate=0.90, numfeats=1, numhidden=0, numout=1, verbose=False, callback=None, callbackargs=None, callbackiteration=1):
    """Initialize the trainer state and the per-parameter step sizes."""
    self.model = model
    self.verbose = verbose
    # Coefficients driving the exponential averages and the rate adaptation.
    self.qLearningRate = qLearningRate
    self.exponentAvgM = exponentAvgM
    # Cap on the per-parameter rates; left unset (None) until configured.
    self.maxMuVect = None
    # One adaptive learning rate per model parameter.
    self.muVect = self.setupMu(onlineChunkSize, initialMu, self.model.params.shape, numfeats, numhidden, numout)
    # Running exponential averages of the gradient and the squared gradient.
    self.expAvgGrad = zeros(self.muVect.shape)
    self.sqExpAvgGrad = ones(self.muVect.shape)
    Trainer.__init__(self, callback, callbackargs, callbackiteration)
def step(self, *args):
    """Adapt the per-parameter step sizes, then move along the gradient."""
    grad = self.model.grad(*args)
    # Decay the running mean of the squared gradient and mix in the new one.
    self.sqExpAvgGrad *= self.exponentAvgM
    self.sqExpAvgGrad += (1 - self.exponentAvgM) * grad ** 2
    # Keep the denominator strictly positive before dividing.
    self.sqExpAvgGrad[:] = where(self.sqExpAvgGrad < EPSILON, EPSILON, self.sqExpAvgGrad)
    # Multiplicative adaptation: rates grow where the fresh gradient agrees
    # with the running average (which still holds its PREVIOUS value here),
    # and shrink where it disagrees.
    factor = 1 + self.qLearningRate * grad * self.expAvgGrad / self.sqExpAvgGrad
    # Floor the factor so a learning rate can never be driven negative.
    factor = where(factor < 0.001, 0.001, factor)
    self.muVect *= factor
    # Cap the rates so the step size cannot overpower the decay entirely.
    self.muVect = where(self.muVect > self.maxMuVect, self.maxMuVect, self.muVect)
    # Only now fold the fresh gradient into the running average.
    self.expAvgGrad *= self.exponentAvgM
    self.expAvgGrad += (1 - self.exponentAvgM) * grad
    self.model.params -= self.muVect * grad
    Trainer.step(self, *args)
def __init__(self, model, stepsize=0.1, verbose=False, callback=None, callbackargs=None, callbackiteration=1):
    """Remember the model and the fixed gradient step size."""
    self.verbose = verbose
    self.model = model
    self.stepsize = stepsize
    Trainer.__init__(self, callback, callbackargs, callbackiteration)
def step(self, *args):
    """Take one momentum gradient step, guarding against NaN blow-up.

    Updates the velocity self.inc as momentum * inc - stepsize * grad and
    adds it to the model parameters.  If the velocity has gone NaN it is
    reset to zero so training can continue from the current parameters.
    """
    self.inc = \
        self.momentum * self.inc - self.stepsize * self.model.grad(*args)
    # A single NaN poisons the whole sum, so this catches any bad element.
    if isnan(sum(self.inc)):
        print('nan!')  # parenthesized form works in both Python 2 and 3
        self.inc = zeros(self.inc.shape, dtype=float)
    self.model.params += self.inc
    Trainer.step(self, *args)
def step(self, *args):
    """Apply one momentum-smoothed gradient update to the model parameters.

    The velocity (self.inc) is decayed by the momentum coefficient, pushed
    down the new gradient, and then added to the parameters.  A NaN velocity
    is reported and zeroed so the run can recover.
    """
    self.inc = \
        self.momentum * self.inc - self.stepsize * self.model.grad(*args)
    if isnan(sum(self.inc)):
        # sum() is NaN iff any element is NaN.
        print('nan!')  # parenthesized: valid in Python 2 and Python 3
        self.inc = zeros(self.inc.shape, dtype=float)
    self.model.params += self.inc
    Trainer.step(self, *args)
def __init__(self, model, momentum, stepsize, callback=None, callbackargs=None, callbackiteration=1):
    """Store the hyperparameters and start with a zero velocity vector."""
    self.model = model
    self.momentum = momentum
    self.stepsize = stepsize
    # Velocity term, one entry per model parameter, initially all zero.
    shape = self.model.params.shape
    self.inc = zeros(shape, dtype=float)
    Trainer.__init__(self, callback, callbackargs, callbackiteration)
def __init__(self, model, cgiterations, callback=None, callbackargs=None, callbackiteration=1):
    """Keep the model and the conjugate-gradient iteration budget."""
    # Upper bound on CG iterations performed by each step() call.
    self.cgiterations = cgiterations
    self.model = model
    Trainer.__init__(self, callback, callbackargs, callbackiteration)
def __init__(self, model, optiterations=10, callback=None, callbackargs=None, callbackiteration=1):
    """Record the model and the optimizer's per-step iteration limit."""
    self.optiterations = optiterations
    self.model = model
    Trainer.__init__(self, callback, callbackargs, callbackiteration)
def __init__(self, model, stepsize=0.1, verbose=False, callback=None, callbackargs=None, callbackiteration=1):
    """Plain gradient-descent trainer: keep the model and a fixed step size."""
    self.stepsize = stepsize
    self.model = model
    self.verbose = verbose
    Trainer.__init__(self, callback, callbackargs, callbackiteration)
def __init__(self, model, momentum, stepsize, callback=None, callbackargs=None, callbackiteration=1):
    """Momentum trainer setup: hyperparameters plus an all-zero velocity."""
    self.momentum = momentum
    self.stepsize = stepsize
    self.model = model
    # The momentum velocity starts at rest, matching the parameter shape.
    self.inc = zeros(model.params.shape, dtype=float)
    Trainer.__init__(self, callback, callbackargs, callbackiteration)
def __init__(self, model, onlineChunkSize=10000, initialMu=1, exponentAvgM=0.5, qLearningRate=0.90, numfeats=1, numhidden=0, numout=1, verbose=False, callback=None, callbackargs=None, callbackiteration=1):
    """Set up the adaptive-rate trainer: model, averaging constants, rates."""
    self.verbose = verbose
    self.model = model
    # Smoothing constant for the exponential averages and the gain used when
    # adapting the per-parameter learning rates.
    self.exponentAvgM = exponentAvgM
    self.qLearningRate = qLearningRate
    # Per-parameter rate cap; remains None until assigned elsewhere.
    self.maxMuVect = None
    # Build one adaptive learning rate per parameter of the model.
    self.muVect = self.setupMu(onlineChunkSize, initialMu, self.model.params.shape, numfeats, numhidden, numout)
    # Exponential averages of the gradient and of its square.
    self.expAvgGrad = zeros(self.muVect.shape)
    self.sqExpAvgGrad = ones(self.muVect.shape)
    Trainer.__init__(self, callback, callbackargs, callbackiteration)
def step(self, *args):
    """Run a bounded conjugate-gradient minimization and adopt its result."""
    # Start CG from a copy so the optimizer cannot mutate the live params.
    start = self.model.params.copy()
    best = fmin_cg(self.cost, start, self.grad,
                   args=args, maxiter=self.cgiterations, disp=1)
    # Hand a fresh copy of the optimum back to the model.
    updateparams(self.model, best.copy())
    Trainer.step(self, *args)
def __init__(self, model, optiterations=10, callback=None, callbackargs=None, callbackiteration=1):
    """Keep a reference to the model and how many iterations to optimize."""
    self.model = model
    # Iteration budget handed to the optimizer on each step.
    self.optiterations = optiterations
    Trainer.__init__(self, callback, callbackargs, callbackiteration)
def step(self, *args):
    """Move the parameters one fixed-size step against the gradient."""
    self.model.params -= self.stepsize * self.model.grad(*args)
    Trainer.step(self, *args)
def step(self, *args):
    """Plain gradient descent: params <- params - stepsize * grad."""
    gradient = self.model.grad(*args)
    delta = self.stepsize * gradient
    self.model.params -= delta
    Trainer.step(self, *args)
def step(self,*args):
    """Take one training step by running a conjugate-gradient minimization."""
    # fmin_cg minimizes self.cost starting from a copy of the current
    # parameters, using self.grad for gradients, for at most
    # self.cgiterations iterations (disp=1 prints convergence info).
    # The optimum is copied before being written back into the model.
    updateparams(self.model, fmin_cg(\
        self.cost,self.model.params.copy(),self.grad,\
        args=args,maxiter=self.cgiterations,disp=1).copy())
    Trainer.step(self,*args)
def __init__(self, model, cgiterations, callback=None, callbackargs=None, callbackiteration=1):
    """Conjugate-gradient trainer setup: model plus per-step CG limit."""
    # How many CG iterations each step() may spend.
    self.cgiterations = cgiterations
    self.model = model
    Trainer.__init__(self, callback, callbackargs, callbackiteration)