def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
             momentum=0., verbose=False, batchlearning=False,
             weightdecay=0.):
    """Create a BackpropTrainer to train the specified `module` on the
    specified `dataset`.

    The learning rate gives the ratio by which parameters are changed in
    the direction of the gradient. The learning rate decreases by
    `lrdecay`, which is used to multiply the learning rate after each
    training step. The parameters are also adjusted with respect to
    `momentum`, which is the ratio by which the gradient of the last
    time step is used.

    If `batchlearning` is set, the parameters are updated only at the end
    of each epoch. Default is False.

    `weightdecay` corresponds to the weight decay rate, where 0 is no
    weight decay at all.
    """
    Trainer.__init__(self, module)
    self.setData(dataset)
    self.verbose = verbose
    self.batchlearning = batchlearning
    self.weightdecay = weightdecay
    self.epoch = 0
    self.totalepochs = 0
    # set up gradient descender
    self.descent = GradientDescent()
    self.descent.alpha = learningrate
    self.descent.momentum = momentum
    self.descent.alphadecay = lrdecay
    self.descent.init(module.params)
def __init__(self):
    # standard parameters
    self.epsilon = 2.0      # initial value of exploration size
    self.baseline = 0.0     # moving average baseline, used just for visualisation
    self.best = -1000000.0  # TODO: replace with -inf
    self.symCount = 1.0     # switch for symmetric sampling
    self.gd = GradientDescent()
    self.gamma = 0.9995     # exploration decay factor
def _additionalInit(self):
    if self.sigmaLearningRate is None:
        self.sigmaLearningRate = self.learningRate
    self.gdSig = GradientDescent()
    self.gdSig.alpha = self.sigmaLearningRate
    self.gdSig.rprop = self.rprop
    # stores the list of standard deviations (sigmas)
    self.sigList = ones(self.numParameters) * self.epsilon
    self.gdSig.init(self.sigList)
    self.baseline = None
def __init__(self):
    # standard parameters
    self.epsilon = 2.0      # initial value of sigmas
    self.baseline = 0.0     # moving average baseline, used for sigma adaptation
    self.best = -1000000.0  # TODO: replace with -inf
    self.symCount = 1.0     # switch for symmetric sampling
    self.gd = GradientDescent()
    self.gdSig = GradientDescent()
    self.wDecay = 0.001     # lasso weight decay (0 to deactivate)
def _setInitEvaluable(self, evaluable):
    ContinuousOptimizer._setInitEvaluable(self, evaluable)
    self.current = self._initEvaluable
    self.gd = GradientDescent()
    self.gd.alpha = self.learningRate
    if self.learningRateDecay is not None:
        self.gd.alphadecay = self.learningRateDecay
    self.gd.momentum = self.momentum
    self.gd.rprop = self.rprop
    self.gd.init(self._initEvaluable)
def __init__(self):
    # gradient descender
    self.gd = GradientDescent()
    # create default explorer
    self._explorer = None
    # loglh dataset
    self.loglh = None
    # network to tie module and explorer together
    self.network = None
class FiniteDifferences(ContinuousOptimizer):
    """ Basic finite difference method. """

    epsilon = 1.0
    gamma = 0.999
    batchSize = 10

    # gradient descent parameters
    learningRate = 0.1
    learningRateDecay = None
    momentum = 0.0
    rprop = False

    def _setInitEvaluable(self, evaluable):
        ContinuousOptimizer._setInitEvaluable(self, evaluable)
        self.current = self._initEvaluable
        self.gd = GradientDescent()
        self.gd.alpha = self.learningRate
        if self.learningRateDecay is not None:
            self.gd.alphadecay = self.learningRateDecay
        self.gd.momentum = self.momentum
        self.gd.rprop = self.rprop
        self.gd.init(self._initEvaluable)

    def perturbation(self):
        """ Produce a parameter perturbation. """
        deltas = random.uniform(-self.epsilon, self.epsilon, self.numParameters)
        # reduce epsilon by factor gamma
        self.epsilon *= self.gamma
        return deltas

    def _learnStep(self):
        """ Calls the gradient calculation function and executes a step in the
        direction of the gradient, scaled with a small learning rate alpha. """
        # initialize matrix D and vector R
        D = ones((self.batchSize, self.numParameters))
        R = zeros((self.batchSize, 1))
        # calculate the gradient with the pseudoinverse
        for i in range(self.batchSize):
            deltas = self.perturbation()
            x = self.current + deltas
            D[i, :] = deltas
            R[i, :] = self._oneEvaluation(x)
        beta = dot(pinv(D), R)
        gradient = ravel(beta)
        # update the weights
        self.current = self.gd(gradient)
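# The pseudoinverse step in _learnStep() solves the least-squares system
# D * beta ~ R for the local gradient. Below is a minimal standalone sketch
# of the same estimate using only numpy; the toy objective `quadratic`, the
# sample counts, and the baseline subtraction (which plays the role of the
# bias column used in FDBasic below) are illustrative assumptions, not
# library code.
import numpy as np

def quadratic(x):
    # toy objective whose true gradient at x is 2 * x
    return np.sum(x ** 2)

np.random.seed(0)
x0 = np.array([1.0, -2.0, 0.5])
batch_size, epsilon = 20, 0.1

# one row per perturbation, one evaluation per row (as in _learnStep)
D = np.random.uniform(-epsilon, epsilon, (batch_size, x0.size))
R = np.array([[quadratic(x0 + d) - quadratic(x0)] for d in D])

gradient = np.linalg.pinv(D).dot(R).ravel()
print(gradient)  # approximately 2 * x0 = [2., -4., 1.]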
class FDBasic(FDLearner):
    def __init__(self):
        # standard parameters
        self.epsilon = 1.0
        self.gamma = 0.999
        self.gd = GradientDescent()

    def setModule(self, module):
        FDLearner.setModule(self, module)
        self.gd.init(self.original)

    def perturbate(self):
        """ Perturb the parameters. """
        # perturb the parameters and store the deltas in the dataset
        deltas = random.uniform(-self.epsilon, self.epsilon, self.module.paramdim)
        # reduce epsilon by factor gamma
        self.epsilon *= self.gamma
        self.ds.append('deltas', deltas)
        # change the parameters in the module (params is a pointer!)
        params = self.module.params
        params[:] = self.original + deltas

    def learn(self):
        """ Calls the gradient calculation function and executes a step in the
        direction of the gradient, scaled with a small learning rate alpha. """
        assert self.ds is not None
        assert self.module is not None
        # get the deltas from the dataset
        deltas = self.ds.getField('deltas')
        # initialize matrix D and vector R
        D = ones((self.ds.getNumSequences(), self.module.paramdim + 1))
        R = zeros((self.ds.getNumSequences(), 1))
        # calculate the gradient with the pseudoinverse
        for seq in range(self.ds.getNumSequences()):
            _state, _action, reward = self.ds.getSequence(seq)
            D[seq, :-1] = deltas[seq, :]
            R[seq, :] = mean(reward)
        beta = dot(pinv(D), R)
        gradient = ravel(beta[:-1])
        # update the weights
        self.original = self.gd(gradient)
        self.module._setParameters(self.original)
        self.module.reset()
def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
             momentum=0., verbose=False, batchlearning=False,
             weightdecay=0., errfun=None):
    """Create a BackpropTrainer to train the specified `module` on the
    specified `dataset`.

    The learning rate gives the ratio by which parameters are changed in
    the direction of the gradient. The learning rate decreases by
    `lrdecay`, which is used to multiply the learning rate after each
    training step. The parameters are also adjusted with respect to
    `momentum`, which is the ratio by which the gradient of the last
    time step is used.

    If `batchlearning` is set, the parameters are updated only at the end
    of each epoch. Default is False.

    `weightdecay` corresponds to the weight decay rate, where 0 is no
    weight decay at all.

    Arguments:
        errfun (func): Function that takes 2 positional arguments, the
            target (true) and predicted (estimated) output vectors, and
            returns an estimate of the signed distance to the target
            (true) output, e.g. ``lambda targ, est: targ - est``.
            If None, ``abs_error`` is used.
    """
    Trainer.__init__(self, module)
    self.setData(dataset)
    self.verbose = verbose
    self.batchlearning = batchlearning
    self.weightdecay = weightdecay
    self.epoch = 0
    self.totalepochs = 0
    # set up gradient descender
    self.descent = GradientDescent()
    self.descent.alpha = learningrate
    self.descent.momentum = momentum
    self.descent.alphadecay = lrdecay
    self.descent.init(module.params)
    self.errfun = errfun or abs_error
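# Any callable taking (target, estimate) and returning a signed error vector
# can be passed as `errfun`. A hedged sketch (the network `net`, the dataset
# `ds`, and the clipping threshold are illustrative assumptions):
import numpy as np

def clipped_err(targ, est):
    # signed residual, clipped so single outliers cannot dominate the gradient
    return np.clip(targ - est, -1.0, 1.0)

trainer = BackpropTrainer(net, ds, learningrate=0.01, errfun=clipped_err)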
class PolicyGradientLearner(RLLearner):
    """ The PolicyGradientLearner takes a ReinforcementDataSet which has been
    extended with the log likelihood of each parameter for each time step. It
    additionally takes a Module which has been extended with a gaussian layer
    on top. It then changes the weights of both the gaussian layer and the
    rest of the module according to a specific gradient calculation, which is
    provided by subclasses of this base class. """

    def __init__(self):
        self.gd = GradientDescent()

    def setAlpha(self, alpha):
        """ Pass the alpha value through to the gradient descent object. """
        self.gd.alpha = alpha
        print "patching through to gd", alpha

    def getAlpha(self):
        return self.gd.alpha

    alpha = property(getAlpha, setAlpha)

    def setModule(self, module):
        RLLearner.setModule(self, module)
        self.gd.init(module.params)

    def learn(self):
        """ Calls the gradient calculation function and executes a step in the
        direction of the gradient, scaled with a small learning rate alpha. """
        assert self.ds is not None
        assert self.module is not None
        # calculate the gradient with the specific function from the subclass
        gradient = ravel(self.calculateGradient())
        # scale the gradient if it has too large values
        if max(gradient) > 1000:
            gradient = gradient / max(gradient) * 1000
        # update the parameters
        p = self.gd(gradient)
        self.module._setParameters(p)
        self.module.reset()

    def calculateGradient(self):
        abstractMethod()
def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
             momentum=0., verbose=False, batchlearning=False,
             weightdecay=0.):
    Trainer.__init__(self, module)
    self.setData(dataset)
    self.verbose = verbose
    self.batchlearning = batchlearning
    self.weightdecay = weightdecay
    self.epoch = 0
    self.totalepochs = 0
    # set up gradient descender
    self.descent = GradientDescent()
    self.descent.alpha = learningrate
    self.descent.momentum = momentum
    self.descent.alphadecay = lrdecay
    self.descent.init(module.params)
class SimpleSPSA(FDLearner):
    def __init__(self):
        # standard parameters
        self.epsilon = 2.0      # initial value of exploration size
        self.baseline = 0.0     # moving average baseline, used just for visualisation
        self.best = -1000000.0  # TODO: replace with -inf
        self.symCount = 1.0     # switch for symmetric sampling
        self.gd = GradientDescent()
        self.gamma = 0.9995     # exploration decay factor

    def setModule(self, module):
        """Sets and initializes all module settings."""
        FDLearner.setModule(self, module)
        self.original = zeros(self.module.params.shape)  # stores the parameter set
        self.module._setParameters(self.original)        # initializes the module parameters to zeros
        self.gd.init(self.original)
        self.numOParas = len(self.original)
        self.genDifVect()

    def genDifVect(self):
        # generates a uniform difference vector with the given epsilon
        self.deltas = (random.randint(0, 2, self.numOParas) * 2 - 1) * self.epsilon

    def perturbate(self):
        """ Perturb the parameters. """
        self.symCount *= -1.0  # change sign of perturbation
        self.ds.append('deltas', self.symCount * self.deltas)  # add perturbation to the dataset
        self.module._setParameters(self.original + self.symCount * self.deltas)  # set the actual perturbed parameters in the module

    def learn(self):
        """ Calculates the gradient and executes a step in the direction of
        the gradient, scaled with a learning rate alpha. """
        assert self.ds is not None
        assert self.module is not None
        # calculate the gradient
        reward1 = 0.0  # reward of the positive perturbation
        reward2 = 0.0  # reward of the negative perturbation
        sym = 1.0      # perturbation switch
        seqLen = self.ds.getNumSequences()  # number of sequences done for learning
        for seq in range(seqLen):
            sym *= -1.0
            _state, _action, reward = self.ds.getSequence(seq)
            # add up the rewards of the positive and negative perturbation rollouts respectively
            if sym == 1.0:
                reward1 += sum(reward)
            else:
                reward2 += sum(reward)
        # normalize rewards by seqLen
        reward1 /= float(seqLen)
        reward2 /= float(seqLen)
        self.reward = (reward1 + reward2)
        reward1 *= 2.0
        reward2 *= 2.0
        # check if the reward is the best observed up to now
        if reward1 > self.best:
            self.best = reward1
        if reward2 > self.best:
            self.best = reward2
        # some checks at the first learning sequence
        if self.baseline == 0.0:
            self.baseline = self.reward * 0.99
            fakt = 0.0
            if seqLen / 2 != float(seqLen) / 2.0:
                print "ATTENTION!!! SPSA uses symmetric sampling! The number of episodes per learning step must be even! (2 for deterministic settings, >2 for stochastic settings) A number of episodes of", seqLen, "is odd."
                while(True):
                    sleep(1)
        else:
            # calc the gradients
            if reward1 != reward2:
                # gradient estimate a la SPSA, but with likelihood gradient and normalization (see also "update parameters")
                fakt = (reward1 - reward2) / (2.0 * self.best - reward1 - reward2)
            else:
                fakt = 0.0
        self.baseline = 0.9 * self.baseline + 0.1 * self.reward  # update baseline
        # update parameters
        # as a simplification we use alpha = alpha * epsilon**2 for decaying the
        # step size instead of the usual method from SPSA, resulting in the same
        # update rule as for PGPE
        self.original = self.gd(fakt * self.epsilon * self.epsilon / self.deltas)
        # reduce epsilon by factor gamma
        # as another simplification we let the exploration just decay with gamma.
        # This is similar to the decreasing exploration in SPSA, but simpler.
        self.epsilon *= self.gamma
        self.module.reset()  # reset the module
        self.genDifVect()    # generate a new perturbation vector
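# genDifVect() draws each component uniformly from {-epsilon, +epsilon}
# (a scaled Rademacher vector), and the update divides by the deltas
# elementwise. A standalone numpy sketch of one symmetric-sampling step;
# the toy reward and constants are illustrative, and the sketch uses the
# classic unnormalized SPSA quotient rather than the class's
# best-reward-normalized `fakt`.
import numpy as np

np.random.seed(1)
n, epsilon = 4, 0.5
theta = np.zeros(n)

def reward(params):
    # toy reward, maximized at params == 1
    return -np.sum((params - 1.0) ** 2)

# perturbation vector with components in {-epsilon, +epsilon}, as in genDifVect
deltas = (np.random.randint(0, 2, n) * 2 - 1) * epsilon

r_plus = reward(theta + deltas)   # positive rollout
r_minus = reward(theta - deltas)  # negative rollout

# classic SPSA gradient estimate: difference quotient along the deltas
grad = (r_plus - r_minus) / (2.0 * deltas)
theta = theta + 0.1 * grad        # plain ascent step with alpha = 0.1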
class BackpropTrainer(Trainer):
    """Trainer that trains the parameters of a module according to a
    supervised dataset (potentially sequential) by backpropagating the errors
    (through time)."""

    def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
                 momentum=0., verbose=False, batchlearning=False,
                 weightdecay=0.):
        """Create a BackpropTrainer to train the specified `module` on the
        specified `dataset`.

        The learning rate gives the ratio by which parameters are changed in
        the direction of the gradient. The learning rate decreases by
        `lrdecay`, which is used to multiply the learning rate after each
        training step. The parameters are also adjusted with respect to
        `momentum`, which is the ratio by which the gradient of the last
        time step is used.

        If `batchlearning` is set, the parameters are updated only at the
        end of each epoch. Default is False.

        `weightdecay` corresponds to the weight decay rate, where 0 is no
        weight decay at all.
        """
        Trainer.__init__(self, module)
        self.setData(dataset)
        self.verbose = verbose
        self.batchlearning = batchlearning
        self.weightdecay = weightdecay
        self.epoch = 0
        self.totalepochs = 0
        # set up gradient descender
        self.descent = GradientDescent()
        self.descent.alpha = learningrate
        self.descent.momentum = momentum
        self.descent.alphadecay = lrdecay
        self.descent.init(module.params)

    def train(self):
        """Train the associated module for one epoch."""
        assert len(self.ds) > 0, "Dataset cannot be empty."
        self.module.resetDerivatives()
        errors = 0
        ponderation = 0.
        shuffledSequences = []
        for seq in self.ds._provideSequences():
            shuffledSequences.append(seq)
        shuffle(shuffledSequences)
        for seq in shuffledSequences:
            e, p = self._calcDerivs(seq)
            errors += e
            ponderation += p
            if not self.batchlearning:
                gradient = self.module.derivs - self.weightdecay * self.module.params
                new = self.descent(gradient, errors)
                if new is not None:
                    self.module.params[:] = new
                self.module.resetDerivatives()
        if self.verbose:
            print "Total error:", errors / ponderation
        if self.batchlearning:
            self.module._setParameters(self.descent(self.module.derivs))
        self.epoch += 1
        self.totalepochs += 1
        return errors / ponderation

    def _calcDerivs(self, seq):
        """Calculate error function and backpropagate output errors to yield
        the gradient."""
        self.module.reset()
        for sample in seq:
            self.module.activate(sample[0])
        error = 0
        ponderation = 0.
        for offset, sample in reversed(list(enumerate(seq))):
            # need to make a distinction here between datasets containing
            # importance, and others
            target = sample[1]
            outerr = target - self.module.outputbuffer[offset]
            if len(sample) > 2:
                importance = sample[2]
                error += 0.5 * dot(importance, outerr ** 2)
                ponderation += sum(importance)
                self.module.backActivate(outerr * importance)
            else:
                error += 0.5 * sum(outerr ** 2)
                ponderation += len(target)
                # FIXME: the next line keeps arac from producing NaNs. I don't
                # know why that is, but somehow the __str__ method of the
                # ndarray class fixes something,
                str(outerr)
                self.module.backActivate(outerr)
        return error, ponderation

    def _checkGradient(self, dataset=None, silent=False):
        """Numeric check of the computed gradient for debugging purposes."""
        if dataset:
            self.setData(dataset)
        res = []
        for seq in self.ds._provideSequences():
            self.module.resetDerivatives()
            self._calcDerivs(seq)
            e = 1e-6
            analyticalDerivs = self.module.derivs.copy()
            numericalDerivs = []
            for p in range(self.module.paramdim):
                storedoldval = self.module.params[p]
                self.module.params[p] += e
                righterror, dummy = self._calcDerivs(seq)
                self.module.params[p] -= 2 * e
                lefterror, dummy = self._calcDerivs(seq)
                approxderiv = (righterror - lefterror) / (2 * e)
                self.module.params[p] = storedoldval
                numericalDerivs.append(approxderiv)
            r = zip(analyticalDerivs, numericalDerivs)
            res.append(r)
            if not silent:
                print r
        return res

    def testOnData(self, dataset=None, verbose=False):
        """Compute the MSE of the module performance on the given dataset.

        If no dataset is supplied, the one passed upon Trainer initialization
        is used."""
        if dataset is None:
            dataset = self.ds
        dataset.reset()
        if verbose:
            print '\nTesting on data:'
        errors = []
        importances = []
        ponderatedErrors = []
        for seq in dataset._provideSequences():
            self.module.reset()
            e, i = dataset._evaluateSequence(self.module.activate, seq, verbose)
            importances.append(i)
            errors.append(e)
            ponderatedErrors.append(e / i)
        if verbose:
            print 'All errors:', ponderatedErrors
        assert sum(importances) > 0
        avgErr = sum(errors) / sum(importances)
        if verbose:
            print 'Average error:', avgErr
            print ('Max error:', max(ponderatedErrors),
                   'Median error:', sorted(ponderatedErrors)[len(errors) / 2])
        return avgErr

    def testOnClassData(self, dataset=None, verbose=False,
                        return_targets=False):
        """Return winner-takes-all classification output on a given dataset.

        If no dataset is given, the dataset passed during Trainer
        initialization is used. If return_targets is set, also return the
        corresponding target classes.
        """
        if dataset is None:
            dataset = self.ds
        dataset.reset()
        out = []
        targ = []
        for seq in dataset._provideSequences():
            self.module.reset()
            for input, target in seq:
                res = self.module.activate(input)
                out.append(argmax(res))
                targ.append(argmax(target))
        if return_targets:
            return out, targ
        else:
            return out

    def trainUntilConvergence(self, dataset=None, maxEpochs=None, verbose=None,
                              continueEpochs=10, validationProportion=0.25):
        """Train the module on the dataset until it converges.

        Return the module with the parameters that gave the minimal validation
        error.

        If no dataset is given, the dataset passed during Trainer
        initialization is used. validationProportion is the ratio of the
        dataset that is used for the validation dataset.

        If maxEpochs is given, at most that many epochs are trained. Each
        time the validation error hits a minimum, try for continueEpochs
        epochs to find a better one."""
        epochs = 0
        if dataset is None:
            dataset = self.ds
        if verbose is None:
            verbose = self.verbose
        # Split the dataset randomly: validationProportion of the samples for
        # validation.
        trainingData, validationData = (
            dataset.splitWithProportion(1 - validationProportion))
        if not (len(trainingData) > 0 and len(validationData)):
            raise ValueError(
                "Provided dataset too small to be split into training " +
                "and validation sets with proportion " +
                str(validationProportion))
        self.ds = trainingData
        bestweights = self.module.params.copy()
        bestverr = self.testOnData(validationData)
        trainingErrors = []
        validationErrors = [bestverr]
        while True:
            trainingErrors.append(self.train())
            validationErrors.append(self.testOnData(validationData))
            if epochs == 0 or validationErrors[-1] < bestverr:
                # one update is always done
                bestverr = validationErrors[-1]
                bestweights = self.module.params.copy()
            if maxEpochs is not None and epochs >= maxEpochs:
                self.module.params[:] = bestweights
                break
            epochs += 1
            if len(validationErrors) >= continueEpochs * 2:
                # have the validation errors started going up again?
                # compare the average of the last few to the previous few
                old = validationErrors[-continueEpochs * 2:-continueEpochs]
                new = validationErrors[-continueEpochs:]
                if min(new) > max(old):
                    self.module.params[:] = bestweights
                    break
        trainingErrors.append(self.testOnData(trainingData))
        self.ds = dataset
        if verbose:
            print 'train-errors:', fListToString(trainingErrors, 6)
            print 'valid-errors:', fListToString(validationErrors, 6)
        return trainingErrors, validationErrors
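# Minimal usage sketch for BackpropTrainer, assuming the standard PyBrain
# helpers are importable as in the library's own tutorials; the XOR data
# and hyperparameters are illustrative.
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet

net = buildNetwork(2, 3, 1, bias=True)   # 2-in, 3-hidden, 1-out network

ds = SupervisedDataSet(2, 1)
for inp, tgt in [((0, 0), (0,)), ((0, 1), (1,)),
                 ((1, 0), (1,)), ((1, 1), (0,))]:
    ds.addSample(inp, tgt)

trainer = BackpropTrainer(net, ds, learningrate=0.1, momentum=0.9)
for _ in range(1000):
    err = trainer.train()                # one epoch; returns the weighted MSE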
def __init__(self):
    self.gd = GradientDescent()
class ExtendedBackpropTrainer(Trainer):
    """Trainer that trains the parameters of a module according to a
    supervised dataset (potentially sequential) by backpropagating the errors
    (through time)."""

    def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
                 momentum=0., verbose=False, batchlearning=False,
                 weightdecay=0.):
        """Create a BackpropTrainer to train the specified `module` on the
        specified `dataset`.

        The learning rate gives the ratio by which parameters are changed in
        the direction of the gradient. The learning rate decreases by
        `lrdecay`, which is used to multiply the learning rate after each
        training step. The parameters are also adjusted with respect to
        `momentum`, which is the ratio by which the gradient of the last
        time step is used.

        If `batchlearning` is set, the parameters are updated only at the
        end of each epoch. Default is False.

        `weightdecay` corresponds to the weight decay rate, where 0 is no
        weight decay at all.
        """
        Trainer.__init__(self, module)
        self.setData(dataset)
        self.verbose = verbose
        self.batchlearning = batchlearning
        self.weightdecay = weightdecay
        self.epoch = 0
        self.totalepochs = 0
        # set up gradient descender
        self.descent = GradientDescent()
        self.descent.alpha = learningrate
        self.descent.momentum = momentum
        self.descent.alphadecay = lrdecay
        self.descent.init(module.params)

    def train(self):
        """Train the associated module for one epoch."""
        assert len(self.ds) > 0, "Dataset cannot be empty."
        self.module.resetDerivatives()
        errors = 0
        ponderation = 0.
        shuffledSequences = []
        for seq in self.ds._provideSequences():
            shuffledSequences.append(seq)
        shuffle(shuffledSequences)
        for seq in shuffledSequences:
            e, p = self._calcDerivs(seq)
            errors += e
            ponderation += p
            if not self.batchlearning:
                gradient = self.module.derivs - self.weightdecay * self.module.params
                new = self.descent(gradient, errors)
                if new is not None:
                    self.module.params[:] = new
                self.module.resetDerivatives()
        if self.verbose:
            print("Total error: {z: .12g}".format(z=errors / ponderation))
        if self.batchlearning:
            self.module._setParameters(self.descent(self.module.derivs))
        self.epoch += 1
        self.totalepochs += 1
        return errors / ponderation

    def _calcDerivs(self, seq):
        """Calculate error function and backpropagate output errors to yield
        the gradient."""
        self.module.reset()
        for sample in seq:
            self.module.activate(sample[0])
        error = 0
        ponderation = 0.
        for offset, sample in reversed(list(enumerate(seq))):
            # need to make a distinction here between datasets containing
            # importance, and others
            target = sample[1]
            outerr = target - self.module.outputbuffer[offset]
            if len(sample) > 2:
                importance = sample[2]
                error += 0.5 * dot(importance, outerr ** 2)
                ponderation += sum(importance)
                self.module.backActivate(outerr * importance)
            else:
                error += 0.5 * sum(outerr ** 2)
                ponderation += len(target)
                # FIXME: the next line keeps arac from producing NaNs. I don't
                # know why that is, but somehow the __str__ method of the
                # ndarray class fixes something,
                str(outerr)
                self.module.backActivate(outerr)
        return error, ponderation

    def _checkGradient(self, dataset=None, silent=False):
        """Numeric check of the computed gradient for debugging purposes."""
        if dataset:
            self.setData(dataset)
        res = []
        for seq in self.ds._provideSequences():
            self.module.resetDerivatives()
            self._calcDerivs(seq)
            e = 1e-6
            analyticalDerivs = self.module.derivs.copy()
            numericalDerivs = []
            for p in range(self.module.paramdim):
                storedoldval = self.module.params[p]
                self.module.params[p] += e
                righterror, dummy = self._calcDerivs(seq)
                self.module.params[p] -= 2 * e
                lefterror, dummy = self._calcDerivs(seq)
                approxderiv = (righterror - lefterror) / (2 * e)
                self.module.params[p] = storedoldval
                numericalDerivs.append(approxderiv)
            r = list(zip(analyticalDerivs, numericalDerivs))
            res.append(r)
            if not silent:
                print(r)
        return res

    def testOnData(self, dataset=None, verbose=False):
        """Compute the MSE of the module performance on the given dataset.

        If no dataset is supplied, the one passed upon Trainer initialization
        is used."""
        if dataset is None:
            dataset = self.ds
        dataset.reset()
        if verbose:
            print('\nTesting on data:')
        errors = []
        importances = []
        ponderatedErrors = []
        for seq in dataset._provideSequences():
            self.module.reset()
            e, i = dataset._evaluateSequence(self.module.activate, seq, verbose)
            importances.append(i)
            errors.append(e)
            ponderatedErrors.append(e / i)
        if verbose:
            print('All errors:', ponderatedErrors)
        assert sum(importances) > 0
        avgErr = sum(errors) / sum(importances)
        if verbose:
            print('Average error:', avgErr)
            print('Max error:', max(ponderatedErrors), 'Median error:',
                  sorted(ponderatedErrors)[len(errors) // 2])
        return avgErr

    def testOnClassData(self, dataset=None, verbose=False,
                        return_targets=False):
        """Return winner-takes-all classification output on a given dataset.

        If no dataset is given, the dataset passed during Trainer
        initialization is used. If return_targets is set, also return the
        corresponding target classes.
        """
        if dataset is None:
            dataset = self.ds
        dataset.reset()
        out = []
        targ = []
        for seq in dataset._provideSequences():
            self.module.reset()
            for input, target, importance in seq:
                res = self.module.activate(input)
                out.append(argmax(res))
                targ.append(argmax(target))
        if return_targets:
            return out, targ
        else:
            return out

    def trainUntilConvergence(self, dataset=None, maxEpochs=None, verbose=None,
                              continueEpochs=10, validationProportion=0.25,
                              trainingData=None, validationData=None,
                              convergence_threshold=10):
        """Train the module on the dataset until it converges.

        Return the module with the parameters that gave the minimal validation
        error.

        If no dataset is given, the dataset passed during Trainer
        initialization is used. validationProportion is the ratio of the
        dataset that is used for the validation dataset. If trainingData and
        validationData are already given, validationProportion is ignored.

        If maxEpochs is given, at most that many epochs are trained. Each
        time the validation error hits a minimum, try for continueEpochs
        epochs to find a better one."""
        epochs = 0
        if dataset is None:
            dataset = self.ds
        if verbose is None:
            verbose = self.verbose
        if trainingData is None or validationData is None:
            # Split the dataset randomly: validationProportion of the samples
            # for validation.
            trainingData, validationData = (
                dataset.splitWithProportion(1 - validationProportion))
        if not (len(trainingData) > 0 and len(validationData)):
            raise ValueError("Provided dataset too small to be split into training " +
                             "and validation sets with proportion " +
                             str(validationProportion))
        self.ds = trainingData
        bestweights = self.module.params.copy()
        bestverr = self.testOnData(validationData)
        bestepoch = 0
        self.trainingErrors = []
        self.validationErrors = [bestverr]
        while True:
            trainingError = self.train()
            validationError = self.testOnData(validationData)
            if isnan(trainingError) or isnan(validationError):
                raise Exception("Training produced NaN results")
            self.trainingErrors.append(trainingError)
            self.validationErrors.append(validationError)
            if epochs == 0 or self.validationErrors[-1] < bestverr:
                # one update is always done
                bestverr = self.validationErrors[-1]
                bestweights = self.module.params.copy()
                bestepoch = epochs
            if maxEpochs is not None and epochs >= maxEpochs:
                self.module.params[:] = bestweights
                break
            epochs += 1
            if len(self.validationErrors) >= continueEpochs * 2:
                # have the validation errors started going up again?
                # compare the average of the last few to the previous few
                old = self.validationErrors[-continueEpochs * 2:-continueEpochs]
                new = self.validationErrors[-continueEpochs:]
                if min(new) > max(old):
                    self.module.params[:] = bestweights
                    break
                lastnew = round(new[-1], convergence_threshold)
                if sum(round(y, convergence_threshold) - lastnew for y in new) == 0:
                    self.module.params[:] = bestweights
                    break
        #self.trainingErrors.append(self.testOnData(trainingData))
        self.ds = dataset
        if verbose:
            print('train-errors:', fListToString(self.trainingErrors, 6))
            print('valid-errors:', fListToString(self.validationErrors, 6))
        return self.trainingErrors[:bestepoch], self.validationErrors[:1 + bestepoch]
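# Usage sketch for the convergence loop above (net and ds as in the
# BackpropTrainer example earlier; all hyperparameter values are
# illustrative). convergence_threshold is the number of decimal places to
# which the last continueEpochs validation errors are rounded before they
# are compared for stagnation.
trainer = ExtendedBackpropTrainer(net, ds, learningrate=0.05)
train_errs, valid_errs = trainer.trainUntilConvergence(
    maxEpochs=200, continueEpochs=10, validationProportion=0.25,
    convergence_threshold=6)
# both returned histories are truncated at the best epoch found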
class PGPE(FiniteDifferences):
    """ Policy Gradients with Parameter Exploration (ICANN 2008). """

    batchSize = 2
    #: exploration type
    exploration = "local"
    #: learning rate for the parameter updates
    learningRate = 0.2
    #: specific learning rate for the sigma updates
    sigmaLearningRate = 0.1
    #: initial value of the sigmas
    epsilon = 2.0
    #: lasso weight decay (0 to deactivate)
    wDecay = 0.0
    #: momentum term (0 to deactivate)
    momentum = 0.0
    #: rprop descent (False to deactivate)
    rprop = False

    def _additionalInit(self):
        if self.sigmaLearningRate is None:
            self.sigmaLearningRate = self.learningRate
        self.gdSig = GradientDescent()
        self.gdSig.alpha = self.sigmaLearningRate
        self.gdSig.rprop = self.rprop
        # stores the list of standard deviations (sigmas)
        self.sigList = ones(self.numParameters) * self.epsilon
        self.gdSig.init(self.sigList)
        self.baseline = None

    def perturbation(self):
        """ Generate a difference vector with the given standard deviations. """
        return random.normal(0., self.sigList)

    def _learnStep(self):
        """ Calculates the gradient and executes a step in the direction of
        the gradient, scaled with a learning rate alpha. """
        deltas = self.perturbation()
        # rewards of the positive and negative perturbations
        reward1 = self._oneEvaluation(self.current + deltas)
        reward2 = self._oneEvaluation(self.current - deltas)
        self.mreward = (reward1 + reward2) / 2.
        if self.baseline is None:
            # first learning step
            self.baseline = self.mreward
            fakt = 0.
            fakt2 = 0.
        else:
            # calc the gradients
            if reward1 != reward2:
                # gradient estimate a la SPSA, but with likelihood gradient and normalization
                fakt = (reward1 - reward2) / (2. * self.bestEvaluation - reward1 - reward2)
            else:
                fakt = 0.
            # normalized sigma gradient with moving average baseline
            norm = (self.bestEvaluation - self.baseline)
            if norm != 0.0:
                fakt2 = (self.mreward - self.baseline) / norm
            else:
                fakt2 = 0.0
        # update baseline
        self.baseline = 0.9 * self.baseline + 0.1 * self.mreward
        # update parameters and sigmas
        self.current = self.gd(fakt * deltas - self.current * self.sigList * self.wDecay)
        if fakt2 > 0.:
            # for sigma adaptation the algorithm follows only positive gradients
            if self.exploration == "global":
                # apply the sigma update globally
                # (note: corrected to use the local `deltas`; `self.deltas` is never set in this class)
                self.sigList = self.gdSig(fakt2 * ((deltas ** 2).sum() - (self.sigList ** 2).sum())
                                          / (self.sigList * float(self.numParameters)))
            elif self.exploration == "local":
                # apply the sigma update locally
                self.sigList = self.gdSig(fakt2 * (deltas * deltas - self.sigList * self.sigList) / self.sigList)
            elif self.exploration == "cma":
                # needs more thought - would also require an option in perturbation()
                raise NotImplementedError()
            else:
                raise NotImplementedError(str(self.exploration) +
                                          " is not a known exploration parameter setting.")
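# PGPE inherits the ContinuousOptimizer interface from FiniteDifferences, so
# it can be run as a black-box optimizer. A hedged sketch, assuming PyBrain's
# usual pybrain.optimization entry point and that learn() returns the
# (bestEvaluable, bestEvaluation) pair; the toy objective is illustrative and
# optimizers maximize by default.
from numpy import array
from pybrain.optimization import PGPE

def objective(params):
    return -(params ** 2).sum()    # toy reward, maximal at params == 0

opt = PGPE(objective, array([2.0, -1.5, 0.5]))
opt.maxEvaluations = 1000          # each _learnStep costs two evaluations
best_params, best_reward = opt.learn()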
class BackpropTrainerWiener(Trainer):
    def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
                 momentum=0., verbose=False, batchlearning=False,
                 weightdecay=0.):
        Trainer.__init__(self, module)
        self.setData(dataset)
        self.verbose = verbose
        self.batchlearning = batchlearning
        self.weightdecay = weightdecay
        self.epoch = 0
        self.totalepochs = 0
        # set up gradient descender
        self.descent = GradientDescent()
        self.descent.alpha = learningrate
        self.descent.momentum = momentum
        self.descent.alphadecay = lrdecay
        self.descent.init(module.params)

    def train(self):
        assert len(self.ds) > 0, "Dataset cannot be empty."
        self.module.resetDerivatives()
        errors = 0
        ponderation = 0.
        shuffledSequences = []
        for seq in self.ds._provideSequences():
            shuffledSequences.append(seq)
        shuffle(shuffledSequences)
        for seq in shuffledSequences:
            e, p = self._calcDerivs(seq)
            errors += e
            ponderation += p
            if not self.batchlearning:
                gradient = self.module.derivs - self.weightdecay * self.module.params
                new = self.descent(gradient, errors)
                if new is not None:
                    self.module.params[:] = new
                self.module.resetDerivatives()
        if self.verbose:
            print "Total error:", errors / ponderation
        if self.batchlearning:
            self.module._setParameters(self.descent(self.module.derivs))
        self.epoch += 1
        self.totalepochs += 1
        return errors / ponderation

    def _calcDerivs(self, seq):
        self.module.reset()
        for sample in seq:
            self.module.activate(sample[0])
        error = 0
        ponderation = 0.
        for offset, sample in reversed(list(enumerate(seq))):
            target = sample[1]
            outerr = target - self.module.outputbuffer[offset]
            if len(sample) > 2:
                importance = sample[2]
                error += 0.5 * dot(importance, outerr ** 2)
                ponderation += sum(importance)
                self.module.backActivate(outerr * importance)
            else:
                error += 0.5 * sum(outerr ** 2)
                ponderation += len(target)
                # keeps arac from producing NaNs (see the FIXME in BackpropTrainer)
                str(outerr)
                self.module.backActivate(outerr)
        return error, ponderation

    def _checkGradient(self, dataset=None, silent=False):
        if dataset:
            self.setData(dataset)
        res = []
        for seq in self.ds._provideSequences():
            self.module.resetDerivatives()
            self._calcDerivs(seq)
            e = 1e-6
            analyticalDerivs = self.module.derivs.copy()
            numericalDerivs = []
            for p in range(self.module.paramdim):
                storedoldval = self.module.params[p]
                self.module.params[p] += e
                righterror, dummy = self._calcDerivs(seq)
                self.module.params[p] -= 2 * e
                lefterror, dummy = self._calcDerivs(seq)
                approxderiv = (righterror - lefterror) / (2 * e)
                self.module.params[p] = storedoldval
                numericalDerivs.append(approxderiv)
            r = zip(analyticalDerivs, numericalDerivs)
            res.append(r)
            if not silent:
                print r
        return res

    def testOnData(self, dataset=None, verbose=False):
        if dataset is None:
            dataset = self.ds
        dataset.reset()
        if verbose:
            print '\nTesting on data:'
        errors = []
        importances = []
        ponderatedErrors = []
        for seq in dataset._provideSequences():
            self.module.reset()
            e, i = dataset._evaluateSequence(self.module.activate, seq, verbose)
            importances.append(i)
            errors.append(e)
            ponderatedErrors.append(e / i)
        if verbose:
            print 'All errors:', ponderatedErrors
        assert sum(importances) > 0
        avgErr = sum(errors) / sum(importances)
        if verbose:
            print 'Average error:', avgErr
            print ('Max error:', max(ponderatedErrors),
                   'Median error:', sorted(ponderatedErrors)[len(errors) / 2])
        return avgErr

    def testOnClassData(self, dataset=None, verbose=False, return_targets=False):
        if dataset is None:
            dataset = self.ds
        dataset.reset()
        out = []
        targ = []
        for seq in dataset._provideSequences():
            self.module.reset()
            for input, target in seq:
                res = self.module.activate(input)
                out.append(argmax(res))
                targ.append(argmax(target))
        if return_targets:
            return out, targ
        else:
            return out

    def trainUntilConvergence(self, dataset=None, maxEpochs=None, verbose=None,
                              continueEpochs=10, validationProportion=0.25):
        epochs = 0
        if dataset is None:
            dataset = self.ds
        if verbose is None:
            verbose = self.verbose
        trainingData, validationData = (
            dataset.splitWithProportion(1 - validationProportion))
        if not (len(trainingData) > 0 and len(validationData)):
            raise ValueError("Provided dataset too small to be split into training " +
                             "and validation sets with proportion " +
                             str(validationProportion))
        self.ds = trainingData
        bestweights = self.module.params.copy()
        bestverr = self.testOnData(validationData)
        trainingErrors = []
        validationErrors = [bestverr]
        while True:
            trainingErrors.append(self.train())
            validationErrors.append(self.testOnData(validationData))
            if epochs == 0 or validationErrors[-1] < bestverr:
                bestverr = validationErrors[-1]
                bestweights = self.module.params.copy()
            if maxEpochs is not None and epochs >= maxEpochs:
                self.module.params[:] = bestweights
                break
            epochs += 1
            if len(validationErrors) >= continueEpochs * 2:
                old = validationErrors[-continueEpochs * 2:-continueEpochs]
                new = validationErrors[-continueEpochs:]
                if min(new) > max(old):
                    self.module.params[:] = bestweights
                    break
        trainingErrors.append(self.testOnData(trainingData))
        self.ds = dataset
        if verbose:
            print 'train-errors:', fListToString(trainingErrors, 6)
            print 'valid-errors:', fListToString(validationErrors, 6)
        return trainingErrors, validationErrors
class PolicyGradientLearner(DirectSearchLearner, DataSetLearner, ExploringLearner):
    """ PolicyGradientLearner is a superclass for all continuous direct search
    algorithms that use the log likelihood of the executed action to update
    the weights. Subclasses are ENAC, GPOMDP, or REINFORCE. """

    _module = None

    def __init__(self):
        # gradient descender
        self.gd = GradientDescent()
        # create default explorer
        self._explorer = None
        # loglh dataset
        self.loglh = None
        # network to tie module and explorer together
        self.network = None

    def _setLearningRate(self, alpha):
        """ Pass the alpha value through to the gradient descent object. """
        self.gd.alpha = alpha

    def _getLearningRate(self):
        return self.gd.alpha

    learningRate = property(_getLearningRate, _setLearningRate)

    def _setModule(self, moduleParam):
        """ Initialize the gradient descender with the module parameters and
        the loglh dataset with the outdim of the module. """
        self._module = moduleParam
        # initialize explorer
        self._explorer = NormalExplorer(moduleParam.outdim)
        # build network
        self._initializeNetwork()

    def _getModule(self):
        return self._module

    module = property(_getModule, _setModule)

    def _setExplorer(self, explorer):
        """ Assign a non-standard explorer to the policy gradient learner.
        Requires the module to be set beforehand. """
        assert self._module
        self._explorer = explorer
        # build network
        self._initializeNetwork()

    def _getExplorer(self):
        return self._explorer

    explorer = property(_getExplorer, _setExplorer)

    def _initializeNetwork(self):
        """ Build the combined network consisting of the module and the
        explorer, and initialize the log likelihood dataset. """
        self.network = FeedForwardNetwork()
        self.network.addInputModule(self._module)
        self.network.addOutputModule(self._explorer)
        self.network.addConnection(
            IdentityConnection(self._module, self._explorer))
        self.network.sortModules()
        # initialize gradient descender
        self.gd.init(self.network.params)
        # initialize loglh dataset
        self.loglh = LoglhDataSet(self.network.paramdim)

    def learn(self):
        """ Calls the gradient calculation function and executes a step in the
        direction of the gradient, scaled with a small learning rate alpha. """
        if self.dataset is None:
            raise Exception("Dataset must not be None!")
        if self.module is None:
            raise Exception("Module must not be None!")
        # calculate the gradient with the specific function from the subclass
        gradient = self.calculateGradient()
        # scale the gradient if it has too large values
        if max(gradient) > 1000:
            gradient = gradient / max(gradient) * 1000
        # update the parameters of the module
        p = self.gd(gradient.flatten())
        self.network._setParameters(p)
        self.network.reset()

    def explore(self, state, action):
        # forward pass of exploration
        explorative = ExploringLearner.explore(self, state, action)
        # backward pass through the network, storing the derivatives
        self.network.backward()
        self.loglh.appendLinked(self.network.derivs.copy())
        return explorative

    def reset(self):
        self.loglh.clear()

    def calculateGradient(self):
        abstractMethod()
def __init__(self):
    # standard parameters
    self.epsilon = 1.0
    self.gamma = 0.999
    self.gd = GradientDescent()
class SPLA(FDLearner):
    def __init__(self):
        # standard parameters
        self.epsilon = 2.0      # initial value of sigmas
        self.baseline = 0.0     # moving average baseline, used for sigma adaptation
        self.best = -1000000.0  # TODO: replace with -inf
        self.symCount = 1.0     # switch for symmetric sampling
        self.gd = GradientDescent()
        self.gdSig = GradientDescent()
        self.wDecay = 0.001     # lasso weight decay (0 to deactivate)

    def setModule(self, module):
        """Sets and initializes all module settings."""
        FDLearner.setModule(self, module)
        self.original = zeros(self.module.params.shape)  # stores the parameter set
        self.sigList = ones(self.module.params.shape)    # stores the list of standard deviations (sigmas)
        self.initSigmas()
        self.deltas = zeros(self.module.params.shape)    # the parameter difference vector for exploration
        self.module._setParameters(self.original)        # initializes the module parameters to zeros
        self.gd.init(self.original)
        self.numOParas = len(self.original)

    def initSigmas(self):
        self.sigList *= self.epsilon  # initialize sigmas to epsilon
        self.gdSig.init(self.sigList)

    def genDifVect(self):
        # generates a difference vector with the given standard deviations
        self.deltas = random.normal(0.0, self.sigList)

    def perturbate(self):
        """ Perturb the parameters. """
        self.symCount *= -1.0  # change sign of perturbation
        self.ds.append('deltas', self.symCount * self.deltas)  # add perturbation to the dataset
        self.module._setParameters(self.original + self.symCount * self.deltas)  # set the actual perturbed parameters in the module

    def learn(self):
        """ Calculates the gradient and executes a step in the direction of
        the gradient, scaled with a learning rate alpha. """
        assert self.ds is not None
        assert self.module is not None
        # calculate the gradient
        reward1 = 0.0  # reward of the positive perturbation
        reward2 = 0.0  # reward of the negative perturbation
        sym = 1.0      # perturbation switch
        seqLen = self.ds.getNumSequences()  # number of sequences done for learning
        for seq in range(seqLen):
            sym *= -1.0
            _, _, reward = self.ds.getSequence(seq)
            # add up the rewards of the positive and negative perturbation rollouts respectively
            if sym == 1.0:
                reward1 += sum(reward)
            else:
                reward2 += sum(reward)
        # normalize rewards by seqLen
        reward1 /= float(seqLen)
        reward2 /= float(seqLen)
        self.reward = (reward1 + reward2)
        reward1 *= 2.0
        reward2 *= 2.0
        # check if the reward is the best observed up to now
        if reward1 > self.best:
            self.best = reward1
        if reward2 > self.best:
            self.best = reward2
        # some checks at the first learning sequence
        if self.baseline == 0.0:
            self.baseline = self.reward / 2.0
            fakt = 0.0
            fakt2 = 0.0
            if seqLen / 2 != float(seqLen) / 2.0:
                print "ATTENTION!!! SPLA uses symmetric sampling! The number of episodes per learning step must be even! (2 for deterministic settings, >2 for stochastic settings) A number of episodes of", seqLen, "is odd."
                while(True):
                    sleep(1)
        else:
            # calc the gradients
            if reward1 != reward2:
                # gradient estimate a la SPSA, but with likelihood gradient and normalization
                fakt = (reward1 - reward2) / (2.0 * self.best - reward1 - reward2)
            else:
                fakt = 0.0
            # normalized sigma gradient with moving average baseline
            fakt2 = (self.reward - self.baseline) / (self.best - self.baseline)
        self.baseline = 0.9 * self.baseline + 0.1 * self.reward  # update baseline
        # update parameters and sigmas
        self.original = self.gd(fakt * self.deltas - self.original * self.sigList * self.wDecay)
        print abs(self.original).sum() / self.numOParas  # debug output: mean parameter magnitude
        if fakt2 > 0.0:
            # for sigma adaptation the algorithm follows only positive gradients
            self.sigList = self.gdSig(fakt2 * (self.deltas * self.deltas - self.sigList * self.sigList) / self.sigList)  # apply sigma update
        self.module.reset()  # reset the module
        self.genDifVect()    # generate a new perturbation vector
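# The sigma update above pushes each standard deviation toward the magnitude
# of the perturbation that produced an above-baseline reward: the factor
# (delta**2 - sigma**2) / sigma is proportional to the derivative of the
# Gaussian log-likelihood with respect to sigma. A standalone sketch of one
# such update; all constants are illustrative.
import numpy as np

sigmas = np.full(3, 2.0)            # current exploration sigmas
deltas = np.array([0.5, 2.0, 3.5])  # sampled perturbation
fakt2 = 0.3                         # normalized above-baseline reward

# scaled log-likelihood gradient of N(0, sigma) w.r.t. sigma, at deltas
sig_grad = fakt2 * (deltas * deltas - sigmas * sigmas) / sigmas

sigmas = sigmas + 0.1 * sig_grad    # plain gradient step with alpha = 0.1
# components where |delta| < sigma shrink, those where |delta| > sigma grow
print(sigmas)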
class PolicyGradientLearner(DirectSearchLearner, DataSetLearner, ExploringLearner):
    """ PolicyGradientLearner is a superclass for all continuous direct search
    algorithms that use the log likelihood of the executed action to update
    the weights. Subclasses are ENAC, GPOMDP, or REINFORCE. """

    _module = None

    def __init__(self):
        # gradient descender
        self.gd = GradientDescent()
        # create default explorer
        self._explorer = None
        # loglh dataset
        self.loglh = None
        # network to tie module and explorer together
        self.network = None

    def _setLearningRate(self, alpha):
        """ Pass the alpha value through to the gradient descent object. """
        self.gd.alpha = alpha

    def _getLearningRate(self):
        return self.gd.alpha

    learningRate = property(_getLearningRate, _setLearningRate)

    def _setModule(self, module):
        """ Initialize the gradient descender with the module parameters and
        the loglh dataset with the outdim of the module. """
        self._module = module
        # initialize explorer
        self._explorer = NormalExplorer(module.outdim)
        # build network
        self._initializeNetwork()

    def _getModule(self):
        return self._module

    module = property(_getModule, _setModule)

    def _setExplorer(self, explorer):
        """ Assign a non-standard explorer to the policy gradient learner.
        Requires the module to be set beforehand. """
        assert self._module
        self._explorer = explorer
        # build network
        self._initializeNetwork()

    def _getExplorer(self):
        return self._explorer

    explorer = property(_getExplorer, _setExplorer)

    def _initializeNetwork(self):
        """ Build the combined network consisting of the module and the
        explorer, and initialize the log likelihood dataset. """
        self.network = FeedForwardNetwork()
        self.network.addInputModule(self._module)
        self.network.addOutputModule(self._explorer)
        self.network.addConnection(IdentityConnection(self._module, self._explorer))
        self.network.sortModules()
        # initialize gradient descender
        self.gd.init(self.network.params)
        # initialize loglh dataset
        self.loglh = LoglhDataSet(self.network.paramdim)

    def learn(self):
        """ Calls the gradient calculation function and executes a step in the
        direction of the gradient, scaled with a small learning rate alpha. """
        assert self.dataset is not None
        assert self.module is not None
        # calculate the gradient with the specific function from the subclass
        gradient = self.calculateGradient()
        # scale the gradient if it has too large values
        if max(gradient) > 1000:
            gradient = gradient / max(gradient) * 1000
        # update the parameters of the module
        p = self.gd(gradient.flatten())
        self.network._setParameters(p)
        self.network.reset()

    def explore(self, state, action):
        # forward pass of exploration
        explorative = ExploringLearner.explore(self, state, action)
        # backward pass through the network, storing the derivatives
        self.network.backward()
        self.loglh.appendLinked(self.network.derivs.copy())
        return explorative

    def reset(self):
        self.loglh.clear()

    def calculateGradient(self):
        abstractMethod()
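# In practice these learners are used through an agent. A hedged sketch,
# assuming PyBrain's usual RL layout (LearningAgent, ENAC); the module sizes
# and the learning rate are illustrative.
from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.agents import LearningAgent
from pybrain.rl.learners import ENAC    # one of the subclasses named above

module = buildNetwork(4, 1, bias=False) # linear policy: 4 state dims -> 1 action

learner = ENAC()
learner.learningRate = 0.05             # forwarded to the GradientDescent object

# the agent wires module and learner together; assigning the module builds
# the module + NormalExplorer network described in _setModule above
agent = LearningAgent(module, learner)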
class TTrainer(BackpropTrainer):
    def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
                 momentum=0., verbose=False, batchlearning=False,
                 weightdecay=0.):
        # NB: deliberately skips BackpropTrainer.__init__ and calls
        # Trainer.__init__ directly; the fields are set up below instead
        super(BackpropTrainer, self).__init__(module)
        #self.setData(dataset)
        self.verbose = False
        self.batchlearning = batchlearning
        self.weightdecay = weightdecay
        self.epoch = 0
        self.totalepochs = 0
        # set up gradient descender
        self.descent = GradientDescent()
        self.descent.alpha = learningrate
        self.descent.momentum = momentum
        self.descent.alphadecay = lrdecay
        self.descent.init(module.params)

    def minibatch_training(self, dataset):
        for i in xrange(50):
            #ds = dataset.splitWithProportion(0.01)[0]
            ds = dataset.batches(1000)
            self.trainOnDataset(ds)
            ds.clear()

    def trainUntilConvergence(self, dataset=None, maxEpochs=None, verbose=None,
                              continueEpochs=10, validationProportion=0.25,
                              FOLDER=os.curdir, modelfile='network.model',
                              file=None):
        """Train the module on the dataset until it converges.

        Return the module with the parameters that gave the minimal validation
        error.

        If no dataset is given, the dataset passed during Trainer
        initialization is used. validationProportion is the ratio of the
        dataset that is used for the validation dataset.

        If maxEpochs is given, at most that many epochs are trained. Each
        time the validation error hits a minimum, try for continueEpochs
        epochs to find a better one."""
        epochs = 0
        if dataset is None:
            dataset = self.ds
        if verbose is None:
            verbose = self.verbose
        # Split the dataset randomly: validationProportion of the samples for
        # validation.
        trainingData, validationData = (
            dataset.splitWithProportion(1 - validationProportion))
        if not (len(trainingData) > 0 and len(validationData)):
            raise ValueError("Provided dataset too small to be split into training " +
                             "and validation sets with proportion " +
                             str(validationProportion))
        self.ds = trainingData
        bestweights = self.module.params.copy()
        bestverr = self.testOnData(validationData)
        trainingErrors = []
        validationErrors = [bestverr]
        while True:
            if maxEpochs is not None:
                print float(epochs) / maxEpochs
            trainErr = self.train()
            validErr = self.testOnData(validationData)
            errs = [trainErr, validErr]
            print errs
            if file is not None:
                with open(os.path.join(FOLDER, file), 'a') as neurons:
                    neurons.write(str(errs))
                    neurons.write('\n')
            trainingErrors.append(trainErr)
            validationErrors.append(validErr)
            if epochs == 0 or validationErrors[-1] < bestverr:
                # one update is always done
                bestverr = validationErrors[-1]
                bestweights = self.module.params.copy()
                NetworkWriter.writeToFile(self.module, os.path.join(FOLDER, modelfile))
                #with open(os.path.join(FOLDER, modelfile), 'wb') as fileObject:
                #    pickle.dump(self.module, fileObject)
            if maxEpochs is not None and epochs >= maxEpochs:
                self.module.params[:] = bestweights
                break
            epochs += 1
            if len(validationErrors) >= continueEpochs * 2:
                # have the validation errors started going up again?
                # compare the average of the last few to the previous few
                old = validationErrors[-continueEpochs * 2:-continueEpochs]
                new = validationErrors[-continueEpochs:]
                if min(new) > max(old):
                    self.module.params[:] = bestweights
                    break
        trainingErrors.append(self.testOnData(trainingData))
        self.ds = dataset
        return trainingErrors, validationErrors