def _batchLearn(self):
    """ Batch learning. """
    xdim = self.numParameters
    # produce samples and evaluate them
    try:
        self._produceSamples()
        # shape their fitnesses
        shapedFits = self.shapingFunction(self.allFitnesses[-self.batchSize:])
        # update parameters (unbiased: divide by batchsize)
        update = self._calcBatchUpdate(shapedFits)
        if self.elitism:
            self.x = self.bestEvaluable
        else:
            self.x += self.learningRate * update[:xdim]
        self.factorSigma += self.learningRateSigma * flat2triu(update[xdim:], xdim)
        self.sigma = dot(self.factorSigma.T, self.factorSigma)
    except ValueError:
        print("Numerical Instability. Stopping.")
        self.maxLearningSteps = self.numLearningSteps
    if self._hasConverged():
        print("Premature convergence. Stopping.")
        self.maxLearningSteps = self.numLearningSteps
    if self.verbose:
        print("Evals:", self.numEvaluations)
    self.allCenters.append(self.x.copy())
    self.allFactorSigmas.append(self.factorSigma.copy())
    if self.storeAllDistributions:
        self._allDistributions.append((self.x.copy(), self.sigma.copy()))
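# --- illustrative sketch (not part of the class above) ---
# The flat `update` vector packs two things: the first xdim entries step the
# center self.x, and the remaining xdim*(xdim+1)//2 entries form an
# upper-triangular step for factorSigma. The helper below is a plain-NumPy
# sketch of what the flat2triu helper is assumed to do here; the name
# flat2triu_sketch is illustrative only. It also shows why
# sigma = factorSigma.T . factorSigma keeps the covariance symmetric and
# positive semidefinite, so the sampling distribution stays valid.
import numpy as np

def flat2triu_sketch(flat, dim):
    """ Unpack a flat vector of length dim*(dim+1)//2 into an
        upper-triangular dim x dim matrix, row by row. """
    m = np.zeros((dim, dim))
    rows, cols = np.triu_indices(dim)
    m[rows, cols] = flat
    return m

xdim = 3
update = np.arange(xdim + xdim * (xdim + 1) // 2, dtype=float)
centerStep = update[:xdim]                          # steps self.x
factorStep = flat2triu_sketch(update[xdim:], xdim)  # steps factorSigma

factorSigma = np.eye(xdim) + 0.01 * factorStep
sigma = factorSigma.T.dot(factorSigma)
assert np.allclose(sigma, sigma.T)                  # symmetric
assert np.all(np.linalg.eigvalsh(sigma) >= 0)       # positive semidefinite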
def _batchLearn(self, maxSteps):
    """ Batch learning. """
    while (self.evalsDone < maxSteps
           and not self.bestEvaluation >= self.desiredEvaluation):
        # produce samples and evaluate them
        try:
            self._produceSamples()
            # shape their fitnesses
            shapedFits = self.shapingFunction(self.allFitnesses[-self.batchSize:])
            # update parameters (unbiased: divide by batchsize)
            update = self._calcBatchUpdate(shapedFits)
            if self.elitism:
                self.x = self.bestEvaluable
            else:
                self.x += self.learningRate * update[:self.xdim]
            self.factorSigma += self.learningRateSigma * flat2triu(update[self.xdim:], self.xdim)
            self.sigma = dot(self.factorSigma.T, self.factorSigma)
        except ValueError:
            print("Numerical Instability. Stopping.")
            break
        if self._hasConverged():
            print("Premature convergence. Stopping.")
            break
        if self.verbose:
            print("G:", self.generation, "Evals:", self.evalsDone,
                  "MaxG:", max(self.allFitnesses[-self.batchSize:]))
        self.allCenters.append(self.x.copy())
        self.allFactorSigmas.append(self.factorSigma.copy())
        self.generation += 1
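# --- illustrative sketch (not part of the class above) ---
# shapingFunction transforms the raw fitnesses of the last batch before the
# gradient step. A common choice in evolution strategies is rank-based
# shaping, sketched below in plain NumPy; the shapingFunction actually
# configured on this class may be a different transform, so treat this as
# one representative example rather than the method used here.
import numpy as np

def ranking_shaping_sketch(fitnesses):
    """ Replace raw fitnesses by their ranks, scaled to [0, 1]. Monotone
        transforms like this make the update invariant to the scale of
        the objective and robust to outliers. """
    fitnesses = np.asarray(fitnesses)
    ranks = np.argsort(np.argsort(fitnesses))  # 0 = worst, n-1 = best
    return ranks / float(len(fitnesses) - 1)

# wildly scaled fitnesses map to evenly spaced weights:
print(ranking_shaping_sketch([1e9, -3.0, 0.5, 7.0]))  # [1. 0. 0.333... 0.666...]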
def _onlineLearn(self):
    """ Online learning. """
    # produce one sample and evaluate
    xdim = self.numParameters
    self._produceNewSample()
    if len(self.allSamples) <= self.batchSize:
        return
    # shape the fitnesses of the last samples
    shapedFits = self.shapingFunction(self.allFitnesses[-self.batchSize:])
    # update parameters
    update = self._calcOnlineUpdate(shapedFits)
    self.x += self.learningRate * update[:xdim]
    self.factorSigma += self.learningRateSigma * flat2triu(update[xdim:], xdim)
    self.sigma = dot(self.factorSigma.T, self.factorSigma)
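# --- illustrative sketch (not part of the class above) ---
# Unlike the batch variant, _onlineLearn updates after every single sample,
# shaping a sliding window of the last batchSize fitnesses. The standalone
# loop below (plain Python, hypothetical values) mirrors the early-return
# guard and the [-batchSize:] slice to show when updates start and which
# fitnesses each one sees.
allFitnesses = []
batchSize = 3
for step, fit in enumerate([0.2, 0.9, 0.1, 0.5, 0.7]):
    allFitnesses.append(fit)
    if len(allFitnesses) <= batchSize:
        continue                     # mirrors the guard in _onlineLearn
    print(step, allFitnesses[-batchSize:])
# prints: 3 [0.9, 0.1, 0.5]   then   4 [0.1, 0.5, 0.7]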