Example #1
 def __init__(self, evaluator, evaluable, **args):
     # this learner does not handle topology-evolving evaluables
     assert not isinstance(evaluable, TopologyEvolvable)
     # normalize the evaluable into a masked wrapper: full modules
     # get a module-level mask, anything else a parameter-level one
     if isinstance(evaluable, Module):
         evaluable = MaskedModule(evaluable)
     else:
         evaluable = MaskedParameters(evaluable)
     Learner.__init__(self, evaluator, evaluable, **args)
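The constructor is pure type dispatch: both kinds of evaluable end up wrapped in a mask before Learner.__init__ sees them. A self-contained sketch of the same isinstance dispatch, with stand-in classes (hypothetical names for illustration, not PyBrain's real definitions):

    class Module:                       # stand-in for pybrain's Module
        pass

    class MaskedModule:
        def __init__(self, module):     # wraps a whole module
            self.wrapped = module

    class MaskedParameters:
        def __init__(self, evaluable):  # wraps a bare parameter container
            self.wrapped = evaluable

    def wrap(evaluable):
        # same dispatch as the constructor above
        if isinstance(evaluable, Module):
            return MaskedModule(evaluable)
        return MaskedParameters(evaluable)

    assert isinstance(wrap(Module()), MaskedModule)
    assert isinstance(wrap(object()), MaskedParameters)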
Example #2
 def __init__(self, evaluator, evaluable, **args):
     # assumes module-level imports: from numpy import ndarray, size
     # and from numpy.random import rand
     Learner.__init__(self, evaluator, evaluable, **args)

     if isinstance(evaluable, ParameterContainer):
         # in this case we have to wrap the evaluator: the optimizer
         # proposes flat arrays, which are copied back into a private
         # copy of the container before the user's evaluator is called
         self.wrappingEvaluable = evaluable.copy()
         self.wrappingEvaluable.name = 'opt-by-' + self.__class__.__name__
         def wrappedEvaluator(x):
             self.wrappingEvaluable._setParameters(x)
             return evaluator(self.wrappingEvaluable)
         self.evaluator = wrappedEvaluator
         self.x0 = evaluable.params.copy()
         self.bestEvaluable = self.x0.copy()
     else:
         self.x0 = evaluable.copy()

     if self.minimize:
         # minimization is handled by maximizing the negated evaluations
         # (the sign is flipped back in learn())
         tmp = self.evaluator
         self.evaluator = lambda x: -tmp(x)
         if self.desiredEvaluation is not None:
             self.desiredEvaluation *= -1

     # the first guess at the solution (it must be an array)
     assert isinstance(self.x0, ndarray)
     self.xdim = size(self.x0)
     if self.initialSearchRange is not None:
         # draw each component of x0 uniformly from its search interval
         for i in range(self.xdim):
             mi, ma = self.initialSearchRange[i]
             self.x0[i] = rand() * (ma - mi) + mi
     self.noisyEvaluator = evaluator.noisy
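The closure built for ParameterContainer evaluables is the crux: the optimizer only ever manipulates a flat numpy array, and wrappedEvaluator writes that array back into a private copy of the container before scoring it. A self-contained sketch of the same pattern, with a stand-in container (hypothetical, not PyBrain's real ParameterContainer):

    from numpy import array

    class ToyContainer:
        # stand-in for a PyBrain ParameterContainer
        def __init__(self, params):
            self.params = array(params, dtype=float)
        def _setParameters(self, p):
            self.params[:] = p

    def fitness(container):
        # user-level evaluator: scores the container, not a raw array
        return 1.0 - (container.params ** 2).sum()

    container = ToyContainer([1.0, 2.0])

    def wrappedEvaluator(x):
        # x is a flat array proposed by the optimizer; copy it into
        # the container, then apply the original evaluator
        container._setParameters(x)
        return fitness(container)

    print(wrappedEvaluator(array([0.0, 0.0])))   # 1.0, the maximum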
Example #3
 def learn(self, maxSteps=None):
     """ Some BlackBoxOptimizers can only be called once, and currently
     do not support iteratively adding more steps. """
     if not self.online:
         if self.maxEvaluations is not None:
             # cap the remaining steps by the evaluation budget
             if maxSteps is not None:
                 maxSteps = min(maxSteps, self.maxEvaluations - self.steps)
             else:
                 maxSteps = self.maxEvaluations - self.steps
         self._batchLearn(maxSteps)
     else:
         Learner.learn(self, maxSteps)

     if self.wrappingEvaluable is not None and isinstance(self.bestEvaluable, ndarray):
         # copy the best flat parameter vector back into the wrapped
         # evaluable, so the caller gets a usable object, not an array
         xopt = self.bestEvaluable
         self.wrappingEvaluable._setParameters(xopt)
         self.bestEvaluable = self.wrappingEvaluable

     if self.minimize:
         # undo the sign flip applied in __init__, so the returned
         # evaluation is on the caller's original scale
         self.bestEvaluation *= -1
     return self.bestEvaluable, self.bestEvaluation
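A hedged usage sketch, assuming PyBrain's CMAES (any concrete BlackBoxOptimizer subclass with these attributes should behave the same). Note how learn() caps the step count by maxEvaluations and undoes the sign flip from __init__, so the returned evaluation is on the caller's minimization scale:

    from numpy import array
    from pybrain.optimization import CMAES   # assumed available

    def objF(x):
        return sum(x ** 2)                   # to be minimized

    l = CMAES(objF, array([2.1, -1.0]))
    l.minimize = True                        # triggers the sign flip in __init__
    l.maxEvaluations = 200                   # learn() caps maxSteps with this
    best, fitness = l.learn()
    print(best, fitness)                     # fitness should be near 0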