Example #1
0
    def __init__(self, evaluator, evaluable, **parameters):
        """Initialize the mixture optimizer: uniform weights over the
        centers, default search ranges / covariances, and one (mu, sigma)
        pair per center, then call reset().

        :param evaluator: fitness function forwarded to BlackBoxOptimizer.
        :param evaluable: initial evaluable (parameter vector/object).
        :param parameters: extra settings forwarded to BlackBoxOptimizer.
        """
        BlackBoxOptimizer.__init__(self, evaluator, evaluable, **parameters)
        # Uniform mixture weights over all centers.
        self.alphas = ones(self.numberOfCenters) / self.numberOfCenters
        self.mus = []     # one mean vector per center
        self.sigmas = []  # one covariance matrix per center

        self.tau = 1.
        # Use `is None`: with numpy arrays, `== None` compares elementwise
        # and makes the `if` ambiguous (raises on modern numpy).
        if self.rangemins is None:
            self.rangemins = -ones(self.xdim)
        if self.rangemaxs is None:
            self.rangemaxs = ones(self.xdim)
        if self.initCovariances is None:
            self.initCovariances = eye(self.xdim)

        if self.elitist and self.numberOfCenters == 1 and not self.noisyEvaluator:
            # in the elitist case separate evaluations are not necessary.
            # CHECKME: maybe in the noisy case?
            self.evalMus = False

        # Cauchy sampling is only supported with a single center.
        assert not (self.useCauchy and self.numberOfCenters > 1)

        for dummy in range(self.numberOfCenters):
            # Draw each initial center uniformly inside the search range.
            self.mus.append(
                rand(self.xdim) * (self.rangemaxs - self.rangemins) +
                self.rangemins)
            self.sigmas.append(dot(eye(self.xdim), self.initCovariances))
        self.reset()
Example #2
0
    def __init__(self, evaluator, evaluable, **parameters):
        """Initialize the mixture optimizer: uniform weights over the
        centers, default search ranges / covariances, and one (mu, sigma)
        pair per center, then call reset().

        :param evaluator: fitness function forwarded to BlackBoxOptimizer.
        :param evaluable: initial evaluable (parameter vector/object).
        :param parameters: extra settings forwarded to BlackBoxOptimizer.
        """
        BlackBoxOptimizer.__init__(self, evaluator, evaluable, **parameters)
        # Uniform mixture weights over all centers.
        self.alphas = ones(self.numberOfCenters) / self.numberOfCenters
        self.mus = []     # one mean vector per center
        self.sigmas = []  # one covariance matrix per center

        self.tau = 1.
        # Use `is None`: with numpy arrays, `== None` compares elementwise
        # and makes the `if` ambiguous (raises on modern numpy).
        if self.rangemins is None:
            self.rangemins = -ones(self.xdim)
        if self.rangemaxs is None:
            self.rangemaxs = ones(self.xdim)
        if self.initCovariances is None:
            self.initCovariances = eye(self.xdim)

        if self.elitist and self.numberOfCenters == 1 and not self.noisyEvaluator:
            # in the elitist case separate evaluations are not necessary.
            # CHECKME: maybe in the noisy case?
            self.evalMus = False

        # Cauchy sampling is only supported with a single center.
        assert not (self.useCauchy and self.numberOfCenters > 1)

        for dummy in range(self.numberOfCenters):
            # Draw each initial center uniformly inside the search range.
            self.mus.append(rand(self.xdim) * (self.rangemaxs - self.rangemins) + self.rangemins)
            self.sigmas.append(dot(eye(self.xdim), self.initCovariances))
        self.reset()
Example #3
0
    def __init__(self, evaluator, evaluable, **parameters):
        """Initialize the mixture search distribution (mu centers with
        covariance factors) and all bookkeeping for the sampling,
        likelihood-ratio regression, and iRPROP+ update machinery.

        :param evaluator: fitness function forwarded to BlackBoxOptimizer.
        :param evaluable: initial evaluable (parameter vector/object).
        :param parameters: extra settings forwarded to BlackBoxOptimizer.
        """
        BlackBoxOptimizer.__init__(self, evaluator, evaluable, **parameters)
        n = self.xdim

        # internal execution variables
        self.generation = 0  # current generation
        self.fevals = 0  # nb of evaluations

        # determine batch size: at least one sample per regression
        # parameter (1 baseline + mu * (1 + n + n*n) distribution params)
        minlambd = 1 + self.mu * (1 + n + n * n)
        if not self.lambd:
            self.lambd = minlambd
        if self.verbose and self.lambd < minlambd:
            # print() with a single argument behaves identically under
            # Python 2 and 3 (the Py2 print statement breaks on Py3).
            print("Warning! Underconstrained linear regression")

        if not self.lrSigma:
            self.lrSigma = self.lr

        self.genfitnesses = []

        # relative probabilities of drawing from each of the mu centers
        self.alpha = ones(self.mu) / self.mu
        self.basealpha = ones(self.mu) / self.mu
        # list of the mu current centers
        # (`is None` avoids numpy's ambiguous elementwise `== None`)
        if self.x0 is None:
            self.x = [self.initialsearchrange * mat(randn(n, 1))]
        else:
            self.x0 = reshape(mat(self.x0), (n, 1))
            self.x = [self.x0]

        for i in range(self.mu - 1):
            # CHECKME: should the centers not be different initially?
            self.x.append(self.x[0].copy())
        self.fx = zeros(self.mu)  # function values for x
        # the cholesky(??) factor version of sigma
        self.factorSigma = [self.genInitSigmaFactor()]
        for dummy in range(self.mu - 1):
            # slightly inflate each subsequent factor so centers differ
            self.factorSigma.append(self.factorSigma[-1] * 1.1)
        self.sigma = []  # the list of the mu current covariance matrices
        for dummy in range(self.mu):
            # covariance = F^T F from its factor
            self.sigma.append(self.factorSigma[dummy].T *
                              self.factorSigma[dummy])
        self.zs = [None] * self.lambd  # most recent batch of lambd samples drawn
        self.chosenCenter = [None] * self.lambd  # the center chosen for drawing z
        self.R = mat(zeros((self.lambd, 1)))  # function values for zs
        self.w = None  # the vector with the computed updates for all parameters
        # vector containing rel. likelihood under current policy
        self.rellhood = zeros(self.lambd)

        # a special matrix, containing the log-derivatives
        # NOTE(review): `schwupps` is a free name, presumably a module-level
        # constant — confirm it is defined where this class lives.
        self.phi = mat(
            ones((self.lambd, 1 + schwupps + self.mu * (n * n + n + 1))))

        # iRPROP+ learning rate multiplier gamma for every generation
        self.delta = []
        self.delta.append(self.rpropInitDelta * ones(1 + (self.mu *
                                                          (n * n + n + 1))))
        # store old parameters
        self.wStored = []
        self.rpropPerformance = []
        self.oldParams = []

        # used for the one-fifth success rule (step-size adaptation)
        self.lrMultiplier = 1.
        self.blackmagic = 1.
        if self.advancedfifth and not self.dsigma:
            self.dsigma = float(n + 8) / 4  # CMA heuristic

        if self.returnall:
            # keep a history of all centers/covariances for inspection
            self.allsigmas = []
            self.allxs = []
            for i in range(self.mu):
                self.allsigmas.append(self.sigma[i].copy())
                self.allxs.append(self.x[i].copy())
Example #4
0
    def __init__(self, evaluator, evaluable, **parameters):
        """Initialize the mixture search distribution (mu centers with
        covariance factors) and all bookkeeping for the sampling,
        likelihood-ratio regression, and iRPROP+ update machinery.

        :param evaluator: fitness function forwarded to BlackBoxOptimizer.
        :param evaluable: initial evaluable (parameter vector/object).
        :param parameters: extra settings forwarded to BlackBoxOptimizer.
        """
        BlackBoxOptimizer.__init__(self, evaluator, evaluable, **parameters)
        n = self.xdim

        # internal execution variables
        self.generation = 0  # current generation
        self.fevals = 0      # nb of evaluations

        # determine batch size: at least one sample per regression
        # parameter (1 baseline + mu * (1 + n + n*n) distribution params)
        minlambd = 1 + self.mu * (1 + n + n * n)
        if not self.lambd:
            self.lambd = minlambd
        if self.verbose and self.lambd < minlambd:
            # print() with a single argument behaves identically under
            # Python 2 and 3 (the Py2 print statement breaks on Py3).
            print("Warning! Underconstrained linear regression")

        if not self.lrSigma:
            self.lrSigma = self.lr

        self.genfitnesses = []

        # relative probabilities of drawing from each of the mu centers
        self.alpha = ones(self.mu) / self.mu
        self.basealpha = ones(self.mu) / self.mu
        # list of the mu current centers
        # (`is None` avoids numpy's ambiguous elementwise `== None`)
        if self.x0 is None:
            self.x = [self.initialsearchrange * mat(randn(n, 1))]
        else:
            self.x0 = reshape(mat(self.x0), (n, 1))
            self.x = [self.x0]

        for i in range(self.mu - 1):
            # CHECKME: should the centers not be different initially?
            self.x.append(self.x[0].copy())
        self.fx = zeros(self.mu)  # function values for x
        # the cholesky(??) factor version of sigma
        self.factorSigma = [self.genInitSigmaFactor()]
        for dummy in range(self.mu - 1):
            # slightly inflate each subsequent factor so centers differ
            self.factorSigma.append(self.factorSigma[-1] * 1.1)
        self.sigma = []  # the list of the mu current covariance matrices
        for dummy in range(self.mu):
            # covariance = F^T F from its factor
            self.sigma.append(self.factorSigma[dummy].T * self.factorSigma[dummy])
        self.zs = [None] * self.lambd  # most recent batch of lambd samples drawn
        self.chosenCenter = [None] * self.lambd  # the center chosen for drawing z
        self.R = mat(zeros((self.lambd, 1)))  # function values for zs
        self.w = None  # the vector with the computed updates for all parameters
        self.rellhood = zeros(self.lambd)  # vector containing rel. likelihood under current policy

        # a special matrix, containing the log-derivatives
        # NOTE(review): `schwupps` is a free name, presumably a module-level
        # constant — confirm it is defined where this class lives.
        self.phi = mat(ones((self.lambd, 1 + schwupps + self.mu * (n * n + n + 1))))

        # iRPROP+ learning rate multiplier gamma for every generation
        self.delta = []
        self.delta.append(self.rpropInitDelta * ones(1 + (self.mu * (n * n + n + 1))))
        # store old parameters
        self.wStored = []
        self.rpropPerformance = []
        self.oldParams = []

        # used for the one-fifth success rule (step-size adaptation)
        self.lrMultiplier = 1.
        self.blackmagic = 1.
        if self.advancedfifth and not self.dsigma:
            self.dsigma = float(n + 8) / 4  # CMA heuristic

        if self.returnall:
            # keep a history of all centers/covariances for inspection
            self.allsigmas = []
            self.allxs = []
            for i in range(self.mu):
                self.allsigmas.append(self.sigma[i].copy())
                self.allxs.append(self.x[i].copy())