Example No. 1
    def action_mapper(self, action):

        if self.method in ['ppo', 'a2c', 'acktr', 'neat']:
            #--------------------------
            # cont./discrete methods
            #---------------------------
            if self.map_flag:
                action = action_map(norm_action=action,
                                    lb=self.lb,
                                    ub=self.ub,
                                    lb_norm=self.lb_norm,
                                    ub_norm=self.ub_norm)
            if 'int' in self.var_type:
                action = ensure_discrete(action=action, var_type=self.var_type)
            if self.grid_flag:
                #decode the individual back to the int/float/grid mixed space
                action = decode_discrete_to_grid(action, self.orig_bounds,
                                                 self.bounds_map)

            self.state = action.copy()

        elif self.method in ['acer', 'dqn']:
            #--------------------------
            # discrete methods
            #---------------------------
            if self.index < self.nx:
                decoded_action = self.discrete_map[action]

                if self.lb[self.index] <= decoded_action <= self.ub[self.index]:
                    self.full_action[self.index] = decoded_action
                else:
                    #print(self.index, decoded_action, 'random guess')
                    self.full_action[self.index] = random.randint(
                        self.lb[self.index], self.ub[self.index])
                self.index += 1
            else:
                self.index = 0  #start a new loop over the individual
                self.full_action = self.observation_space.sample()

            if self.grid_flag:
                #decode the individual back to the int/float/grid mixed space
                self.decoded_action = decode_discrete_to_grid(
                    self.full_action, self.orig_bounds,
                    self.bounds_map)  #convert integers back to categorical values
                action = self.decoded_action.copy()  #return the decoded action for reward evaluation
            else:
                action = self.full_action.copy()  #return the original action for reward evaluation

            self.state = self.full_action.copy()  #keep the unconverted action as the next state

        return action
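
The helper action_map used above is imported from elsewhere in the library and its body is not shown in these examples. As a rough illustration only (the name rescale_action and the plain linear rescaling are assumptions, not the library's implementation), the normalized-to-physical mapping could look like this:

import numpy as np

def rescale_action(norm_action, lb, ub, lb_norm=-1.0, ub_norm=1.0):
    #hypothetical stand-in for action_map: linearly rescale an action expressed
    #in the normalized range [lb_norm, ub_norm] to the physical range [lb, ub]
    norm_action = np.asarray(norm_action, dtype=float)
    lb, ub = np.asarray(lb, dtype=float), np.asarray(ub, dtype=float)
    frac = (norm_action - lb_norm) / (ub_norm - lb_norm)  #relative position in the normalized box
    return lb + frac * (ub - lb)                          #same relative position in [lb, ub]

#e.g. a tanh-squashed action in [-1, 1] mapped to the bounds [0, 10] and [5, 20]
print(rescale_action([0.0, 1.0], lb=[0, 5], ub=[10, 20]))  #-> [ 5. 20.]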
Example No. 2
    def fit_worker(self, x):
        """
        Evaluates fitness of an individual.
        """

        #mir-grid
        if self.grid_flag:
            #decode the individual back to the int/float/grid mixed space
            x = decode_discrete_to_grid(x, self.orig_bounds, self.bounds_map)

        fitness = self.fit(x)
        return fitness
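
decode_discrete_to_grid is likewise imported from the library's tools, so its body does not appear here. A minimal sketch of the underlying idea, with a hypothetical name (decode_to_grid) and a hypothetical bounds_map layout rather than the library's actual one:

def decode_to_grid(x, bounds_map):
    #hypothetical sketch: map integer-encoded entries back to their original
    #categorical ('grid') values using a per-variable lookup table
    decoded = []
    for i, xi in enumerate(x):
        if i in bounds_map:          #variable i is categorical
            decoded.append(bounds_map[i][int(xi)])
        else:                        #variable i stays int/float
            decoded.append(xi)
    return decoded

#e.g. variable 1 encodes the categories ('low', 'mid', 'high') as 0/1/2
print(decode_to_grid([3.7, 2], bounds_map={1: {0: 'low', 1: 'mid', 2: 'high'}}))  #-> [3.7, 'high']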
Example No. 3
 def fit_worker(self, x):
     #This worker is for parallel calculations
     
     # Clip any bat position that falls outside the lower/upper bounds
     x = self.ensure_bounds(x, self.bounds)
     
     if self.grid_flag:
         #decode the individual back to the int/float/grid mixed space
         x = decode_discrete_to_grid(x, self.orig_bounds, self.bounds_map)
     
     # Calculate objective function for each search agent
     fitness = self.fit(x)
     
     return fitness
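
ensure_bounds is another helper that is only called, not defined, in these examples. A self-contained stand-in (hypothetical name clip_to_bounds, assuming simple box clipping rather than whatever repair strategy the library actually uses):

import numpy as np

def clip_to_bounds(x, lb, ub):
    #hypothetical stand-in for ensure_bounds: clip every coordinate of the
    #candidate solution back into its [lb, ub] box
    return np.minimum(np.maximum(np.asarray(x, dtype=float), lb), ub)

print(clip_to_bounds([-2.0, 0.5, 7.0], lb=[0, 0, 0], ub=[5, 5, 5]))  #-> [0.  0.5 5. ]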
Example No. 4
    def fit_worker(self, hawk_pos):
        """
        Evaluates fitness of a hawk.

        Params:
        hawk_pos - hawk position vector

        Return:
        float - hawk fitness
        """
        
        #mir---
        hawk_pos = self.ensure_bounds(hawk_pos, self.bounds)

        #mir-grid
        if self.grid_flag:
            #decode the individual back to the int/float/grid mixed space
            hawk_pos = decode_discrete_to_grid(hawk_pos, self.orig_bounds, self.bounds_map)
                    
        fitness = self.fit(hawk_pos)
        return fitness
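
These fit_worker methods are written so they can be mapped over a whole population in parallel. The toy sketch below (hypothetical fit_worker and random positions) shows that fan-out with joblib, mirroring the parallel pattern that appears in Example No. 6:

import joblib
import numpy as np

def fit_worker(pos):
    #toy sphere objective standing in for the real fitness function
    return float(np.sum(np.asarray(pos) ** 2))

positions = np.random.uniform(-5, 5, size=(8, 3))   #8 hypothetical hawks in 3 dimensions
fitness = joblib.Parallel(n_jobs=2)(joblib.delayed(fit_worker)(p) for p in positions)
print(np.round(fitness, 3))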
Example No. 5
 def evolute(self, ngen, x0=None, verbose=True):
     """
     This function evolutes the BAT algorithm for a number of generations.
     
     :param ngen: (int) number of generations to evolute
     :param x0: (list of lists) initial position of the bats (must be of same size as ``nbats``)
     :param verbose: (bool) print statistics to screen
     
     :return: (tuple) (best individual, best fitness, and dictionary containing major BAT search results)
     """
     self.history = {'local_fitness':[], 'global_fitness':[], 'A': [], 'r': []}
     self.fbest=float("inf")
     self.verbose=verbose
     self.Positions = np.zeros((self.nbats, self.dim))
     self.r=self.r0
     if x0:
         assert len(x0) == self.nbats, '--error: the length of x0 ({}) MUST equal the number of bats in the group ({})'.format(len(x0), self.nbats)
         for i in range(self.nbats):
             self.Positions[i,:] = x0[i]
     else:
         # Initialize the positions of bats
         for i in range(self.nbats):
             self.Positions[i,:]=self.init_sample(self.bounds)
     
     #Initialize and evaluate the first bat population
     fitness0=self.eval_bats(position_array=self.Positions)
     x0, f0=self.select(pos=self.Positions,fit=fitness0)
     self.xbest=np.copy(x0)
     
     # Main BAT loop
     for l in range(0, ngen):
         self.a = 1 - l / ngen  #mir: a decreases linearly from 1 to 0, for discrete mutation
         #------------------------------------------------------
         # Stage 1A: Loop over all bats to update the positions
         #------------------------------------------------------
         for i in range(0, self.nbats):
             
             if self.levy_flight:
                 #Eq.(11) make a levy flight jump to increase population diversity
                 self.Positions[i,:]=self.Positions[i,:]+np.multiply(np.random.randn(self.dim), self.Levy(self.dim))
             
             #Eq.(8)-(10)
             f1=((self.fmin-self.fmax)*l/ngen+self.fmax)*random.random()
             f2=((self.fmax-self.fmin)*l/ngen+self.fmin)*random.random()
             betas=random.sample(list(range(0,self.nbats)),4)
             self.Positions[i, :] = (self.xbest + f1*(self.Positions[betas[0],:]-self.Positions[betas[1],:])
                                     + f2*(self.Positions[betas[2],:]-self.Positions[betas[3],:]))
             self.Positions[i, :] = self.ensure_bounds(self.Positions[i , :], self.bounds)
             self.Positions[i, :] = self.ensure_discrete(self.Positions[i , :])
         #-----------------------
         #Stage 1B: evaluation
         #-----------------------
         fitness1=self.eval_bats(position_array=self.Positions)
         x1, f1=self.select(pos=self.Positions,fit=fitness1)
         if f1 <= self.fbest:
             self.fbest=f1
             self.xbest=x1.copy()
         #---------------------------------
         #Stage 2A: Generate new positions
         #---------------------------------
         for i in range(0, self.nbats):
             # Pulse rate
             if random.random() > self.r:
                 self.Positions[i, :] = self.xbest + self.A * np.random.uniform(-1,1,self.dim)
             self.Positions[i, :] = self.ensure_bounds(self.Positions[i , :], self.bounds)
             self.Positions[i, :] = self.ensure_discrete(self.Positions[i , :])
         #-----------------------
         #Stage 2B: evaluation
         #-----------------------
         fitness2=self.eval_bats(position_array=self.Positions)
         x2, f2=self.select(pos=self.Positions,fit=fitness2)
         if f2 <= self.fbest:
             self.fbest=f2
             self.xbest=x2.copy()
         #---------------------------------
         #Stage 3A: Generate new positions
         #---------------------------------
         for i in range(0, self.nbats):
             # loudness effect
             if random.random() < self.A:
                 self.Positions[i, :] = self.xbest + self.r * np.random.uniform(-1,1,self.dim)
             self.Positions[i, :] = self.ensure_bounds(self.Positions[i , :], self.bounds)
             self.Positions[i, :] = self.ensure_discrete(self.Positions[i , :])
         #-----------------------
         #Stage 3B: evaluation
         #-----------------------
         fitness3=self.eval_bats(position_array=self.Positions)
         x3, f3=self.select(pos=self.Positions,fit=fitness3)
         if f3 <= self.fbest:
             self.fbest=f3
             self.xbest=x3.copy()
         #---------------------------------
         #Stage 4: Check and update A/r
         #---------------------------------                
         if min(f1, f2, f3) <= self.fbest:
             #update A
             self.A = self.alpha*self.A
             #update r
             self.r = self.r0*(1-math.exp(-self.gamma*l))  
         #---------------------------------
         #Stage 5: post-processing
         #---------------------------------             
         #--mir
         if self.mode=='max':
             self.fitness_best_correct=-self.fbest
             self.local_fitness = -min(f1 , f2 , f3)
         else:
             self.fitness_best_correct=self.fbest
             self.local_fitness = min(f1 , f2 , f3)
         
         self.best_position=self.xbest.copy()
         self.history['local_fitness'].append(self.local_fitness)
         self.history['global_fitness'].append(self.fitness_best_correct)
         self.history['A'].append(self.A)
         self.history['r'].append(self.r)   
         
         # Print statistics
         if self.verbose and i % self.nbats:
             print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
             print('BAT step {}/{}, nbats={}, Ncores={}'.format((l+1)*self.nbats, ngen*self.nbats, self.nbats, self.ncores))
             print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
             print('Best Bat Fitness:', np.round(self.fitness_best_correct,6))
             if self.grid_flag:
                 self.bat_decoded = decode_discrete_to_grid(self.best_position, self.orig_bounds, self.bounds_map)
                 print('Best Bat Position:', np.round(self.bat_decoded,6))
             else:
                 print('Best Bat Position:', np.round(self.best_position,6))
             print('Loudness A:', self.A)
             print('Pulse rate r:', self.r)
             print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
     
     #mir-grid
     if self.grid_flag:
         self.bat_correct = decode_discrete_to_grid(self.best_position, self.orig_bounds, self.bounds_map)
     else:
         self.bat_correct = self.best_position
             
     if self.verbose:
         print('------------------------ BAT Summary --------------------------')
         print('Best fitness (y) found:', self.fitness_best_correct)
         print('Best individual (x) found:', self.bat_correct)
         print('--------------------------------------------------------------')  
         
     return self.bat_correct, self.fitness_best_correct, self.history
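
The Levy flight jump in Stage 1A calls self.Levy, which is not included in this excerpt. A common way to draw such a step is Mantegna's algorithm; the sketch below (a hypothetical levy_step with an assumed beta parameter) illustrates the general idea rather than the class's own method:

import math
import numpy as np

def levy_step(dim, beta=1.5):
    #Mantegna's algorithm for a vector of Levy-distributed step sizes
    sigma = (math.gamma(1 + beta) * math.sin(math.pi * beta / 2) /
             (math.gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2))) ** (1 / beta)
    u = np.random.normal(0, sigma, dim)
    v = np.random.normal(0, 1, dim)
    return u / np.abs(v) ** (1 / beta)

print(np.round(levy_step(5), 3))   #one 5-dimensional Levy jump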
Example No. 6
    def evolute(self, ngen, x0=None, verbose=0):
        """
        This function evolutes the ES algorithm for a number of generations.
        
        :param ngen: (int) number of generations to evolute
        :param x0: (list of lists) the initial individuals of the population
        :param verbose: (bool) print statistics to screen
        
        :return: (tuple) (best individual, best fitness, and list of the best fitness per generation)
        """
        self.y_opt = -np.inf
        self.best_scores = []
        self.best_indvs = []
        if x0:
            assert len(x0) == self.lambda_, \
                '--error: the length of x0 ({}) (initial population) must equal the size of lambda ({})'.format(
                    len(x0), self.lambda_)
            population = self.init_pop(x0=x0)
        else:
            population = self.init_pop()

        # Begin the evolution process
        for gen in range(1, ngen + 1):

            # Vary the population and generate new offspring
            offspring = self.GenOffspring(pop=population)

            # Evaluate the individuals with an invalid fitness using a multiprocessing Pool
            # create and run the Pool
            if self.ncores > 1:
                core_list = []
                for key in offspring:
                    core_list.append(offspring[key][0])

                with joblib.Parallel(n_jobs=self.ncores) as parallel:
                    fitness = parallel(
                        joblib.delayed(self.fit_worker)(item)
                        for item in core_list)

                for ind in range(len(offspring)):
                    offspring[ind].append(fitness[ind])

            else:  #serial calcs

                for ind in range(len(offspring)):
                    fitness = self.fit_worker(offspring[ind][0])
                    offspring[ind].append(fitness)

            # Select the next generation population
            #print(offspring)
            population = copy.deepcopy(self.select(pop=offspring, k=self.mu))
            #print(population)
            inds = [population[i][0] for i in population]
            rwd = [population[i][2] for i in population]
            self.best_scores.append(np.max(rwd))
            arg_max = np.argmax(rwd)
            self.best_indvs.append(inds[arg_max])
            if rwd[arg_max] > self.y_opt:
                self.y_opt = rwd[arg_max]
                self.x_opt = copy.deepcopy(inds[arg_max])

            #--mir
            if self.mode == 'min':
                self.y_opt_correct = -self.y_opt
            else:
                self.y_opt_correct = self.y_opt

            #mir-grid
            if self.grid_flag:
                self.x_opt_correct = decode_discrete_to_grid(
                    self.x_opt, self.orig_bounds, self.bounds_map)
            else:
                self.x_opt_correct = self.x_opt

            if verbose:
                mean_strategy = [np.mean(population[i][1]) for i in population]
                print(
                    '##############################################################################'
                )
                print(
                    'ES step {}/{}, CX={}, MUT={}, MU={}, LAMBDA={}, Ncores={}'
                    .format(gen * self.lambda_, ngen * self.lambda_,
                            np.round(self.cxpb, 2), np.round(self.mutpb, 2),
                            self.mu, self.lambda_, self.ncores))
                print(
                    '##############################################################################'
                )
                print('Statistics for generation {}'.format(gen))
                print(
                    'Best Fitness:',
                    np.round(np.max(rwd), 6)
                    if self.mode == 'max' else -np.round(np.max(rwd), 6))
                print(
                    'Best Individual:',
                    inds[0] if not self.grid_flag else decode_discrete_to_grid(
                        inds[0], self.orig_bounds, self.bounds_map))
                print('Max Strategy:', np.round(np.max(mean_strategy), 3))
                print('Min Strategy:', np.round(np.min(mean_strategy), 3))
                print('Average Strategy:', np.round(np.mean(mean_strategy), 3))
                print(
                    '##############################################################################'
                )

        if verbose:
            print(
                '------------------------ ES Summary --------------------------'
            )
            print('Best fitness (y) found:', self.y_opt_correct)
            print('Best individual (x) found:', self.x_opt_correct)
            print(
                '--------------------------------------------------------------'
            )

        #--mir
        if self.mode == 'min':
            self.best_scores = [-item for item in self.best_scores]

        return self.x_opt_correct, self.y_opt_correct, self.best_scores
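
Several of these classes maximize internally and recover a minimization result by flipping signs (see the self.mode checks above and in the other examples). A minimal, hypothetical sketch of that sign convention:

def wrap_objective(fit, mode='min'):
    #hypothetical wrapper: the search maximizes internally, so a 'min' problem
    #is handled by negating the objective and negating the best score back at the end
    def wrapped(x):
        y = fit(x)
        return -y if mode == 'min' else y
    return wrapped

def sphere(x):
    return sum(v**2 for v in x)            #toy objective to minimize

wrapped = wrap_objective(sphere, mode='min')
internal_best = wrapped([1.0, 2.0])        #internal (negated) value: -5.0
reported_best = -internal_best             #value reported back to the user: 5.0
print(internal_best, reported_best)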
Example No. 7
 def printout(self, mode, gen):
     """
     Print statistics to screen
     inputs:
         mode (int): 1 to print for individual algorithms and 2 to print for PESA
         gen (int): current generation number
     """
     if mode == 1:
         print('***********************************************************************************************')
         print('############################################################')
         print('ES step {}/{}, CX={}, MUT={}, MU={}, LAMBDA={}'.format(self.STEP0-1,self.STEPS, np.round(self.CXPB,2), np.round(self.MUTPB,2), self.MU, self.LAMBDA))
         print('############################################################')
         print('Statistics for generation {}'.format(gen))
         print('Best Fitness:', np.round(np.max(self.rwd),4) if self.mode == 'max' else -np.round(np.max(self.rwd),4))
         print('Max Strategy:', np.round(np.max(self.mean_strategy),3))
         print('Min Strategy:', np.round(np.min(self.mean_strategy),3))
         print('Average Strategy:', np.round(np.mean(self.mean_strategy),3))
         print('############################################################')
               
         print('************************************************************')
         print('SA step {}/{}, T={}'.format(self.STEP0-1,self.STEPS,np.round(self.T)))
         print('************************************************************')
         print('Statistics for the {} parallel chains'.format(self.NCORES))
         print('Fitness:', np.round(self.E_next,4) if self.mode == 'max' else -np.round(self.E_next,4))
         print('Acceptance Rate (%):', self.acc)
         print('Rejection Rate (%):', self.rej)
         print('Improvement Rate (%):', self.imp)
         print('************************************************************')
         
         if self.pso_flag:
             print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
             print('PSO step {}/{}, C1={}, C2={}, Particles={}'.format(self.STEP0-1,self.STEPS, np.round(self.C1,2), np.round(self.C2,2), self.NPAR))
             print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
             print('Statistics for generation {}'.format(gen))
             print('Best Swarm Fitness:', np.round(self.swm_fit,4) if self.mode == 'max' else -np.round(self.swm_fit,4))
             print('Best Swarm Position:', self.swm_pos if not self.grid_flag else decode_discrete_to_grid(self.swm_pos,self.orig_bounds,self.bounds_map))
             print('Max Speed:', np.round(np.max(self.mean_speed),3))
             print('Min Speed:', np.round(np.min(self.mean_speed),3))
             print('Average Speed:', np.round(np.mean(self.mean_speed),3))
             print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')
     
     if mode == 2:
         print('------------------------------------------------------------')
         print('PESA step {}/{}, Ncores={}'.format(self.STEP0-1,self.STEPS, self.ncores))
         print('------------------------------------------------------------')
         print('PESA statistics for generation {}'.format(gen))
         print('Best Fitness:', self.pesa_best[1] if self.mode == 'max' else -self.pesa_best[1])
         print('Best Individual:', self.xbest_correct)
         print('ALPHA:', np.round(self.ALPHA,3))
         print('Memory Size:', self.memory_size)
         print('------------------------------------------------------------')
         
         print('***********************************************************************************************')
Example No. 8
    def evolute(self, ngen, x0=None, warmup=100, verbose=True):
        """
        This function evolutes the PESA algorithm for a number of generations.
        
        :param ngen: (int) number of generations to evolute
        :param x0: (list of lists) initial samples to start the replay memory (``len(x0)`` must be equal or more than ``npop``)
        :param warmup: (int) number of random warmup samples to initialize the replay memory and must be equal or more than ``npop`` (only used if ``x0=None``)
        :param verbose: (int) print statistics to screen, 0: no print, 1: PESA print, 2: detailed print
        
        :return: (tuple) (best individual, best fitness, and list of the best fitness history)
        """
        
        self.verbose=verbose
        self.NGEN=ngen
        self.STEPS=self.NGEN*self.NPOP #all 
        if self.memory_size:
            self.MEMORY_SIZE=self.memory_size
        else:
            self.MEMORY_SIZE=self.STEPS*3+1 #PESA
            
        #-------------------------------------------------------
        # Check if initial pop is provided as initial guess 
        #-------------------------------------------------------
        if x0: 
            # use provided initial guess
            warm=ESMod(bounds=self.bounds, fit=self.fit_worker, mu=self.MU, lambda_=self.LAMBDA, ncores=self.ncores)
            x0size=len(x0)
            assert x0size >= self.NPOP, 'the number of lists in x0 ({}) must be more than or equal npop ({})'.format(x0size, self.NPOP)
            self.pop0=warm.init_pop(warmup=x0size, x_known=x0)  #initial population for ES
        else:
            #create initial guess 
            assert warmup > self.NPOP, 'the number of warmup samples ({}) must be more than npop ({})'.format(warmup, self.NPOP)
            warm=ESMod(bounds=self.bounds, fit=self.fit_worker, mu=self.MU, lambda_=self.LAMBDA, ncores=self.ncores)
            self.pop0=warm.init_pop(warmup=warmup)  #initial population for ES
            
        self.partime={}
        self.partime['pesa']=[]
        self.partime['es']=[]
        self.partime['pso']=[]
        self.partime['sa']=[]
        self.fit_hist=[]
        #------------------------------
        # Step 1: Initialize the memory
        #------------------------------
        self.mymemory=ExperienceReplay(size=self.MEMORY_SIZE) #memory object
        xvec0, obj0=[self.pop0[item][0] for item in self.pop0], [self.pop0[item][2] for item in self.pop0]  #parse the initial samples
        self.mymemory.add(xvec=xvec0, obj=obj0, method=['na']*len(xvec0)) # add initial samples to the replay memory
        
        #--------------------------------
        # Step 2: Initialize all methods
        #--------------------------------
        # Obtain initial population for all methods
        espop0, swarm0, swm_pos0, swm_fit0, local_pos, local_fit, x0, E0=self.init_guess(pop0=self.pop0)
        # Initialize ES class
        es=ESMod(bounds=self.bounds, fit=self.fit_worker, mu=self.MU, lambda_=self.LAMBDA, ncores=self.NCORES, indpb=self.INDPB, 
                 cxpb=self.CXPB, mutpb=self.MUTPB, smin=self.SMIN, smax=self.SMAX)
        # Initialize SA class
        sa=SAMod(bounds=self.bounds, memory=self.mymemory, fit=self.fit_worker, steps=self.STEPS, ncores=self.NCORES, 
                 chi=self.CHI, replay_rate=self.REPLAY_RATE, cooling=self.COOLING, Tmax=self.TMAX, Tmin=self.TMIN)
        # Initialize PSO class (if USED)
        if self.pso_flag:
            pso=PSOMod(bounds=self.bounds, fit=self.fit_worker, npar=self.NPAR, swm0=[swm_pos0,swm_fit0], 
                       ncores=self.NCORES, c1=self.C1, c2=self.C2, speed_mech=self.SPEED_MECH)
            
        #--------------------------------
        # Step 3: Initialize PESA engine
        #--------------------------------
        #Use initial samples as first guess for SA, ES, and PSO
        self.pop_next=deepcopy(espop0) # x0 for ES
        self.x_next, self.E_next=deepcopy(x0), deepcopy(E0) # x0 for SA
        if self.pso_flag:
            self.swm_next, self.local_pos_next, self.local_fit_next=deepcopy(swarm0), deepcopy(local_pos), deepcopy(local_fit) # x0 for PSO (if used)
        self.STEP0=1  #step counter
        self.ALPHA=self.ALPHA0  #set alpha to alpha0
        
        #--------------------------------
        # Step 4: PESA evolution
        #--------------------------------
        for gen in range(1,self.NGEN+1):
            
            caseids=['es_gen{}_ind{}'.format(gen,ind+1) for ind in range(self.LAMBDA)] # save caseids for ES 
            if self.pso_flag:
                pso_caseids=['pso_gen{}_par{}'.format(gen+1,ind+1) for ind in range(self.NPAR)] # save caseids for PSO 
            #-------------------------------------------------------------------------------------------------------------------
            # Step 5: evolute all methods for 1 generation 
            #-------------------------------------------------------------------------------------------------------------------
            #**********************************
            #--Step 5A: Complete PARALLEL calcs
            # via multiprocess.Process
            #*********************************
            if self.PROC:
                t0=time.time()
                
                QSA = Queue(); QES=Queue(); QPSO=Queue()
                def sa_worker():
                    x_new, E_new, self.T, self.acc, self.rej, self.imp, x_best, E_best, sa_partime= sa.anneal(ngen=1,npop=self.NPOP, x0=self.x_next, 
                                                                                                  E0=self.E_next, step0=self.STEP0)
                    QSA.put((x_new, E_new, self.T, self.acc, self.rej, self.imp, x_best, E_best, sa_partime))
                def es_worker():
                    random.seed(self.SEED)
                    pop_new, es_partime=es.evolute(population=self.pop_next,ngen=1,caseids=caseids)
                    QES.put((pop_new, es_partime))
                def pso_worker():
                    random.seed(self.SEED)
                    if gen > 1:
                        swm_new, swm_pos_new, swm_fit_new, pso_partime=pso.evolute(ngen=1, swarm=self.swm_next, local_pos=self.local_pos_next, local_fit=self.local_fit_next, 
                                                                      swm_best=[self.swm_pos, self.swm_fit], mu=self.MU, exstep=self.STEP0, exsteps=self.STEPS, 
                                                                      caseids=pso_caseids, verbose=0)
                    else:
                        swm_new, swm_pos_new, swm_fit_new, pso_partime=pso.evolute(ngen=1, swarm=self.swm_next, local_pos=self.local_pos_next, 
                                                                      local_fit=self.local_fit_next, mu=self.MU, exstep=self.STEP0, exsteps=self.STEPS, 
                                                                      caseids=pso_caseids, verbose=0)
                    QPSO.put((swm_new, swm_pos_new, swm_fit_new, pso_partime))
                Process(target=sa_worker).start()
                Process(target=es_worker).start()
                
                if self.pso_flag:
                    Process(target=pso_worker).start()
                    self.swm_next, self.swm_pos, self.swm_fit, pso_partime=QPSO.get()
                    self.local_pos_next=[self.swm_next[key][3] for key in self.swm_next]
                    self.local_fit_next=[self.swm_next[key][4] for key in self.swm_next]
                     
                self.x_next, self.E_next, self.T, self.acc, self.rej, self.imp, self.x_best, self.E_best, sa_partime=QSA.get()
                self.pop_next, es_partime=QES.get()
                #self.partime.append(time.time()-t0)
                self.partime['pesa'].append(time.time()-t0)
                self.partime['pso'].append(pso_partime)
                self.partime['es'].append(es_partime)
                self.partime['sa'].append(sa_partime)
                                
            #*********************************
            #--Step 5B: Complete Serial calcs
            #*********************************
            else:  
                self.pop_next, _ =es.evolute(population=self.pop_next,ngen=1,caseids=caseids) #ES serial
                self.x_next, self.E_next, self.T, self.acc, self.rej, self.imp, self.x_best, self.E_best, _ = sa.anneal(ngen=1,npop=self.NPOP, x0=self.x_next, 
                                                                                                     E0=self.E_next, step0=self.STEP0) #SA serial
                if self.pso_flag:
                    self.swm_next, self.swm_pos, self.swm_fit, _ =pso.evolute(ngen=1, swarm=self.swm_next, local_pos=self.local_pos_next, 
                                                                          local_fit=self.local_fit_next, exstep=self.STEP0, exsteps=self.STEPS,
                                                                          caseids=pso_caseids, mu=self.MU, verbose=0)
                    self.local_pos_next=[self.swm_next[key][3] for key in self.swm_next]
                    self.local_fit_next=[self.swm_next[key][4] for key in self.swm_next]
            

            #*********************************************************
            # Step 5C: Obtain relevant statistics for this generation 
            #*********************************************************
            self.STEP0=self.STEP0+self.NPOP  #update step counter
            self.inds, self.rwd=[self.pop_next[i][0] for i in self.pop_next], [self.pop_next[i][2] for i in self.pop_next]  #ES statistics
            self.mean_strategy=[np.mean(self.pop_next[i][1]) for i in self.pop_next]  #ES statistics 
            if self.pso_flag:
                self.pars, self.fits=[self.swm_next[i][0] for i in self.swm_next], [self.swm_next[i][2] for i in self.swm_next]  #PSO statistics 
                self.mean_speed=[np.mean(self.swm_next[i][1]) for i in self.swm_next]
                
            if self.verbose==2:
                self.printout(mode=1, gen=gen)
            #-------------------------------------------------------------------------------------------------------------------
            #-------------------------------------------------------------------------------------------------------------------
            
            #-----------------------------
            # Step 6: Update the memory
            #-----------------------------
            self.memory_update()
            
            #-----------------------------------------------------------------
            # Step 7: Sample from the memory and prepare for next Generation 
            #-----------------------------------------------------------------
            self.resample()
            
            #--------------------------------------------------------
            # Step 8: Anneal Alpha if prioritized replay is used
            #--------------------------------------------------------
            if self.MODE=='prior': #anneal alpha between alpha0 (lower) and alpha1 (upper) 
                self.ALPHA=self.linear_anneal(step=self.STEP0, total_steps=self.STEPS, a0=self.ALPHA0, a1=self.ALPHA1)
            
            #--------------------------------------------------------
            # Step 9: Calculate the memory best and print PESA summary 
            #--------------------------------------------------------
            self.pesa_best=self.mymemory.sample(batch_size=1,mode='greedy')[0]  #`greedy` will sample the best in memory
            self.fit_hist.append(self.pesa_best[1])
            self.memory_size=len(self.mymemory.storage) #memory size so far
                
            #--mir
            if self.mode=='min':
                self.fitness_best=-self.pesa_best[1]
            else:
                self.fitness_best=self.pesa_best[1]
            
            #mir-grid
            if self.grid_flag:
                self.xbest_correct=decode_discrete_to_grid(self.pesa_best[0],self.orig_bounds,self.bounds_map)
            else:
                self.xbest_correct=self.pesa_best[0]
                
            if self.verbose:  #print summary data to screen
                self.printout(mode=2, gen=gen)

        if self.verbose:
            print('------------------------ PESA Summary --------------------------')
            print('Best fitness (y) found:', self.fitness_best)
            print('Best individual (x) found:', self.xbest_correct)
            print('--------------------------------------------------------------') 
                
        #--mir
        if self.mode=='min':
            self.fit_hist=[-item for item in self.fit_hist]
        
        return self.xbest_correct, self.fitness_best, self.fit_hist
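
In Step 5A, SA, ES, and PSO each run one generation in their own multiprocessing.Process, and the parent collects their results through Queue objects. A self-contained sketch of just that pattern, with placeholder workers standing in for sa.anneal, es.evolute, and pso.evolute:

from multiprocessing import Process, Queue

def worker(name, q):
    #placeholder for one generation of sa.anneal / es.evolute / pso.evolute
    q.put('{}: one generation done'.format(name))

if __name__ == '__main__':
    q_sa, q_es = Queue(), Queue()
    p_sa = Process(target=worker, args=('SA', q_sa))
    p_es = Process(target=worker, args=('ES', q_es))
    p_sa.start(); p_es.start()
    print(q_sa.get())   #blocks until the SA worker has pushed its result
    print(q_es.get())
    p_sa.join(); p_es.join()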
Example No. 9
    def evolute(self, ngen, x0=None, verbose=True):
        """
        This function evolutes the HHO algorithm for a number of generations.
        
        :param ngen: (int) number of generations to evolute
        :param x0: (list of lists) initial position of the hawks (must be of same size as ``nhawks``)
        :param verbose: (bool) print statistics to screen
        
        :return: (tuple) (best position, best fitness, and dictionary containing major search results)
        """
        if self.seed:
            random.seed(self.seed)
            np.random.seed(self.seed)

        self.history = {'local_fitness':[], 'global_fitness':[]}
        self.rabbit_energy = float("inf")
        self.rabbit_location = np.zeros(self.dim)
        self.verbose = verbose

        ##################################
        # Set initial locations of hawks #
        ##################################
        self.hawk_positions = np.zeros((self.nhawks, self.dim))
        if x0:
            assert len(x0) == self.nhawks, 'Length of x0 array MUST equal the number of hawks (self.nhawks).'
            self.hawk_positions = x0
        else:
            # self.hawk_positions = np.asarray([x * (self.ub - self.lb) + self.lb for x in np.random.uniform(0, 1, (self.nhawks, self.dim))])
            for hawk_i in range(self.nhawks):
                self.hawk_positions[hawk_i, :] = self.init_sample()

        for t in range(ngen):
            self.a = 1 - t / ngen  #mir: a decreases linearly from 1 to 0, for discrete mutation
            ###########################
            # Evaluate hawk fitnesses #
            ###########################
            fitness_lst = self.eval_hawks()

            #######################################################################
            # Update rabbit energy and rabbit location based on best hawk fitness #
            #######################################################################
            for i, fitness in enumerate(fitness_lst):
                fitness = fitness if self.mode == 'min' else -fitness
                if fitness < self.rabbit_energy:
                    self.rabbit_energy = fitness
                    self.rabbit_location = self.hawk_positions[i, :].copy()

            #####################################################
            # Update best global and local fitnesses in history #
            #####################################################
            if self.mode=='max':
                self.best_global_fitness = -self.rabbit_energy
                self.best_local_fitness = -np.min(fitness_lst)
            else:
                self.best_global_fitness = self.rabbit_energy
                self.best_local_fitness = np.min(fitness_lst)

            self.history['local_fitness'].append(self.best_local_fitness)
            self.history['global_fitness'].append(self.best_global_fitness)

            if self.verbose and t % self.nhawks: # change depending on how often message should be displayed
                print(f'HHO step {t*self.nhawks}/{ngen*self.nhawks}, nhawks={self.nhawks}, ncores={self.ncores}')
                print('Best global fitness:', np.round(self.best_global_fitness, 6))
                #mir-grid
                if self.grid_flag:
                    self.rabbit_decoded=decode_discrete_to_grid(self.rabbit_location,self.orig_bounds,self.bounds_map)
                    print('Best rabbit position:', self.rabbit_decoded)
                else:    
                    print('Best rabbit position:', np.round(self.rabbit_location, 6))
                print()

            ################################
            # Update the location of hawks #
            ################################
            self.update_hawks(t, ngen, fitness_lst) # now self.hawk_positions is updated

            for hawk_i in range(self.nhawks):
                #mir: this bound-check line is needed for the choices.remove option to work
                self.hawk_positions[hawk_i, :] = self.ensure_bounds(self.hawk_positions[hawk_i, :], self.bounds)
                for dim in range(self.dim):
                    if self.var_type[dim] == 'int':
                        self.hawk_positions[hawk_i, dim] = mutate_discrete(x_ij=self.hawk_positions[hawk_i, dim], 
                                           x_min=self.hawk_positions[hawk_i, :].min(), 
                                           x_max=self.hawk_positions[hawk_i, :].max(),
                                           lb=self.lb[dim], 
                                           ub=self.ub[dim], 
                                           alpha=self.a,
                                           method=self.int_transform)

        #mir-grid
        if self.grid_flag:
            self.rabbit_correct=decode_discrete_to_grid(self.rabbit_location,self.orig_bounds,self.bounds_map)
        else:    
            self.rabbit_correct=self.rabbit_location
            
        if self.verbose:
            print('------------------------ HHO Summary --------------------------')
            print('Function:', self.fit.__name__)
            print('Best fitness (y) found:', self.best_global_fitness)
            print('Best individual (x) found:', self.rabbit_correct)
            print('-------------------------------------------------------------- \n \n')

        return self.rabbit_correct, self.best_global_fitness, self.history
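
mutate_discrete and ensure_discrete are referenced but not reproduced in this excerpt. As a loose illustration only (not the library's int_transform methods), keeping an 'int'-typed variable discrete after a continuous update can be as simple as rounding and clipping:

import numpy as np

def round_and_clip(x_ij, lb, ub):
    #hypothetical minimal discrete repair: round to the nearest integer,
    #then clip back into the variable's [lb, ub] range
    return int(np.clip(np.round(x_ij), lb, ub))

print(round_and_clip(3.6, lb=1, ub=5))   #-> 4
print(round_and_clip(7.2, lb=1, ub=5))   #-> 5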