Example #1
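All ten examples below share a handful of imports plus two project-specific helpers: a solution result container and a transfer_functions_benchmark module whose S-shaped (s1) and V-shaped (v1) transfer functions map a continuous update onto a bit-flip probability. A minimal sketch follows so the snippets can run standalone; the class stands in for the module of the same name, and the s1/v1 formulas are assumptions taken from the standard transfer-function literature, not from this codebase.

import math
import random
import time

import numpy


class solution:
    # plain result container used by every optimizer below (sketch)
    def __init__(self):
        self.optimizer = ""
        self.objfname = ""
        self.startTime = ""
        self.endTime = ""
        self.executionTime = 0
        self.best = None
        self.bestIndividual = None
        self.convergence1 = None
        self.convergence2 = None


class transfer_functions_benchmark:
    # stand-in for the module of the same name (assumed formulas)
    @staticmethod
    def s1(x):
        # S-shaped transfer: continuous value -> probability in (0, 1)
        return 1 / (1 + numpy.exp(-2 * numpy.clip(x, -50, 50)))

    @staticmethod
    def v1(x):
        # V-shaped transfer: |erf(sqrt(pi)/2 * x)| in [0, 1)
        return abs(math.erf(math.sqrt(math.pi) / 2 * x))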
def get_cuckoos(nest,best,lb,ub,n,dim):

    # perform Levy flights (Mantegna's algorithm)
    tempnest=numpy.array(nest)
    beta=3/2
    sigma=(math.gamma(1+beta)*math.sin(math.pi*beta/2)/(math.gamma((1+beta)/2)*beta*2**((beta-1)/2)))**(1/beta)

    for j in range (0,n):
        s=nest[j,:]
        u=numpy.random.randn(len(s))*sigma
        v=numpy.random.randn(len(s))
        step=u/abs(v)**(1/beta)

        stepsize=0.01*(step*(s-best))

        s=s+stepsize*numpy.random.randn(len(s))

        # binarize the continuous candidate with the S-shaped transfer function
        # (the original applied s1 to the whole row and then again per element)
        for i in range (0,dim):
            ss= transfer_functions_benchmark.s1(s[i])
            if (random.random()<ss):
               tempnest[j,i]=1
            else:
               tempnest[j,i]=0

        # make sure at least one feature is selected
        while numpy.sum(tempnest[j,:])==0:
            tempnest[j,:]=numpy.random.randint(2, size=(1,dim))
        #tempnest[j,:]=numpy.clip(s, lb, ub)
    return tempnest
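A quick smoke test under the sketch helpers above (population size, dimensionality, and bounds here are invented for illustration):

n, dim = 5, 10
nest = numpy.random.randint(2, size=(n, dim)).astype(float)
best = nest[0, :].copy()
new_nests = get_cuckoos(nest, best, lb=0, ub=1, n=n, dim=dim)
print(new_nests)  # one binary candidate per row, each with at least one 1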
Example #2
def WOA(objf, lb, ub, dim, SearchAgents_no, Max_iter, trainInput, trainOutput):

    #dim=30
    #SearchAgents_no=50
    #lb=-100
    #ub=100
    #Max_iter=500

    # initialize position vector and score for the leader
    Leader_pos = numpy.zeros(dim)
    Leader_score = float("inf")  #change this to -inf for maximization problems

    #Initialize the positions of search agents
    # Positions=numpy.random.uniform(0,1,(SearchAgents_no,dim)) *(ub-lb)+lb #generating continuous individuals
    Positions = numpy.random.randint(
        2, size=(SearchAgents_no, dim)).astype(float)  #generating binary individuals
    # (float dtype so the continuous updates below are not truncated before the transfer function)
    #Initialize convergence
    convergence_curve1 = numpy.zeros(Max_iter)
    convergence_curve2 = numpy.zeros(Max_iter)

    ############################
    s = solution()

    print("WOA is optimizing  \"" + objf.__name__ + "\"")

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    ############################

    t = 0  # Loop counter

    # Main loop
    while t < Max_iter:
        for i in range(0, SearchAgents_no):

            # Return back the search agents that go beyond the boundaries of the search space

            #Positions[i,:]=checkBounds(Positions[i,:],lb,ub)
            # Positions[i,:]=numpy.clip(Positions[i,:], lb, ub)

            # the following statement ensures that at least one feature is selected
            #(i.e. the randomly generated individual has at least one value 1)
            while numpy.sum(Positions[i, :]) == 0:
                Positions[i, :] = numpy.random.randint(2, size=(1, dim))

            # Calculate objective function for each search agent
            fitness = objf(Positions[i, :], trainInput, trainOutput, dim)

            # Update the leader
            if fitness < Leader_score:  # Change this to > for maximization problem
                Leader_score = fitness
                # Update alpha
                Leader_pos = Positions[i, :].copy(
                )  # copy current whale position into the leader position

        # log once per iteration (the original did this inside the agent loop)
        featurecount = 0
        for f in range(0, dim):
            if Leader_pos[f] == 1:
                featurecount = featurecount + 1

        convergence_curve1[t] = Leader_score
        convergence_curve2[t] = featurecount
        if (t % 1 == 0):
            print([
                'At iteration ' + str(t) +
                ' the best fitness on training is: ' + str(Leader_score) +
                ', the best number of features: ' + str(featurecount)
            ])

        a = 2 - t * ((2) / Max_iter)
        # a decreases linearly from 2 to 0 in Eq. (2.3)

        # a2 linearly decreases from -1 to -2 to calculate t in Eq. (3.12)
        a2 = -1 + t * ((-1) / Max_iter)

        # Update the Position of search agents
        for i in range(0, SearchAgents_no):
            r1 = random.random()  # r1 is a random number in [0,1]
            r2 = random.random()  # r2 is a random number in [0,1]

            A = 2 * a * r1 - a  # Eq. (2.3) in the paper
            C = 2 * r2  # Eq. (2.4) in the paper

            b = 1
            #  parameters in Eq. (2.5)
            l = (a2 - 1) * random.random() + 1  #  parameters in Eq. (2.5)

            p = random.random()  # p in Eq. (2.6)

            for j in range(0, dim):

                if p < 0.5:
                    if abs(A) >= 1:
                        rand_leader_index = math.floor(SearchAgents_no *
                                                       random.random())
                        X_rand = Positions[rand_leader_index, :]
                        D_X_rand = abs(C * X_rand[j] - Positions[i, j])
                        Positions[
                            i, j] = X_rand[j] - A * D_X_rand  #update statement
                        # binarize via the V-shaped transfer function (the
                        # original assigned the raw v1 value as the position)
                        ss = transfer_functions_benchmark.v1(Positions[i, j])
                        if random.random() < ss:
                            Positions[i, j] = 1
                        else:
                            Positions[i, j] = 0

                    elif abs(A) < 1:
                        D_Leader = abs(C * Leader_pos[j] - Positions[i, j])
                        Positions[i, j] = Leader_pos[
                            j] - A * D_Leader  #update statement

                        ss = transfer_functions_benchmark.s1(Positions[i, j])

                        if (random.random() < ss):
                            Positions[i, j] = 1
                        else:
                            Positions[i, j] = 0

                elif p >= 0.5:

                    distance2Leader = abs(Leader_pos[j] - Positions[i, j])
                    # Eq. (2.5)
                    Positions[i, j] = distance2Leader * math.exp(
                        b * l) * math.cos(l * 2 * math.pi) + Leader_pos[j]

                    # binarize once with the S-shaped transfer function (the
                    # original applied v1 and then s1 on top of it)
                    ss = transfer_functions_benchmark.s1(Positions[i, j])

                    if (random.random() < ss):
                        Positions[i, j] = 1
                    else:
                        Positions[i, j] = 0

        t = t + 1

    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.bestIndividual = Leader_pos
    s.convergence1 = convergence_curve1
    s.convergence2 = convergence_curve2

    s.optimizer = "WOA"
    s.objfname = objf.__name__

    return s
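All the optimizers in this collection share the signature objf(individual, trainInput, trainOutput, dim) for the fitness callback, so a single hypothetical driver illustrates the calling convention. The toy fitness below merely rewards small feature subsets; the project's real F10/F11 objectives train a classifier and are not shown here.

def toy_fitness(individual, trainInput, trainOutput, dim):
    # stand-in objective: fraction of selected features (lower is better)
    if numpy.sum(individual) == 0:
        return float("inf")
    return numpy.sum(individual) / dim

trainInput = numpy.random.rand(30, 10)           # 30 samples, 10 candidate features
trainOutput = numpy.random.randint(2, size=30)   # binary labels
res = WOA(toy_fitness, lb=0, ub=1, dim=10, SearchAgents_no=8, Max_iter=5,
          trainInput=trainInput, trainOutput=trainOutput)
print(res.bestIndividual, res.convergence1[-1])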
Example #3
def MFO(objf,lb,ub,dim,N,Max_iteration,trainInput,trainOutput):

    #Initialize the positions of moths
    # Moth_pos=numpy.random.uniform(0,1,(N,dim)) *(ub-lb)+lb #generating continuous individuals
    Moth_pos=numpy.random.randint(2, size=(N,dim)).astype(float) #generating binary individuals
    # (float dtype so the continuous updates below are not truncated before the transfer function)

    Moth_fitness=numpy.full(N,float("inf"))
    
    Convergence_curve1=numpy.zeros(Max_iteration)
    Convergence_curve2=numpy.zeros(Max_iteration)

    
    sorted_population=numpy.copy(Moth_pos)
    fitness_sorted=numpy.zeros(N)
    #####################
    best_flames=numpy.copy(Moth_pos)
    best_flame_fitness=numpy.zeros(N)
    ####################
    double_population=numpy.zeros((2*N,dim))
    double_fitness=numpy.zeros(2*N)
    
    double_sorted_population=numpy.zeros((2*N,dim))
    double_fitness_sorted=numpy.zeros(2*N)
    #########################
    previous_population=numpy.zeros((N,dim))
    previous_fitness=numpy.zeros(N)


    s=solution()

    print("MFO is optimizing  \""+objf.__name__+"\"")    

    timerStart=time.time() 
    s.startTime=time.strftime("%Y-%m-%d-%H-%M-%S")
    
    Iteration=1
    
    # Main loop
    while (Iteration<Max_iteration+1):

        # Number of flames Eq. (3.14) in the paper
        Flame_no=round(N-Iteration*((N-1)/Max_iteration))

        for i in range(0,N):

            # Check if moths go out of the search space and bring them back
            # Moth_pos[i,:]=numpy.clip(Moth_pos[i,:], lb, ub)

            # the following statement ensures that at least one feature is selected
            #(i.e. the randomly generated individual has at least one value 1)
            while numpy.sum(Moth_pos[i,:])==0:
                Moth_pos[i,:]=numpy.random.randint(2, size=(1,dim))

            # evaluate moths
            Moth_fitness[i]=objf(Moth_pos[i,:],trainInput,trainOutput,dim)
            
        
           
        if Iteration==1:
            # Sort the first population of moths
            fitness_sorted=numpy.sort(Moth_fitness)
            I=numpy.argsort(Moth_fitness)

            sorted_population=Moth_pos[I,:]

            #Update the flames
            best_flames=sorted_population
            best_flame_fitness=fitness_sorted
        else:
            # Sort the moths
            double_population=numpy.concatenate((previous_population,best_flames),axis=0)
            double_fitness=numpy.concatenate((previous_fitness, best_flame_fitness),axis=0)

            double_fitness_sorted =numpy.sort(double_fitness)
            I2 =numpy.argsort(double_fitness)

            for newindex in range(0,2*N):
                double_sorted_population[newindex,:]=numpy.array(double_population[I2[newindex],:])

            fitness_sorted=double_fitness_sorted[0:N]
            sorted_population=double_sorted_population[0:N,:]

            # Update the flames
            best_flames=sorted_population
            best_flame_fitness=fitness_sorted
    
        # Update the position of the best flame obtained so far
        Best_flame_score=fitness_sorted[0]
        Best_flame_pos=sorted_population[0,:]

        # keep copies: Moth_pos and Moth_fitness are modified in place below
        previous_population=numpy.copy(Moth_pos)
        previous_fitness=numpy.copy(Moth_fitness)

        featurecount=0
        for f in range(0,dim):
            if Best_flame_pos[f]==1:
                featurecount=featurecount+1

        Convergence_curve1[Iteration-1]=Best_flame_score # store the best fitness on training
        Convergence_curve2[Iteration-1]=featurecount     # store the best number of features


        #Display best fitness along the iteration
        if (Iteration%1==0):
            print(['At iteration '+ str(Iteration)+' the best fitness on training is: '+ str(Best_flame_score)+', the best number of features: '+str(featurecount)])

        # a linearly decreases from -1 to -2 to calculate t in Eq. (3.12)
        a=-1+Iteration*((-1)/Max_iteration)
        

        
        # Update moth positions
        for i in range(0,N):

            for j in range(0,dim):
                if (i<=Flame_no): #Update the position of the moth with respect to its corresponding flame

                    # D in Eq. (3.13)
                    distance_to_flame=abs(sorted_population[i,j]-Moth_pos[i,j])
                    b=1
                    t=(a-1)*random.random()+1

                    # Eq. (3.12)
                    Moth_pos[i,j]=distance_to_flame*math.exp(b*t)*math.cos(t*2*math.pi)+sorted_population[i,j] #update statement
                    ss= transfer_functions_benchmark.s1(Moth_pos[i,j])

                    if (random.random()<ss):
                       Moth_pos[i,j]=1
                    else:
                       Moth_pos[i,j]=0

                if i>Flame_no: # Update the position of the moth with respect to one flame

                    # Eq. (3.13)
                    distance_to_flame=abs(sorted_population[i,j]-Moth_pos[i,j])
                    b=1
                    t=(a-1)*random.random()+1

                    # Eq. (3.12)
                    Moth_pos[i,j]=distance_to_flame*math.exp(b*t)*math.cos(t*2*math.pi)+sorted_population[Flame_no,j] #update statement
                    ss= transfer_functions_benchmark.s1(Moth_pos[i,j])

                    if (random.random()<ss):
                       Moth_pos[i,j]=1
                    else:
                       Moth_pos[i,j]=0
        Iteration=Iteration+1

    timerEnd=time.time()
    s.endTime=time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime=timerEnd-timerStart
    s.bestIndividual=Best_flame_pos
    s.convergence1=Convergence_curve1
    s.convergence2=Convergence_curve2

    s.optimizer="MFO"
    s.objfname=objf.__name__

    return s
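Eq. (3.14) shrinks the flame count from roughly N down to 1 over the run, which is what gradually concentrates the moths around the single best flame. A quick illustrative check of the schedule (sizes invented):

N, Max_iteration = 30, 10
for Iteration in range(1, Max_iteration + 1):
    print(Iteration, round(N - Iteration * ((N - 1) / Max_iteration)))
# 27, 24, 21, ..., 4, 1: fewer and fewer flames guide the swarm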
Example #4
def HHO(objf, lb, ub, dim, SearchAgents_no, Max_iter, trainInput, trainOutput):

    #dim=30
    #SearchAgents_no=50
    #lb=-100
    #ub=100
    #Max_iter=500

    # initialize the location and Energy of the rabbit
    Rabbit_Location = numpy.zeros(dim)
    Rabbit_Energy = float(
        "inf")  #change this to -inf for maximization problems

    #Initialize the locations of Harris' hawks
    X = numpy.random.randint(2, size=(SearchAgents_no, dim)).astype(float)
    # (float dtype so the continuous updates below are not truncated before the transfer function)
    for i in range(0, SearchAgents_no):
        while numpy.sum(X[i, :]) == 0:
            X[i, :] = numpy.random.randint(2, size=(1, dim))
    #Initialize convergence
    convergence_curve1 = numpy.zeros(Max_iter)
    convergence_curve2 = numpy.zeros(Max_iter)

    ############################
    s = solution()

    print("HHO is now tackling  \"" + objf.__name__ + "\"")

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    ############################

    t = 0  # Loop counter

    # Main loop
    while t < Max_iter:
        for i in range(0, SearchAgents_no):
            # Check boundries

            # X[i,:]=numpy.random.randint(2, size=(1,dim))
            while numpy.sum(X[i, :]) == 0:
                X[i, :] = numpy.random.randint(2, size=(1, dim))

            # fitness of locations
            fitness = objf(X[i, :], trainInput, trainOutput, dim)
            # Update the location of Rabbit
            if fitness < Rabbit_Energy:  # Change this to > for maximization problem
                Rabbit_Energy = fitness
                Rabbit_Location = X[i, :].copy()

        E1 = 2 * (1 - (t / Max_iter)
                  )  # factor to show the decreasing energy of the rabbit

        # Update the location of Harris' hawks
        for i in range(0, SearchAgents_no):

            E0 = 2 * random.random() - 1  # -1<E0<1
            Escaping_Energy = E1 * (
                E0)  # escaping energy of rabbit Eq. (3) in the paper

            # -------- Exploration phase Eq. (1) in paper -------------------

            if abs(Escaping_Energy) >= 1:
                #Harris' hawks perch randomly based on two strategies:
                q = random.random()
                rand_Hawk_index = math.floor(SearchAgents_no * random.random())
                X_rand = X[rand_Hawk_index, :]
                if q < 0.5:
                    # perch based on other family members
                    X[i, :] = X_rand - random.random() * abs(
                        X_rand - 2 * random.random() * X[i, :])
                    #print(X[i,:])
                    for j in range(0, dim):
                        ss = transfer_functions_benchmark.s1(X[i, j])
                        #print(ss)
                        if (random.random() < ss):
                            X[i, j] = 1
                        else:
                            X[i, j] = 0

                elif q >= 0.5:
                    #perch on a random tall tree (random site inside group's home range)
                    X[i, :] = (Rabbit_Location -
                               X.mean(0)) - random.random() * (
                                   (ub - lb) * random.random() + lb)
                    for j in range(0, dim):
                        ss = transfer_functions_benchmark.s1(X[i, j])
                        if (random.random() < ss):
                            X[i, j] = 1
                        else:
                            X[i, j] = 0

            # -------- Exploitation phase -------------------
            elif abs(Escaping_Energy) < 1:
                #Attacking the rabbit using 4 strategies regarding the behavior of the rabbit

                #phase 1: ----- surprise pounce (seven kills) ----------
                #surprise pounce (seven kills): multiple, short rapid dives by different hawks

                r = random.random()  # probability of each event

                if r >= 0.5 and abs(Escaping_Energy
                                    ) < 0.5:  # Hard besiege Eq. (6) in paper
                    X[i, :] = (Rabbit_Location) - Escaping_Energy * abs(
                        Rabbit_Location - X[i, :])

                    for j in range(0, dim):
                        ss = transfer_functions_benchmark.s1(X[i, j])

                        if (random.random() < ss):
                            X[i, j] = 1
                        else:
                            X[i, j] = 0

                if r >= 0.5 and abs(Escaping_Energy
                                    ) >= 0.5:  # Soft besiege Eq. (4) in paper
                    Jump_strength = 2 * (
                        1 - random.random()
                    )  # random jump strength of the rabbit
                    X[i, :] = (Rabbit_Location -
                               X[i, :]) - Escaping_Energy * abs(
                                   Jump_strength * Rabbit_Location - X[i, :])
                    for j in range(0, dim):
                        ss = transfer_functions_benchmark.s1(X[i, j])
                        if (random.random() < ss):
                            X[i, j] = 1
                        else:
                            X[i, j] = 0

                #phase 2: --------performing team rapid dives (leapfrog movements)----------

                if r < 0.5 and abs(Escaping_Energy
                                   ) >= 0.5:  # Soft besiege Eq. (10) in paper
                    #the rabbit tries to escape with many zigzag deceptive motions
                    Jump_strength = 2 * (1 - random.random())
                    X1 = Rabbit_Location - Escaping_Energy * abs(
                        Jump_strength * Rabbit_Location - X[i, :])
                    for j in range(0, dim):
                        ss = transfer_functions_benchmark.s1(X1[j])
                        if (random.random() < ss):
                            X1[j] = 1
                        else:
                            X1[j] = 0
                    if objf(X1, trainInput, trainOutput,
                            dim) < fitness:  # improved move?
                        X[i, :] = X1.copy()

                    else:  # hawks perform levy-based short rapid dives around the rabbit
                        X2 = Rabbit_Location - Escaping_Energy * abs(
                            Jump_strength * Rabbit_Location -
                            X[i, :]) + numpy.multiply(numpy.random.randn(dim),
                                                      Levy(dim))
                        for j in range(0, dim):
                            ss = transfer_functions_benchmark.s1(X2[j])
                            if (random.random() < ss):
                                X2[j] = 1
                            else:
                                X2[j] = 0
                        if objf(X2, trainInput, trainOutput, dim) < fitness:
                            X[i, :] = X2.copy()

                if r < 0.5 and abs(Escaping_Energy
                                   ) < 0.5:  # Hard besiege Eq. (11) in paper
                    Jump_strength = 2 * (1 - random.random())
                    X1 = Rabbit_Location - Escaping_Energy * abs(
                        Jump_strength * Rabbit_Location - X.mean(0))
                    for j in range(0, dim):
                        ss = transfer_functions_benchmark.s1(X1[j])
                        if (random.random() < ss):
                            X1[j] = 1
                        else:
                            X1[j] = 0
                    if objf(X1, trainInput, trainOutput,
                            dim) < fitness:  # improved move?
                        X[i, :] = X1.copy()

                    else:  # Perform levy-based short rapid dives around the rabbit
                        X2 = Rabbit_Location - Escaping_Energy * abs(
                            Jump_strength * Rabbit_Location -
                            X.mean(0)) + numpy.multiply(
                                numpy.random.randn(dim), Levy(dim))
                        for j in range(0, dim):
                            ss = transfer_functions_benchmark.s1(X2[j])
                            if (random.random() < ss):
                                X2[j] = 1
                            else:
                                X2[j] = 0

                        if objf(X2, trainInput, trainOutput, dim) < fitness:
                            X[i, :] = X2.copy()

        featurecount = 0
        for f in range(0, dim):
            if Rabbit_Location[f] == 1:
                featurecount = featurecount + 1

        convergence_curve1[t] = Rabbit_Energy
        convergence_curve2[t] = featurecount

        if (t % 1 == 0):
            print([
                'At iteration ' + str(t) + ' the best fitness is ' +
                str(Rabbit_Energy) + ', the best number of features: ' +
                str(featurecount)
            ])

        t = t + 1

    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.convergence1 = convergence_curve1
    s.convergence2 = convergence_curve2
    s.optimizer = "HHO"
    s.objfname = objf.__name__
    s.best = Rabbit_Energy
    s.bestIndividual = Rabbit_Location

    return s
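HHO calls a Levy(dim) helper that is not part of the snippet. A minimal sketch using Mantegna's algorithm, consistent with the sigma formula in Example #1 (the 0.01 scale factor is an assumption carried over from common HHO ports):

def Levy(dim):
    # Mantegna's algorithm for a Levy-stable step (beta = 1.5, assumed)
    beta = 1.5
    sigma = (math.gamma(1 + beta) * math.sin(math.pi * beta / 2) /
             (math.gamma((1 + beta) / 2) * beta * 2**((beta - 1) / 2)))**(1 / beta)
    u = 0.01 * numpy.random.randn(dim) * sigma
    v = numpy.random.randn(dim)
    return u / abs(v)**(1 / beta)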
Example #5
def SSA(objf, lb, ub, dim, N, Max_iteration, trainInput, trainOutput):

    #Max_iteration=1000
    #lb=-100
    #ub=100
    #dim=30
    #N=50  # use the N passed in; the original overrode the argument here
    if not isinstance(lb, list):
        lb = [lb] * dim
    if not isinstance(ub, list):
        ub = [ub] * dim
    Convergence_curve1 = numpy.zeros(Max_iteration)
    Convergence_curve2 = numpy.zeros(Max_iteration)

    #Initialize the positions of salps
    # SalpPositions = numpy.zeros((N, dim))
    SalpPositions = numpy.random.randint(2, size=(N, dim)).astype(float)
    # (float dtype so the continuous updates below are not truncated before the transfer function)

    #for i in range(dim):
    #    SalpPositions[:, i] = numpy.random.uniform(0, 1, N) * (ub[i] - lb[i]) + lb[i]
    SalpFitness = numpy.full(N, float("inf"))

    FoodPosition = numpy.zeros(dim)
    FoodFitness = float("inf")

    s = solution()

    print("SSA is optimizing  \"" + objf.__name__ + "\"")

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")

    for i in range(0, N):
        # evaluate salps, ensuring each selects at least one feature
        while numpy.sum(SalpPositions[i, :]) == 0:
            SalpPositions[i, :] = numpy.random.randint(2, size=(1, dim))
        SalpFitness[i] = objf(SalpPositions[i, :], trainInput, trainOutput,
                              dim)

    sorted_salps_fitness = numpy.sort(SalpFitness)
    I = numpy.argsort(SalpFitness)

    Sorted_salps = numpy.copy(SalpPositions[I, :])

    FoodPosition = numpy.copy(Sorted_salps[0, :])
    FoodFitness = sorted_salps_fitness[0]

    Iteration = 0

    # Main loop
    while (Iteration < Max_iteration):


        c1 = 2 * math.exp(-(4 * Iteration / Max_iteration)**2)
        # Eq. (3.2) in the paper

        for i in range(0, N):

            SalpPositions = numpy.transpose(SalpPositions)

            if i < N / 2:
                for j in range(0, dim):
                    c2 = random.random()
                    c3 = random.random()
                    #Eq. (3.1) in the paper
                    if c3 < 0.5:
                        SalpPositions[j, i] = FoodPosition[j] + c1 * (
                            (ub[j] - lb[j]) * c2 + lb[j])
                        ss = transfer_functions_benchmark.s1(SalpPositions[j,
                                                                           i])
                        if (random.random() < ss):
                            SalpPositions[j, i] = 1
                        else:
                            SalpPositions[j, i] = 0

                    else:
                        SalpPositions[j, i] = FoodPosition[j] - c1 * (
                            (ub[j] - lb[j]) * c2 + lb[j])
                        ss = transfer_functions_benchmark.s1(SalpPositions[j,
                                                                           i])
                        if (random.random() < ss):
                            SalpPositions[j, i] = 1
                        else:
                            SalpPositions[j, i] = 0

                    ####################

            else:  # follower salps (i >= N/2)
                point1 = SalpPositions[:, i - 1]
                point2 = SalpPositions[:, i]

                SalpPositions[:, i] = (point2 + point1) / 2
                # Eq. (3.4) in the paper

                while numpy.sum(SalpPositions[:, i]) == 0:
                    SalpPositions[:, i] = numpy.random.randint(2,
                                                               size=(1, dim))

            SalpPositions = numpy.transpose(SalpPositions)

        for i in range(0, N):

            # Check if salps go out of the search space and bring them back
            for j in range(dim):
                SalpPositions[i, j] = numpy.clip(SalpPositions[i, j], lb[j],
                                                 ub[j])
                ss = transfer_functions_benchmark.s1(SalpPositions[i, j])
                if (random.random() < ss):
                    SalpPositions[i, j] = 1
                else:
                    SalpPositions[i, j] = 0

            while numpy.sum(SalpPositions[i, :]) == 0:
                SalpPositions[i, :] = numpy.random.randint(2, size=(1, dim))

            SalpFitness[i] = objf(SalpPositions[i, :], trainInput, trainOutput,
                                  dim)

            if SalpFitness[i] < FoodFitness:
                FoodPosition = numpy.copy(SalpPositions[i, :])
                FoodFitness = SalpFitness[i]

        featurecount = 0
        for f in range(0, dim):
            if FoodPosition[f] == 1:
                featurecount = featurecount + 1

        Convergence_curve1[Iteration] = FoodFitness
        Convergence_curve2[Iteration] = featurecount
        #Display best fitness along the iteration
        if (Iteration % 1 == 0):
            print([
                'At iteration ' + str(Iteration + 1) +
                ' the best fitness on training is: ' + str(FoodFitness) +
                ', the best number of features: ' + str(featurecount)
            ])

        Iteration = Iteration + 1

    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.bestIndividual = FoodPosition
    s.convergence1 = Convergence_curve1
    s.convergence2 = Convergence_curve2
    s.optimizer = "SSA"
    s.objfname = objf.__name__

    return s
Example #6
def PSO(objf, lb, ub, dim, PopSize, iters, trainInput, trainOutput):

    # PSO parameters

    #    dim=30
    #    iters=200
    Vmax = 6
    #    PopSize=50     #population size
    wMax = 0.9
    wMin = 0.2
    c1 = 2
    c2 = 2
    #    lb=-10
    #    ub=10
    #
    s = solution()

    ######################## Initializations

    vel = numpy.zeros((PopSize, dim))

    pBestScore = numpy.zeros(PopSize)
    pBestScore.fill(float("inf"))

    pBest = numpy.zeros((PopSize, dim))
    gBest = numpy.zeros(dim)

    gBestScore = float("inf")

    #    pos=numpy.random.uniform(0,1,(PopSize,dim)) *(ub-lb)+lb #generating continuous individuals
    #    for i in range(0,PopSize):
    #         for j in range (0,dim):
    #             if (pos[i,j]<0.5):
    #                pos[i,j]=1;
    #             else:
    #                pos[i,j]=0;
    pos = numpy.random.randint(2, size=(PopSize,
                                        dim)).astype(float)  #generating binary individuals
    # (float dtype so the velocity update below is not truncated before the transfer function)
    convergence_curve1 = numpy.zeros(iters)
    convergence_curve2 = numpy.zeros(iters)

    print("ssssssssssssssssssss")
    ############################################
    print("PSO is optimizing  \"" + objf.__name__ + "\"")

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")

    for l in range(0, iters):
        for i in range(0, PopSize):
            #pos[i,:]=checkBounds(pos[i,:],lb,ub) --not needed with BPSO
            # pos[i,:]=numpy.clip(pos[i,:], lb, ub)--not needed with BPSO
            # transfer functions will do the boundary checking(individual values either 0 or 1)

            # the following statement ensures that at least one feature is selected
            #(i.e. the randomly generated individual has at least one value 1)
            while numpy.sum(pos[i, :]) == 0:
                pos[i, :] = numpy.random.randint(2, size=(1, dim))

            #Calculate objective function for each particle
            #fitness=objf(pos[i,:])
            fitness = objf(pos[i, :], trainInput, trainOutput, dim)

            if (pBestScore[i] > fitness):
                pBestScore[i] = fitness
                pBest[i, :] = pos[i, :]

            if (gBestScore > fitness):
                gBestScore = fitness  #best fitness on training returned from F10
                gBest = pos[i, :].copy()  # copy: pos is modified in place below

        featurecount = sum(gBest)

        convergence_curve2[
            l] = featurecount  # store the best number of features
        convergence_curve1[
            l] = gBestScore  # store the best fitness on testing returned from F11

        if (l % 1 == 0):
            print([
                'At iteration ' + str(l + 1) +
                ' the best fitness on training is: ' + str(gBestScore) +
                ', the best number of features: ' + str(featurecount)
            ])

        #Update the inertia weight w of PSO
        w = wMax - l * ((wMax - wMin) / iters)

        for i in range(0, PopSize):
            for j in range(0, dim):
                r1 = random.random()
                r2 = random.random()
                vel[i, j] = w * vel[i, j] + c1 * r1 * (
                    pBest[i, j] - pos[i, j]) + c2 * r2 * (gBest[j] - pos[i, j])

                if (vel[i, j] > Vmax):
                    vel[i, j] = Vmax

                if (vel[i, j] < -Vmax):
                    vel[i, j] = -Vmax

                pos[i, j] = (pos[i, j] + vel[i, j])  #update statement

                ss = transfer_functions_benchmark.s1(
                    pos[i, j])  #transfer function

                if (random.random() < ss):
                    pos[i, j] = 1
                else:
                    pos[i, j] = 0

            # print(("jjjjj"+str(pos[i,j])))

    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.bestIndividual = gBest
    s.convergence1 = convergence_curve1
    s.convergence2 = convergence_curve2

    s.optimizer = "PSO"
    s.objfname = objf.__name__

    return s
Example #7
def MVO(objf,lb,ub,dim,N,Max_time,trainInput,trainOutput):
    "parameters"
    #dim=30
    #lb=-100
    #ub=100
    WEP_Max=1
    WEP_Min=0.2
    #Max_time=1000
    #N=50
    
    
    #initialization stage of the population (either continuous or discrete binary individual generation)

    # Universes=numpy.random.uniform(0,1,(N,dim)) *(ub-lb)+lb
    # (the statement above would generate continuous individuals)

    Universes=numpy.random.randint(2, size=(N,dim)).astype(float)
    #this statement generates binary individuals with discrete values, either 0 or 1 ---[0,2)
    #suitable for the feature selection problem
    #(float dtype so the continuous updates below are not truncated before the transfer function)
    #this can be done in another way by generating continuous individuals and
    #then applying a threshold-based conversion: values above a certain
    #threshold become 1 and values below it become 0

    Sorted_universes=numpy.copy(Universes)
    
    convergence1=numpy.zeros(Max_time)
    convergence2=numpy.zeros(Max_time)

    Best_universe=[0]*dim
    Best_universe_Inflation_rate= float("inf")

    s=solution()

    Time=1
    ############################################
    print("MVO is optimizing  \""+objf.__name__+"\"")

    timerStart=time.time()
    s.startTime=time.strftime("%Y-%m-%d-%H-%M-%S")
    while (Time<Max_time+1):

        "Eq. (3.3) in the paper"
        WEP=WEP_Min+Time*((WEP_Max-WEP_Min)/Max_time)

        TDR=1-(math.pow(Time,1/6)/math.pow(Max_time,1/6))

        Inflation_rates=[0]*len(Universes)

        for i in range(0,N):
            # Universes[i,:]=numpy.clip(Universes[i,:], lb, ub)

            # the following statement ensures that at least one feature is selected
            #(i.e. the randomly generated individual has at least one value 1)
            while numpy.sum(Universes[i,:])==0:
                Universes[i,:]=numpy.random.randint(2, size=(1,dim))

            Inflation_rates[i]=objf(Universes[i,:],trainInput,trainOutput,dim)

            if Inflation_rates[i]<Best_universe_Inflation_rate:
                Best_universe_Inflation_rate=Inflation_rates[i]
                Best_universe=numpy.array(Universes[i,:])

        # log once per iteration (the original did this inside the agent loop)
        featurecount=0
        for f in range(0,dim):
            if Best_universe[f]==1:
                featurecount=featurecount+1

        convergence1[Time-1]=Best_universe_Inflation_rate # store the best fitness on training
        convergence2[Time-1]=featurecount                 # store the best number of features

        if (Time%1==0):
            print(['At iteration '+ str(Time)+ ' the best fitness on training is: '+ str(Best_universe_Inflation_rate)+', the best number of features: '+str(featurecount)])

        sorted_Inflation_rates = numpy.sort(Inflation_rates)
        sorted_indexes = numpy.argsort(Inflation_rates)

        for newindex in range(0,N):
            Sorted_universes[newindex,:]=numpy.array(Universes[sorted_indexes[newindex],:])

        normalized_sorted_Inflation_rates=numpy.copy(normr(sorted_Inflation_rates))

        Universes[0,:]= numpy.array(Sorted_universes[0,:])

        for i in range(1,N):
            Black_hole_index=i
            for j in range(0,dim):
                r1=random.random()

                if r1<normalized_sorted_Inflation_rates[i]:
                    White_hole_index=RouletteWheelSelection(-sorted_Inflation_rates)

                    if White_hole_index==-1:
                        White_hole_index=0
                    # (the original then unconditionally reset White_hole_index
                    # to 0, which defeated the roulette-wheel selection)
                    Universes[Black_hole_index,j]=Sorted_universes[White_hole_index,j]
            
            
            
                #update statements of the universe using transfer functions instead of conventional operators
                r2=random.random()

                if r2<WEP:
                    r3=random.random()
                    if r3<0.5:
                        # Universes[i,j]=Best_universe[j]+TDR*((ub-lb)*random.random()+lb)
                        Universes[i,j]=Best_universe[j]+TDR*random.random()
                        ss= transfer_functions_benchmark.s1(Universes[i,j])

                        if (random.random()<ss):
                            Universes[i,j]=1
                        else:
                            Universes[i,j]=0

                    else:
                        # Universes[i,j]=Best_universe[j]-TDR*((ub-lb)*random.random()+lb)
                        Universes[i,j]=Best_universe[j]-TDR*random.random()
                        ss= transfer_functions_benchmark.s1(Universes[i,j])

                        if (random.random()<ss):
                            Universes[i,j]=1
                        else:
                            Universes[i,j]=0
         
        
      
        
        
        
        Time=Time+1
  
    
    timerEnd=time.time()  
    s.endTime=time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime=timerEnd-timerStart
    s.bestIndividual=Best_universe
    s.convergence1=convergence1
    s.convergence2=convergence2

    s.optimizer="MVO"
    s.objfname=objf.__name__

    return s
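MVO relies on two helpers that are not shown: normr (vector normalization) and RouletteWheelSelection (fitness-proportionate selection over the negated inflation rates, returning -1 when nothing is picked). Minimal sketches under those assumptions:

def normr(vec):
    # normalize a vector to unit Euclidean norm (like MATLAB's normr on one row)
    vec = numpy.asarray(vec, dtype=float).reshape(-1)
    norm = numpy.linalg.norm(vec)
    return vec / norm if norm > 0 else vec

def RouletteWheelSelection(weights):
    # fitness-proportionate selection; -1 signals that no index was chosen
    accumulation = numpy.cumsum(weights)
    p = random.random() * accumulation[-1]
    for index in range(len(accumulation)):
        if accumulation[index] > p:
            return index
    return -1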
Example #8
def GWO(objf, lb, ub, dim, SearchAgents_no, Max_iter, trainInput, trainOutput):

    #Max_iter=1000
    #lb=-100
    #ub=100
    #dim=30
    #SearchAgents_no=5

    # initialize alpha, beta, and delta_pos
    Alpha_pos = numpy.zeros(dim)
    Alpha_score = float("inf")

    Beta_pos = numpy.zeros(dim)
    Beta_score = float("inf")

    Delta_pos = numpy.zeros(dim)
    Delta_score = float("inf")

    #initialization stage of positions of the search agents(either continuous or discrete (binary) individual generation)
    # Positions=numpy.random.uniform(0,1,(SearchAgents_no,dim)) *(ub-lb)+lb #generating continuous individuals

    Positions = numpy.random.randint(
        2, size=(SearchAgents_no, dim))  #generating binary individuals

    Convergence_curve1 = numpy.zeros(Max_iter)
    Convergence_curve2 = numpy.zeros(Max_iter)

    s = solution()

    # Loop counter
    print("GWO is optimizing  \"" + objf.__name__ + "\"")

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    # Main loop
    for l in range(0, Max_iter):
        for i in range(0, SearchAgents_no):

            # Return back the search agents that go beyond the boundaries of the search space
            Positions[i, :] = numpy.clip(Positions[i, :], lb, ub)

            # the following statement ensures that at least one feature is selected
            #(i.e. the randomly generated individual has at least one value 1)
            while numpy.sum(Positions[i, :]) == 0:
                Positions[i, :] = numpy.random.randint(2, size=(1, dim))

            # Calculate objective function for each search agent
            fitness = objf(Positions[i, :], trainInput, trainOutput, dim)

            # Update Alpha, Beta, and Delta
            if fitness < Alpha_score:
                Alpha_score = fitness
                # Update alpha
                Alpha_pos = Positions[i, :].copy()

            if (fitness > Alpha_score and fitness < Beta_score):
                Beta_score = fitness  # Update beta
                Beta_pos = Positions[i, :].copy()

            if (fitness > Alpha_score and fitness > Beta_score
                    and fitness < Delta_score):
                Delta_score = fitness  # Update delta
                Delta_pos = Positions[i, :].copy()

        a = 2 - l * ((2) / Max_iter)
        # a decreases linearly from 2 to 0

        # Update the Position of search agents including omegas
        for i in range(0, SearchAgents_no):
            for j in range(0, dim):

                r1 = random.random()  # r1 is a random number in [0,1]
                r2 = random.random()  # r2 is a random number in [0,1]

                A1 = 2 * a * r1 - a
                # Equation (3.3)
                C1 = 2 * r2
                # Equation (3.4)

                D_alpha = abs(C1 * Alpha_pos[j] - Positions[i, j])
                # Equation (3.5)-part 1
                # X1=Alpha_pos[j]-A1*D_alpha; # Equation (3.6)-part 1
                temp = transfer_functions_benchmark.s1(A1 * D_alpha)
                if temp < numpy.random.uniform(0, 1):
                    temp = 0
                else:
                    temp = 1
                # binary Equation (3.6)-part 1 (the original left X1 unset
                # whenever Alpha_pos[j] + temp < 1, raising NameError)
                if (Alpha_pos[j] + temp) >= 1:
                    X1 = 1
                else:
                    X1 = 0

                r1 = random.random()
                r2 = random.random()

                A2 = 2 * a * r1 - a
                # Equation (3.3)
                C2 = 2 * r2
                # Equation (3.4)

                D_beta = abs(C2 * Beta_pos[j] - Positions[i, j])
                # Equation (3.5)-part 2
                #  X2=Beta_pos[j]-A2*D_beta; # Equation (3.6)-part 2
                temp = transfer_functions_benchmark.s1(A2 * D_beta)

                if temp < numpy.random.uniform(0, 1):
                    temp = 0
                else:
                    temp = 1

                # binary Equation (3.6)-part 2
                if (Beta_pos[j] + temp) >= 1:
                    X2 = 1
                else:
                    X2 = 0

                r1 = random.random()
                r2 = random.random()

                A3 = 2 * a * r1 - a
                # Equation (3.3)
                C3 = 2 * r2
                # Equation (3.4)

                D_delta = abs(C3 * Delta_pos[j] - Positions[i, j])
                # Equation (3.5)-part 3
                # X3=Delta_pos[j]-A3*D_delta; # Equation (3.6)-part 3

                temp = transfer_functions_benchmark.s1(A3 * D_delta)
                if temp < numpy.random.uniform(0, 1):
                    temp = 0
                else:
                    temp = 1

                # binary Equation (3.6)-part 3
                if (Delta_pos[j] + temp) >= 1:
                    X3 = 1
                else:
                    X3 = 0

                # binary analogue of Equation (3.7): pick one of the three
                # binary guides at random (the original averaged them outside
                # the j loop, which only updated the last dimension)
                rc = random.random()
                if rc < 1 / 3:
                    Positions[i, j] = X1
                elif rc < 2 / 3:
                    Positions[i, j] = X2
                else:
                    Positions[i, j] = X3

        featurecount = 0
        for f in range(0, dim):
            if Alpha_pos[f] == 1:
                featurecount = featurecount + 1

        Convergence_curve1[l] = Alpha_score
        Convergence_curve2[l] = featurecount
        if (l % 1 == 0):
            print([
                'At iteration ' + str(l + 1) +
                ' the best fitness on training is: ' + str(Alpha_score) +
                ', the best number of features: ' + str(featurecount)
            ])

    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.bestIndividual = Alpha_pos
    s.convergence1 = Convergence_curve1
    s.convergence2 = Convergence_curve2

    s.optimizer = "GWO"
    s.objfname = objf.__name__

    return s
Example #9
def FFA(objf, lb, ub, dim, n, MaxGeneration, trainInput, trainOutput):

    #General parameters

    #n=50 #number of fireflies
    #dim=30 #dim
    #lb=-50
    #ub=50
    #MaxGeneration=500

    #FFA parameters
    alpha = 0.5  # Randomness 0--1 (highly random)
    betamin = 0.20  # minimum value of beta
    gamma = 1  # Absorption coefficient

    zn = numpy.ones(n)
    zn.fill(float("inf"))

    #ns(i,:)=Lb+(Ub-Lb).*rand(1,d);
    # ns=numpy.random.uniform(0,1,(n,dim)) *(ub-lb)+lb #generating continuous individuals

    ns = numpy.random.randint(2, size=(n, dim)).astype(float)  #generating binary individuals
    # (float dtype so the continuous move below is not truncated before the transfer function)

    Lightn = numpy.ones(n)
    Lightn.fill(float("inf"))

    #[ns,Lightn]=init_ffa(n,d,Lb,Ub,u0)

    convergence1 = []
    convergence2 = []

    s = solution()

    print("FFA is optimizing  \"" + objf.__name__ + "\"")

    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")

    # Main loop
    for k in range(0, MaxGeneration):  # start iterations

        #% This line of reducing alpha is optional
        alpha = alpha_new(alpha, MaxGeneration)

        #% Evaluate new solutions (for all n fireflies)
        for i in range(0, n):
            # the following statement ensures that at least one feature is selected
            #(i.e. the randomly generated individual has at least one value 1)
            while numpy.sum(ns[i, :]) == 0:
                ns[i, :] = numpy.random.randint(2, size=(1, dim))

            zn[i] = objf(ns[i, :], trainInput, trainOutput, dim)
            Lightn[i] = zn[i]

        # Ranking fireflies by their light intensity/objectives

        Lightn = numpy.sort(zn)
        Index = numpy.argsort(zn)
        ns = ns[Index, :]

        #Find the current best (keep copies: ns is moved in place below)
        nso = numpy.copy(ns)
        Lighto = numpy.copy(Lightn)
        nbest = ns[0, :].copy()
        Lightbest = Lightn[0]

        #% For output only
        fbest = Lightbest

        BestQuality = fbest

        featurecount = 0
        for f in range(0, dim):
            if nbest[f] == 1:
                featurecount = featurecount + 1

        convergence1.append(BestQuality)
        convergence2.append(featurecount)

        if (k % 1 == 0):
            print([
                'At iteration ' + str(k) + ' the best fitness on training is ' +
                str(BestQuality) + ', the best number of features: ' +
                str(featurecount)
            ])

        #% Move all fireflies to the better locations
    #    [ns]=ffa_move(n,d,ns,Lightn,nso,Lighto,nbest,...
    #          Lightbest,alpha,betamin,gamma,Lb,Ub);
        scale = numpy.ones(dim) * abs(ub - lb)
        for i in range(0, n):
            # The attractiveness parameter beta=exp(-gamma*r)
            for j in range(0, n):
                r = numpy.sqrt(numpy.sum((ns[i, :] - ns[j, :])**2))
                #r=1
                # Update moves
                if Lightn[i] > Lighto[j]:  # Brighter and more attractive
                    beta0 = 1
                    beta = (beta0 - betamin) * math.exp(
                        -gamma * r**2) + betamin
                    tmpf = alpha * (numpy.random.rand(dim) - 0.5) * scale
                    ns[i, :] = ns[i, :] * (
                        1 - beta) + nso[j, :] * beta + tmpf  #update statement
                    # binarize each dimension (fresh loop variable: the
                    # original reused j, shadowing the firefly index)
                    for d in range(0, dim):
                        ss = transfer_functions_benchmark.s1(ns[i, d])

                        if (random.random() < ss):
                            ns[i, d] = 1
                        else:
                            ns[i, d] = 0

        #ns=numpy.clip(ns, lb, ub)

    #
    ####################### End main loop
    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.bestIndividual = nbest
    s.convergence1 = convergence1
    s.convergence2 = convergence2

    s.optimizer = "FFA"
    s.objfname = objf.__name__

    return s
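FFA anneals its randomness parameter through an alpha_new helper that the snippet does not define. A sketch matching common firefly implementations, where alpha decays so that only about 10^-4 of it survives a full run (the decay target is an assumption):

def alpha_new(alpha, NGen):
    # shrink alpha by a per-generation factor derived from the decay target
    delta = 1 - (10**(-4) / 0.9)**(1 / NGen)
    return (1 - delta) * alpha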
Example #10
def BAT(objf, lb, ub, dim, N, Max_iteration, trainInput, trainOutput):

    n = N
    # Population size
    #lb=-50
    #ub=50
    N_gen = Max_iteration  # Number of generations

    A = 0.5
    # Loudness  (constant or decreasing)
    r = 0.5
    # Pulse rate (constant or decreasing)

    Qmin = 0  # Frequency minimum
    Qmax = 2  # Frequency maximum

    d = dim  # Number of dimensions

    # Initializing arrays
    Q = numpy.zeros(n)  # Frequency
    v = numpy.zeros((n, d))  # Velocities
    Convergence_curve1 = []
    Convergence_curve2 = []

    # Initialize the population/solutions

    # Sol=numpy.random.rand(n,d)*(ub-lb)+lb      #generating continuous individuals

    Sol = numpy.random.randint(2, size=(n, d)).astype(float)  #generating binary individuals
    # (float dtype so the continuous updates below are not truncated before the transfer function)
    # the following statement ensures that at least one feature is selected
    #(i.e. the randomly generated individual has at least one value 1)

    for i in range(0, n):
        while numpy.sum(Sol[i, :]) == 0:
            Sol[i, :] = numpy.random.randint(2, size=(1, d))

    S = numpy.zeros((n, d))
    S = numpy.copy(Sol)
    Fitness = numpy.zeros(n)

    # initialize solution for the final results
    s = solution()
    print("BAT is optimizing  \"" + objf.__name__ + "\"")

    # Initialize timer for the experiment
    timerStart = time.time()
    s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")

    #Evaluate initial random solutions
    for i in range(0, n):
        Fitness[i] = objf(S[i, :], trainInput, trainOutput, dim)

    # Find the initial best solution
    fmin = min(Fitness)
    I = numpy.argmin(Fitness)
    best = Sol[I, :].copy()  # copy: Sol rows are overwritten later

    # Main loop
    for t in range(0, N_gen):

        # Loop over all bats (solutions)
        for i in range(0, n):

            # draw a frequency in [Qmin, Qmax] (the original's (Qmin - Qmax)
            # factor looks like a sign typo against Yang's BAT formulation)
            Q[i] = Qmin + (Qmax - Qmin) * random.random()
            v[i, :] = v[i, :] + (Sol[i, :] - best) * Q[i]
            S[i, :] = Sol[i, :] + v[i, :]

            # Check boundaries
            #  Sol=numpy.clip(Sol,lb,ub)

            # Pulse rate
            if random.random() > r:
                S[i, :] = best + 0.001 * numpy.random.randn(
                    d)  #update statement

            for f in range(0, dim):
                ss = transfer_functions_benchmark.s1(S[i,
                                                       f])  #transfer function
                if (random.random() < ss):
                    S[i, f] = 1
                else:
                    S[i, f] = 0

            # ensure the new candidate still selects at least one feature
            # (the original re-looped over all i here, clobbering the bat index)
            while numpy.sum(S[i, :]) == 0:
                S[i, :] = numpy.random.randint(2, size=(1, d))

            # Evaluate new solutions
            Fnew = objf(S[i, :], trainInput, trainOutput, dim)

            # Update if the solution improves
            if ((Fnew <= Fitness[i]) and (random.random() < A)):
                Sol[i, :] = numpy.copy(S[i, :])

                Fitness[i] = Fnew

            # Update the current best solution (copy, since S[i,:] is a view
            # that later generations overwrite)
            if Fnew <= fmin:
                best = S[i, :].copy()
                fmin = Fnew

            featurecount = 0
            for f in range(0, dim):
                if best[f] == 1:
                    featurecount = featurecount + 1

        #update convergence curve
        Convergence_curve1.append(fmin)
        Convergence_curve2.append(featurecount)

        if (t % 1 == 0):
            print([
                'At iteration ' + str(t + 1) +
                ' the best fitness on training is: ' + str(fmin) +
                ', the best number of features: ' + str(featurecount)
            ])

    timerEnd = time.time()
    s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
    s.executionTime = timerEnd - timerStart
    s.bestIndividual = best
    s.convergence1 = Convergence_curve1
    s.convergence2 = Convergence_curve2

    s.optimizer = "BAT"
    s.objfname = objf.__name__

    return s