Example 1
File: EO.py Project: ssen110/Py_FS
def EO(num_agents, max_iter, train_data, train_label, obj_function=compute_fitness, trans_func_shape='s', save_conv_graph=False):
    
    # Equilibrium Optimizer
    ############################### Parameters ####################################
    #                                                                             #
    #   num_agents: number of particles                                           #
    #   max_iter: maximum number of generations                                   #
    #   train_data: training samples of data                                      #
    #   train_label: class labels for the training samples                        #                
    #   obj_function: the function to maximize while doing feature selection      #
    #   trans_function_shape: shape of the transfer function used                 #
    #   save_conv_graph: boolean value for saving convergence graph               #
    #                                                                             #
    ###############################################################################
    
    short_name = 'EO'
    agent_name = 'Particle'
    train_data, train_label = np.array(train_data), np.array(train_label)
    num_features = train_data.shape[1]
    trans_function = get_trans_function(trans_func_shape)

    # initialize particles and Leader (the agent with the max fitness)
    particles = initialize(num_agents, num_features)
    fitness = np.zeros(num_agents)
    accuracy = np.zeros(num_agents)
    Leader_agent = np.zeros((1, num_features))
    Leader_fitness = float("-inf")
    Leader_accuracy = float("-inf")
    pool_size = 4
    omega = 0.9                 
    a2 = 1
    a1 = 2
    GP = 0.5

    # initialize convergence curves
    convergence_curve = {}
    convergence_curve['fitness'] = np.zeros(max_iter)
    convergence_curve['feature_count'] = np.zeros(max_iter)

    # format the data
    data = Data()
    data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(
        train_data, train_label, stratify=train_label, test_size=0.2)

    # create a solution object
    solution = Solution()
    solution.num_agents = num_agents
    solution.max_iter = max_iter
    solution.num_features = num_features
    solution.obj_function = obj_function

    # rank initial particles
    particles, fitness = sort_agents(particles, obj_function, data)

    # start timer
    start_time = time.time()

    # pool initialization
    eq_pool = np.zeros((pool_size+1, num_features))
    eq_fitness = np.zeros(pool_size)
    eq_fitness[:] = float("-inf")

    for iter_no in range(max_iter):
        print('\n================================================================================')
        print('                          Iteration - {}'.format(iter_no+1))
        print('================================================================================\n')     

        # replacements in the pool
        for i in range(num_agents):
            for j in range(pool_size):                 
                if fitness[i] >= eq_fitness[j]:    # maximization: replace a pool member when the agent is at least as fit
                    eq_fitness[j] = fitness[i].copy()
                    eq_pool[j, :] = particles[i, :].copy()
                    break
        

        best_particle = eq_pool[0,:]
                
        Cave = avg_concentration(eq_pool, pool_size, num_features)
        eq_pool[pool_size] = Cave.copy()

        t = (1 - (iter_no/max_iter)) ** (a2*iter_no/max_iter)
        
        for i in range(num_agents):
            
            # randomly choose one candidate from the equilibrium pool (including the average concentration)
            inx = np.random.randint(0, pool_size + 1)
            Ceq = np.array(eq_pool[inx])

            lambda_vec = np.zeros(np.shape(Ceq))
            r_vec = np.zeros(np.shape(Ceq))
            for j in range(num_features):
                lambda_vec[j] = np.random.random()
                r_vec[j] = np.random.random()

            F_vec = np.zeros(np.shape(Ceq))
            for j in range(num_features):
                x = -1*lambda_vec[j]*t
                x = np.exp(x) - 1
                x = a1 * sign_func(r_vec[j] - 0.5) * x
                F_vec[j] = x    # store the exponential term; previously F_vec was left at zero

            r1, r2 = np.random.random(2)
            if r2 < GP:
                GCP = 0
            else:
                GCP = 0.5 * r1
            G0 = np.zeros(np.shape(Ceq))
            G = np.zeros(np.shape(Ceq))
            for j in range(num_features):
                G0[j] = GCP * (Ceq[j] - lambda_vec[j]*particles[i][j])
                G[j] = G0[j]*F_vec[j]
            
            # use transfer function to map continuous -> binary
            for j in range(num_features):
                temp = Ceq[j] + (particles[i][j] - Ceq[j])*F_vec[j] + G[j]*(1 - F_vec[j])/lambda_vec[j]
                temp = trans_function(temp)
                if temp > np.random.random():
                    particles[i][j] = 1 - particles[i][j]    # flip the bit; otherwise leave it unchanged

        # update final information
        particles, fitness = sort_agents(particles, obj_function, data)
        display(particles, fitness, agent_name)
        
        # update Leader (best agent)
        if fitness[0] > Leader_fitness:
            Leader_agent = particles[0].copy()
            Leader_fitness = fitness[0].copy()

        convergence_curve['fitness'][iter_no] = Leader_fitness
        convergence_curve['feature_count'][iter_no] = int(np.sum(Leader_agent))

    # compute final accuracy
    Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy, data)
    particles, accuracy = sort_agents(particles, compute_accuracy, data)

    print('\n================================================================================')
    print('                                    Final Result                                  ')
    print('================================================================================\n')
    print('Leader ' + agent_name + ' Dimension : {}'.format(int(np.sum(Leader_agent))))
    print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
    print('Leader ' + agent_name + ' Classification Accuracy : {}'.format(Leader_accuracy))
    print('\n================================================================================\n')

    # stop timer
    end_time = time.time()
    exec_time = end_time - start_time

    # plot convergence curves
    iters = np.arange(max_iter)+1
    fig, axes = plt.subplots(2, 1)
    fig.tight_layout(pad=5)
    fig.suptitle('Convergence Curves')

    axes[0].set_title('Convergence of Fitness over Iterations')
    axes[0].set_xlabel('Iteration')
    axes[0].set_ylabel('Fitness')
    axes[0].plot(iters, convergence_curve['fitness'])

    axes[1].set_title('Convergence of Feature Count over Iterations')
    axes[1].set_xlabel('Iteration')
    axes[1].set_ylabel('Number of Selected Features')
    axes[1].plot(iters, convergence_curve['feature_count'])

    if(save_conv_graph):
        plt.savefig('convergence_graph_'+ short_name + '.jpg')
    plt.show()

    # update attributes of solution
    solution.best_agent = Leader_agent
    solution.best_fitness = Leader_fitness
    solution.best_accuracy = Leader_accuracy
    solution.convergence_curve = convergence_curve
    solution.final_particles = particles
    solution.final_fitness = fitness
    solution.final_accuracy = accuracy
    solution.execution_time = exec_time

    return solution
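The example above leans on helpers imported elsewhere in the project (initialize, sort_agents, get_trans_function, sign_func, avg_concentration, Data, Solution). As a rough, self-contained sketch of two of them (the exact Py_FS implementations may differ), an 's'-shaped transfer function is typically a sigmoid, and sign_func just returns the sign of its argument:

import numpy as np

def s_shaped_trans_function(x):
    # sigmoid: squashes the continuous step into (0, 1) so it can be
    # compared against a uniform random draw during binarization
    return 1 / (1 + np.exp(-x))

def sign_func(x):
    # -1 for negative arguments, +1 otherwise
    return -1 if x < 0 else 1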
Example 2
def SCA(num_agents,
        max_iter,
        train_data,
        train_label,
        obj_function=compute_fitness,
        trans_func_shape='s',
        save_conv_graph=False):

    # Sine Cosine Algorithm
    ############################### Parameters ####################################
    #                                                                             #
    #   num_agents: number of agents                                              #
    #   max_iter: maximum number of generations                                   #
    #   train_data: training samples of data                                      #
    #   train_label: class labels for the training samples                        #
    #   obj_function: the function to maximize while doing feature selection      #
    #   trans_function_shape: shape of the transfer function used                 #
    #   save_conv_graph: boolean value for saving convergence graph               #
    #                                                                             #
    ###############################################################################

    short_name = 'SCA'
    agent_name = 'Agent'
    train_data, train_label = np.array(train_data), np.array(train_label)
    num_features = train_data.shape[1]
    trans_function = get_trans_function(trans_func_shape)

    # setting up the objectives
    weight_acc = None
    if (obj_function == compute_fitness):
        weight_acc = float(
            input('Weight for the classification accuracy [0-1]: '))
    obj = (obj_function, weight_acc)
    compute_accuracy = (
        compute_fitness, 1
    )  # compute_accuracy is just compute_fitness with accuracy weight as 1

    # initialize agents and Leader (the agent with the max fitness)
    population = initialize(num_agents, num_features)
    fitness = np.zeros(num_agents)
    accuracy = np.zeros(num_agents)
    Leader_agent = np.zeros((1, num_features))
    Leader_fitness = float("-inf")
    Leader_accuracy = float("-inf")

    # initialize convergence curves
    convergence_curve = {}
    convergence_curve['fitness'] = np.zeros(max_iter)
    convergence_curve['feature_count'] = np.zeros(max_iter)

    # initialize data class
    data = Data()
    val_size = float(
        input('Enter the percentage of data wanted for validation [0, 100]: ')
    ) / 100
    data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(
        train_data, train_label, stratify=train_label, test_size=val_size)

    # create a solution object
    solution = Solution()
    solution.num_agents = num_agents
    solution.max_iter = max_iter
    solution.num_features = num_features
    solution.obj_function = obj_function

    # rank initial population
    population, fitness = sort_agents(population, obj, data)
    Leader_agent = population[0].copy()
    Leader_fitness = fitness[0].copy()

    # start timer
    start_time = time.time()

    # Eq. (3.4)
    a = 3

    for iter_no in range(max_iter):
        print(
            '\n================================================================================'
        )
        print('                          Iteration - {}'.format(iter_no + 1))
        print(
            '================================================================================\n'
        )

        # Eq. (3.4)
        r1 = a - iter_no * (
            (a) / max_iter)  # r1 decreases linearly from a to 0

        # update the Position of search agents
        for i in range(num_agents):
            for j in range(num_features):

                # update r2, r3, and r4 for Eq. (3.3)
                r2 = (2 * np.pi) * np.random.random()
                r3 = 2 * np.random.random()
                r4 = np.random.random()

                # Eq. (3.3)
                if r4 < 0.5:
                    # Eq. (3.1)
                    population[i, j] = population[i, j] + \
                        (r1*np.sin(r2)*abs(r3*Leader_agent[j]-population[i, j]))
                else:
                    # Eq. (3.2)
                    population[i, j] = population[i, j] + \
                        (r1*np.cos(r2)*abs(r3*Leader_agent[j]-population[i, j]))

                temp = population[i, j].copy()
                temp = trans_function(temp)
                if temp > np.random.random():
                    population[i, j] = 1
                else:
                    population[i, j] = 0

        # update final information
        population, fitness = sort_agents(population, obj, data)
        display(population, fitness)

        if fitness[0] > Leader_fitness:
            Leader_agent = population[0].copy()
            Leader_fitness = fitness[0].copy()

        convergence_curve['fitness'][iter_no] = Leader_fitness
        convergence_curve['feature_count'][iter_no] = int(np.sum(Leader_agent))

    # compute final accuracy
    Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy,
                                                data)
    population, accuracy = sort_agents(population, compute_accuracy, data)

    print(
        '\n================================================================================'
    )
    print(
        '                                    Final Result                                  '
    )
    print(
        '================================================================================\n'
    )
    print('Leader ' + agent_name +
          ' Dimension : {}'.format(int(np.sum(Leader_agent))))
    print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
    print('Leader ' + agent_name +
          ' Classification Accuracy : {}'.format(Leader_accuracy))
    print(
        '\n================================================================================\n'
    )

    # stop timer
    end_time = time.time()
    exec_time = end_time - start_time

    # plot convergence curves
    iters = np.arange(max_iter) + 1
    fig, axes = plt.subplots(2, 1)
    fig.tight_layout(pad=5)
    fig.suptitle('Convergence Curves')

    axes[0].set_title('Convergence of Fitness over Iterations')
    axes[0].set_xlabel('Iteration')
    axes[0].set_ylabel('Fitness')
    axes[0].plot(iters, convergence_curve['fitness'])

    axes[1].set_title('Convergence of Feature Count over Iterations')
    axes[1].set_xlabel('Iteration')
    axes[1].set_ylabel('Number of Selected Features')
    axes[1].plot(iters, convergence_curve['feature_count'])

    if (save_conv_graph):
        plt.savefig('convergence_graph_' + short_name + '.jpg')
    plt.show()

    # update attributes of solution
    solution.best_agent = Leader_agent
    solution.best_fitness = Leader_fitness
    solution.best_accuracy = Leader_accuracy
    solution.convergence_curve = convergence_curve
    solution.final_particles = population
    solution.final_fitness = fitness
    solution.final_accuracy = accuracy
    solution.execution_time = exec_time

    return solution
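Here obj is a tuple of (objective function, accuracy weight), and compute_accuracy is the same objective with the weight fixed at 1. A minimal sketch of such a weighted fitness, assuming a KNN classifier and a weight that trades validation accuracy against the fraction of selected features (the real Py_FS compute_fitness may differ in its details):

import numpy as np
from sklearn.neighbors import KNeighborsClassifier

def weighted_fitness(agent, train_X, val_X, train_Y, val_Y, weight_acc=0.9):
    # the columns where the binary agent holds a 1 are the selected features
    cols = np.flatnonzero(agent)
    if cols.size == 0:
        return 0.0
    clf = KNeighborsClassifier(n_neighbors=5)
    clf.fit(train_X[:, cols], train_Y)
    acc = clf.score(val_X[:, cols], val_Y)
    # reward high accuracy and a small number of selected features
    feat_reduction = 1 - cols.size / agent.shape[0]
    return weight_acc * acc + (1 - weight_acc) * feat_reduction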
Example 3
File: RDA.py Project: ssen110/Py_FS
def RDA(num_agents, max_iter, train_data, train_label, obj_function=compute_fitness, trans_function_shape='s', save_conv_graph=False):

    # Red Deer Algorithm
    ############################### Parameters ####################################
    #                                                                             #
    #   num_agents: number of red deers                                           #
    #   max_iter: maximum number of generations                                   #
    #   train_data: training samples of data                                      #
    #   train_label: class labels for the training samples                        #                
    #   obj_function: the function to maximize while doing feature selection      #
    #   trans_function_shape: shape of the transfer function used                 #
    #   save_conv_graph: boolean value for saving convergence graph               #
    #                                                                             #
    ###############################################################################
    
    # Number of agents must be at least 8
    if num_agents < 8:
        print("The parameter num_agents must be at least 8", file=sys.stderr)
        sys.exit()
        
    short_name = 'RDA'
    agent_name = 'RedDeer'
    train_data, train_label = np.array(train_data), np.array(train_label)
    num_features = train_data.shape[1]
    trans_function = get_trans_function(trans_function_shape)

    # initialize red deers and Leader (the agent with the max fitness)
    deer = initialize(num_agents, num_features)
    fitness = np.zeros(num_agents)
    accuracy = np.zeros(num_agents)
    Leader_agent = np.zeros((1, num_features))
    Leader_fitness = float("-inf")
    Leader_accuracy = float("-inf")

    # initialize convergence curves
    convergence_curve = {}
    convergence_curve['fitness'] = np.zeros(max_iter)
    convergence_curve['feature_count'] = np.zeros(max_iter)

    # format the data 
    data = Data()
    data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(train_data, train_label, stratify=train_label, test_size=0.2)

    # create a solution object
    solution = Solution()
    solution.num_agents = num_agents
    solution.max_iter = max_iter
    solution.num_features = num_features
    solution.obj_function = obj_function
    
    # initializing parameters
    UB = 5 # Upper bound
    LB = -5 # Lower bound
    gamma = 0.5 # Fraction of total number of males who are chosen as commanders
    alpha = 0.2 # Fraction of total number of hinds in a harem who mate with the commander of their harem
    beta = 0.1 # Fraction of total number of hinds in a harem who mate with the commander of a different harem

    # start timer
    start_time = time.time()

    # main loop
    for iter_no in range(max_iter):
        print('\n================================================================================')
        print('                          Iteration - {}'.format(iter_no+1))
        print('================================================================================\n')
        
        deer, fitness = sort_agents(deer, obj_function, data)
        num_males = int(0.25 * num_agents)
        num_hinds = num_agents - num_males
        males = deer[:num_males,:]
        hinds = deer[num_males:,:]
        
        # roaring of male deer
        for i in range(num_males):
            r1 = np.random.random() # r1 is a random number in [0, 1]
            r2 = np.random.random() # r2 is a random number in [0, 1]
            r3 = np.random.random() # r3 is a random number in [0, 1]
            new_male = males[i].copy()
            if r3 >= 0.5:                                    # Eq. (3)
                new_male += r1 * (((UB - LB) * r2) + LB)
            else:
                new_male -= r1 * (((UB - LB) * r2) + LB)
                 
            # apply transformation function on the new male
            for j in range(num_features):
                trans_value = trans_function(new_male[j])
                if (np.random.random() < trans_value): 
                    new_male[j] = 1
                else:
                    new_male[j] = 0
                    
            # keep the roared position only if it improves the male's fitness (maximization)
            if obj_function(new_male, data.train_X, data.val_X, data.train_Y, data.val_Y) > obj_function(males[i], data.train_X, data.val_X, data.train_Y, data.val_Y):
                males[i] = new_male
        
        
        # selection of male commanders and stags
        num_coms = int(num_males * gamma) # Eq. (4)
        num_stags = num_males - num_coms # Eq. (5)

        coms = males[:num_coms,:]
        stags = males[num_coms:,:]
        
        # fight between male commanders and stags       
        for i in range(num_coms):
            chosen_com = coms[i].copy()
            chosen_stag = random.choice(stags)
            r1 = np.random.random()
            r2 = np.random.random()
            new_male_1 = (chosen_com + chosen_stag) / 2 + r1 * (((UB - LB) * r2) + LB) # Eq. (6)
            new_male_2 = (chosen_com + chosen_stag) / 2 - r1 * (((UB - LB) * r2) + LB) # Eq. (7)
            
            # apply transformation function on new_male_1
            for j in range(num_features):
                trans_value = trans_function(new_male_1[j])
                if (np.random.random() < trans_value): 
                    new_male_1[j] = 1
                else:
                    new_male_1[j] = 0
             
            # apply transformation function on new_male_2
            for j in range(num_features):
                trans_value = trans_function(new_male_2[j])
                if (np.random.random() < trans_value): 
                    new_male_2[j] = 1
                else:
                    new_male_2[j] = 0
                    
            fight_fitness = np.zeros(4)    # separate array so the population fitness is not overwritten
            fight_fitness[0] = obj_function(chosen_com, data.train_X, data.val_X, data.train_Y, data.val_Y)
            fight_fitness[1] = obj_function(chosen_stag, data.train_X, data.val_X, data.train_Y, data.val_Y)
            fight_fitness[2] = obj_function(new_male_1, data.train_X, data.val_X, data.train_Y, data.val_Y)
            fight_fitness[3] = obj_function(new_male_2, data.train_X, data.val_X, data.train_Y, data.val_Y)

            bestfit = np.max(fight_fitness)
            if fight_fitness[0] < fight_fitness[1] and fight_fitness[1] == bestfit:
                coms[i] = chosen_stag.copy()
            elif fight_fitness[0] < fight_fitness[2] and fight_fitness[2] == bestfit:
                coms[i] = new_male_1.copy()
            elif fight_fitness[0] < fight_fitness[3] and fight_fitness[3] == bestfit:
                coms[i] = new_male_2.copy()

        # formation of harems
        coms, fitness = sort_agents(coms, obj_function, data)
        norm = np.linalg.norm(fitness)
        normal_fit = fitness / norm
        total = np.sum(normal_fit)
        power = normal_fit / total # Eq. (9)
        num_harems = [int(x * num_hinds) for x in power] # Eq.(10)
        max_harem_size = np.max(num_harems)
        harem = np.empty(shape=(num_coms, max_harem_size, num_features))
        random.shuffle(hinds)
        itr = 0
        for i in range(num_coms):
            harem_size = num_harems[i]
            for j in range(harem_size):
                harem[i][j] = hinds[itr]
                itr += 1
        
        # mating of commander with hinds in his harem
        num_harem_mate = [int(x * alpha) for x in num_harems] # Eq. (11)
        population_pool = list(deer)
        for i in range(num_coms):
            random.shuffle(harem[i])
            for j in range(num_harem_mate[i]):
                r = np.random.random() # r is a random number in [0, 1]
                offspring = (coms[i] + harem[i][j]) / 2 + (UB - LB) * r # Eq. (12)
                
                # apply transformation function on offspring
                for f in range(num_features):    # separate loop variable to avoid shadowing the mating index j
                    trans_value = trans_function(offspring[f])
                    if (np.random.random() < trans_value):
                        offspring[f] = 1
                    else:
                        offspring[f] = 0
                population_pool.append(list(offspring))
                
                # if number of commanders is greater than 1, inter-harem mating takes place
                if num_coms > 1:
                    # mating of commander with hinds in another harem
                    k = i 
                    while k == i:
                        k = random.choice(range(num_coms))

                    num_mate = int(num_harems[k] * beta) # Eq. (13)

                    np.random.shuffle(harem[k])
                    for j in range(num_mate):
                        r = np.random.random() # r is a random number in [0, 1]
                        offspring = (coms[i] + harem[k][j]) / 2 + (UB - LB) * r 
                        # apply transformation function on offspring
                        for f in range(num_features):    # separate loop variable to avoid shadowing the mating index j
                            trans_value = trans_function(offspring[f])
                            if (np.random.random() < trans_value):
                                offspring[f] = 1
                            else:
                                offspring[f] = 0
                        population_pool.append(list(offspring))
        
        # mating of stag with nearest hind
        for stag in stags:
            dist = np.zeros(num_hinds)
            for i in range(num_hinds):
                dist[i] = math.sqrt(np.sum((stag-hinds[i])*(stag-hinds[i])))
            min_dist = np.min(dist)
            for i in range(num_hinds):
                distance = math.sqrt(np.sum((stag-hinds[i])*(stag-hinds[i]))) # Eq. (14)
                if(distance == min_dist):
                    r = np.random.random() # r is a random number in [0, 1]
                    offspring = (stag + hinds[i])/2 + (UB - LB) * r
                    
                    # apply transformation function on offspring
                    for j in range(num_features):
                        trans_value = trans_function(offspring[j])
                        if (np.random.random() < trans_value): 
                            offspring[j] = 1
                        else:
                            offspring[j] = 0
                    population_pool.append(list(offspring))
                    
                    break
        
        # selection of the next generation
        population_pool = np.array(population_pool)            
        population_pool, fitness = sort_agents(population_pool, obj_function, data)
        maximum = np.sum(fitness)
        selection_probs = fitness / maximum
        indices = np.random.choice(len(population_pool), size=num_agents, replace=True, p=selection_probs)          
        deer = population_pool[indices]
        
        # update final information
        deer, fitness = sort_agents(deer, obj_function, data)
        display(deer, fitness, agent_name)
        if fitness[0] > Leader_fitness:
            Leader_agent = deer[0].copy()
            Leader_fitness = fitness[0].copy()
        convergence_curve['fitness'][iter_no] = Leader_fitness
        convergence_curve['feature_count'][iter_no] = int(np.sum(Leader_agent))

    # compute final accuracy
    Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy, data)
    deer, accuracy = sort_agents(deer, compute_accuracy, data)

    print('\n================================================================================')
    print('                                    Final Result                                  ')
    print('================================================================================\n')
    print('Leader ' + agent_name + ' Dimension : {}'.format(int(np.sum(Leader_agent))))
    print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
    print('Leader ' + agent_name + ' Classification Accuracy : {}'.format(Leader_accuracy))
    print('\n================================================================================\n')

    # stop timer
    end_time = time.time()
    exec_time = end_time - start_time
    
    # plot convergence curves
    iters = np.arange(max_iter)+1
    fig, axes = plt.subplots(2, 1)
    fig.tight_layout(pad = 5) 
    fig.suptitle('Convergence Curves')
    
    axes[0].set_title('Convergence of Fitness over Iterations')
    axes[0].set_xlabel('Iteration')
    axes[0].set_ylabel('Fitness')
    axes[0].plot(iters, convergence_curve['fitness'])

    axes[1].set_title('Convergence of Feature Count over Iterations')
    axes[1].set_xlabel('Iteration')
    axes[1].set_ylabel('Number of Selected Features')
    axes[1].plot(iters, convergence_curve['feature_count'])

    if(save_conv_graph):
        plt.savefig('convergence_graph_'+ short_name + '.jpg')
    plt.show()

    # update attributes of solution
    solution.best_agent = Leader_agent
    solution.best_fitness = Leader_fitness
    solution.best_accuracy = Leader_accuracy
    solution.convergence_curve = convergence_curve
    solution.final_population = deer
    solution.final_fitness = fitness
    solution.final_accuracy = accuracy
    solution.execution_time = exec_time
    return solution
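Every wrapper shown here starts from an initialize(num_agents, num_features) call. A plausible sketch, not necessarily the exact Py_FS version, builds a random binary population and guarantees that each agent selects at least one feature:

import numpy as np

def initialize(num_agents, num_features):
    # random binary population: one row per agent, one column per feature
    agents = np.random.randint(0, 2, size=(num_agents, num_features)).astype(float)
    # avoid the all-zero agent, which would select no features at all
    for i in range(num_agents):
        if agents[i].sum() == 0:
            agents[i, np.random.randint(num_features)] = 1
    return agents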
Example 4
def GSA(num_agents,
        max_iter,
        train_data,
        train_label,
        obj_function=compute_fitness,
        trans_function_shape='s',
        save_conv_graph=False):

    # Gravitational Search Algorithm
    ############################### Parameters ####################################
    #                                                                             #
    #   num_agents: number of particles                                           #
    #   max_iter: maximum number of generations                                   #
    #   train_data: training samples of data                                      #
    #   train_label: class labels for the training samples                        #
    #   obj_function: the function to maximize while doing feature selection      #
    #   trans_function_shape: shape of the transfer function used                 #
    #   save_conv_graph: boolean value for saving convergence graph               #
    #                                                                             #
    ###############################################################################

    agent_name = 'Particle'
    short_name = 'GSA'
    train_data, train_label = np.array(train_data), np.array(train_label)
    num_features = train_data.shape[1]
    trans_function = get_trans_function(trans_function_shape)

    # setting up the objectives
    weight_acc = None
    if (obj_function == compute_fitness):
        weight_acc = float(
            input('Weight for the classification accuracy [0-1]: '))
    obj = (obj_function, weight_acc)
    compute_accuracy = (
        compute_fitness, 1
    )  # compute_accuracy is just compute_fitness with accuracy weight as 1

    # initialize positions of particles and Leader (the agent with the max fitness)
    positions = initialize(num_agents, num_features)
    fitness = np.zeros(num_agents)
    accuracy = np.zeros(num_agents)
    Leader_agent = np.zeros((1, num_features))
    Leader_fitness = float("-inf")
    Leader_accuracy = float("-inf")

    # initialize convergence curves
    convergence_curve = {}
    convergence_curve['fitness'] = np.zeros(max_iter)

    # initialize data class
    data = Data()
    val_size = float(
        input('Enter the percentage of data wanted for validation [0, 100]: ')
    ) / 100
    data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(
        train_data, train_label, stratify=train_label, test_size=val_size)

    # create a solution object
    solution = Solution()
    solution.num_agents = num_agents
    solution.max_iter = max_iter
    solution.num_features = num_features
    solution.obj_function = obj_function

    # initializing parameters
    eps = 0.00001
    G_ini = 6
    F = np.zeros((num_agents, num_agents, num_features))
    R = np.zeros((num_agents, num_agents))
    force = np.zeros((num_agents, num_features))
    acc = np.zeros((num_agents, num_features))
    velocity = np.zeros((num_agents, num_features))
    kBest = range(5)

    # rank initial population
    positions, fitness = sort_agents(positions, obj, data)

    # start timer
    start_time = time.time()

    # main loop
    for iter_no in range(max_iter):
        print(
            '\n================================================================================'
        )
        print('                          Iteration - {}'.format(iter_no + 1))
        print(
            '================================================================================\n'
        )

        # updating value of G
        G = G_ini - iter_no * (G_ini / max_iter)  # Eq. (13)

        # finding mass of each particle
        best_fitness = fitness[0]
        worst_fitness = fitness[-1]
        m = (fitness - worst_fitness) / (best_fitness - worst_fitness + eps
                                         )  # Eq. (15)
        sum_fitness = np.sum(m)
        mass = m / sum_fitness  # Eq. (16)

        # finding force acting between each pair of particles
        # (distances and forces are reset so they do not accumulate across iterations)
        R = np.zeros((num_agents, num_agents))
        F = np.zeros((num_agents, num_agents, num_features))
        force = np.zeros((num_agents, num_features))
        for i in range(num_agents):
            for j in range(num_agents):
                for k in range(num_features):
                    R[i][j] += abs(positions[i][k] - positions[j][k])  # Eq. (8)
                F[i][j] = G * (mass[i] * mass[j]) / (R[i][j] + eps) * (
                    positions[j] - positions[i])  # Eq. (7)

        # finding net force acting on each particle
        for i in range(num_agents):
            for j in kBest:
                if i != j:
                    force[i] += np.random.random() * F[i][j]  # Eq. (9)

        # finding acceleration of each particle
        for i in range(num_agents):
            acc[i] = force[i] / (mass[i] + eps)  # Eq. (10)

        # updating velocity of each particle
        velocity = np.random.random() * velocity + acc  # Eq. (11)

        # apply transformation function on the velocity
        for i in range(num_agents):
            for j in range(num_features):
                trans_value = trans_function(velocity[i][j])
                if (np.random.random() < trans_value):
                    positions[i][j] = 1
                else:
                    positions[i][j] = 0
            if np.sum(positions[i]) == 0:
                # keep at least one feature selected (randint's upper bound is exclusive)
                x = np.random.randint(0, num_features)
                positions[i][x] = 1

        # update final information
        positions, fitness = sort_agents(positions, obj, data)
        display(positions, fitness, agent_name)
        if fitness[0] > Leader_fitness:
            Leader_agent = positions[0].copy()
            Leader_fitness = fitness[0].copy()

        convergence_curve['fitness'][iter_no] = np.mean(fitness)

    # compute final accuracy
    Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy,
                                                data)
    positions, accuracy = sort_agents(positions, compute_accuracy, data)

    print(
        '\n================================================================================'
    )
    print(
        '                                    Final Result                                  '
    )
    print(
        '================================================================================\n'
    )
    print('Leader ' + agent_name +
          ' Dimension : {}'.format(int(np.sum(Leader_agent))))
    print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
    print('Leader ' + agent_name +
          ' Classification Accuracy : {}'.format(Leader_accuracy))
    print(
        '\n================================================================================\n'
    )

    # stop timer
    end_time = time.time()
    exec_time = end_time - start_time

    # plot convergence graph
    fig, axes = Conv_plot(convergence_curve)
    if (save_conv_graph):
        plt.savefig('convergence_graph_' + short_name + '.jpg')
    plt.show()

    # update attributes of solution
    solution.best_agent = Leader_agent
    solution.best_fitness = Leader_fitness
    solution.best_accuracy = Leader_accuracy
    solution.convergence_curve = convergence_curve
    solution.final_population = positions
    solution.final_fitness = fitness
    solution.final_accuracy = accuracy
    solution.execution_time = exec_time
    return solution
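A minimal usage sketch, assuming the wrappers can be imported from Py_FS.wrapper.nature_inspired (assumed import path) and using sklearn's breast-cancer data purely as an illustration; the interactive prompts for the accuracy weight and the validation split still appear at runtime:

from sklearn.datasets import load_breast_cancer
from Py_FS.wrapper.nature_inspired import GSA  # assumed import path

dataset = load_breast_cancer()
solution = GSA(num_agents=20, max_iter=30,
               train_data=dataset.data, train_label=dataset.target,
               trans_function_shape='s', save_conv_graph=False)

print(solution.best_agent)      # binary mask over the features
print(solution.best_accuracy)   # validation accuracy of the best agent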
Example 5
def BBA(num_agents,
        max_iter,
        train_data,
        train_label,
        obj_function=compute_fitness,
        trans_function_shape='s',
        constantLoudness=True,
        save_conv_graph=False):

    # Binary Bat Algorithm (BBA)
    ############################### Parameters ####################################
    #                                                                             #
    #   num_agents: number of bats                                                #
    #   max_iter: maximum number of generations                                   #
    #   train_data: training samples of data                                      #
    #   train_label: class labels for the training samples                        #
    #   obj_function: the function to maximize while doing feature selection      #
    #   trans_function_shape: shape of the transfer function used                 #
    #   save_conv_graph: boolean value for saving convergence graph               #
    #                                                                             #
    ###############################################################################

    agent_name = "Bat"
    short_name = "BBA"
    train_data, train_label = np.array(train_data), np.array(train_label)
    num_features = train_data.shape[1]
    trans_function = get_trans_function(trans_function_shape)

    # setting up the objectives
    weight_acc = None
    if (obj_function == compute_fitness):
        weight_acc = float(
            input('Weight for the classification accuracy [0-1]: '))
    obj = (obj_function, weight_acc)
    compute_accuracy = (
        compute_fitness, 1
    )  # compute_accuracy is just compute_fitness with accuracy weight as 1

    # initialize bats and Leader (the agent with the max fitness)
    bats = initialize(num_agents, num_features)
    velocity = np.zeros([num_agents, num_features])
    fitness = np.zeros(num_agents)
    Leader_agent = np.zeros((1, num_features))
    Leader_fitness = float("-inf")
    Leader_accuracy = float("-inf")
    accuracy = np.zeros(num_agents)

    # initialize some important parameters
    minFrequency = 0  # min freq, const if constantLoudness  == True
    maxFrequency = 2  # max freq, const if constantLoudness  == True
    A = 1.00  # loudness
    r = 0.15  # pulse emission rate

    # initialize convergence curves
    convergence_curve = {}
    convergence_curve['fitness'] = np.zeros(max_iter)

    # initialize data class
    data = Data()
    val_size = float(
        input('Enter the percentage of data wanted for validation [0, 100]: ')
    ) / 100
    data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(
        train_data, train_label, stratify=train_label, test_size=val_size)

    # create a solution object
    solution = Solution()
    solution.num_agents = num_agents
    solution.max_iter = max_iter
    solution.num_features = num_features
    solution.obj_function = obj_function

    # start timer
    start_time = time.time()

    bats, fitness = sort_agents(bats, obj, data)

    Leader_agent = bats[0, :]
    Leader_fitness = fitness[0]

    alpha = 0.95
    gamma = 0.5
    A_t = A
    r_t = r

    for iter_no in range(max_iter):
        print(
            '\n================================================================================'
        )
        print('                          Iteration - {}'.format(iter_no + 1))
        print(
            '================================================================================\n'
        )

        if constantLoudness == False:
            A_t *= alpha
            r_t = r * (1 - np.exp(-1 * gamma * iter_no))

        for agentNumber in range(num_agents):
            fi = minFrequency + (maxFrequency - minFrequency) * np.random.rand(
            )  # frequency for i-th agent or bat

            # update velocity equation number 1 in paper
            velocity[agentNumber, :] = velocity[
                agentNumber, :] + (bats[agentNumber, :] - Leader_agent) * fi

            # updating the bats for bat number = agentNumber
            newPos = np.zeros([1, num_features])

            for featureNumber in range(num_features):
                transferValue = trans_function(velocity[agentNumber,
                                                        featureNumber])

                # change complement bats value at dimension number = featureNumber
                if np.random.rand() < transferValue:
                    newPos[0, featureNumber] = 1 - bats[agentNumber,
                                                        featureNumber]
                else:
                    newPos[0, featureNumber] = bats[agentNumber, featureNumber]

                # considering the current pulse rate
                if np.random.rand() > r_t:
                    newPos[0, featureNumber] = Leader_agent[featureNumber]

            ## calculate fitness for new bats
            newFit = obj_function(newPos, data.train_X, data.val_X,
                                  data.train_Y, data.val_Y, weight_acc)

            ## update better solution for the individual bat
            if fitness[agentNumber] <= newFit and np.random.rand() <= A_t:
                fitness[agentNumber] = newFit
                bats[agentNumber, :] = newPos[0, :]

        bats, fitness = sort_agents(bats, obj, data)

        ## update (global) best solution for all bats
        if fitness[0] > Leader_fitness:
            Leader_fitness = fitness[0]
            Leader_agent = bats[0, :]

        convergence_curve['fitness'][iter_no] = np.mean(fitness)

        # display current agents
        display(bats, fitness, agent_name)

    # compute final accuracy
    Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy,
                                                data)
    bats, accuracy = sort_agents(bats, compute_accuracy, data)

    print(
        '\n================================================================================'
    )
    print(
        '                                    Final Result                                  '
    )
    print(
        '================================================================================\n'
    )
    print('Leader ' + agent_name +
          ' Dimension : {}'.format(int(np.sum(Leader_agent))))
    print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
    print('Leader ' + agent_name +
          ' Classification Accuracy : {}'.format(Leader_accuracy))
    print(
        '\n================================================================================\n'
    )

    # stop timer
    end_time = time.time()
    exec_time = end_time - start_time

    # plot convergence graph
    fig, axes = Conv_plot(convergence_curve)
    if (save_conv_graph):
        plt.savefig('convergence_graph_' + short_name + '.jpg')
    plt.show()

    solution.best_agent = Leader_agent
    solution.best_fitness = Leader_fitness
    solution.best_accuracy = Leader_accuracy
    solution.convergence_curve = convergence_curve
    solution.final_population = bats
    solution.final_fitness = fitness
    solution.final_accuracy = accuracy
    solution.execution_time = exec_time

    return solution
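sort_agents is used throughout to rank a population (and, when passed a single agent, to evaluate just that one); in several examples the objective is actually a (function, weight) tuple that the real helper unpacks. A simplified sketch of the population case, assuming the objective takes (agent, train_X, val_X, train_Y, val_Y) and that larger fitness is better:

import numpy as np

def sort_agents(agents, obj_function, data):
    # evaluate every agent on the held-out validation split
    fitness = np.array([
        obj_function(agent, data.train_X, data.val_X, data.train_Y, data.val_Y)
        for agent in agents
    ])
    # best-first ordering
    order = np.argsort(-fitness)
    return agents[order], fitness[order]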
Example 6
def HS(num_agents, max_iter, train_data, train_label, obj_function = compute_fitness, save_conv_graph = False):
    
    # Harmony Search Algorithm
    ############################### Parameters ####################################
    #                                                                             #
    #   num_agents: number of harmonies                                           #
    #   max_iter: maximum number of generations                                   #
    #   train_data: training samples of data                                      #
    #   train_label: class labels for the training samples                        #                
    #   obj_function: the function to maximize while doing feature selection      #
    #   trans_function_shape: shape of the transfer function used                 #
    #   save_conv_graph: boolean value for saving convergence graph               #
    #                                                                             #
    ###############################################################################

    # <STEPS OF THE HARMONY SEARCH ALGORITHM>
    # Step 1. Initialize a Harmony Memory (HM).
    # Step 2. Improvise a new harmony from HM.
    # Step 3. If the new harmony is better than minimum harmony in HM, include the new harmony in HM, and exclude the minimum harmony from HM.
    # Step 4. If stopping criteria are not satisfied, go to Step 2.

    short_name = 'HS'
    agent_name = 'Harmony'
    train_data, train_label = np.array(train_data), np.array(train_label)
    num_features = train_data.shape[1]

    # initialize the harmonies and Leader (the agent with the max fitness)
    harmonyMemory = initialize(num_agents, num_features)
    fitness = np.zeros(num_agents)
    accuracy = np.zeros(num_agents)
    Leader_agent = np.zeros((1, num_features))
    Leader_fitness = float("-inf")
    Leader_accuracy = float("-inf")
    HMCR = 0.90     # Harmony Memory Consideration Rate

    # initialize convergence curves
    convergence_curve = {}
    convergence_curve['fitness'] = np.zeros(max_iter)
    convergence_curve['feature_count'] = np.zeros(max_iter)

    # format the data
    data = Data()
    data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(train_data, train_label, stratify=train_label, test_size=0.2)

    # create a solution object
    solution = Solution()
    solution.num_agents = num_agents
    solution.max_iter = max_iter
    solution.num_features = num_features
    solution.obj_function = obj_function

    # start timer
    start_time = time.time()

    # calculate initial fitness, then sort and rank the harmony memory
    harmonyMemory, fitness = sort_agents(harmonyMemory, obj_function, data)

    # create new harmonies in each iteration
    for iterCount in range(max_iter):
        print('\n================================================================================')
        print('                          Iteration - {}'.format(iterCount + 1))
        print('================================================================================\n')
        HMCR_randValue = np.random.rand()
        newHarmony = np.zeros([1, num_features])

        # print(HMCR)
        # print(HMCR_randValue)

        if HMCR_randValue <= HMCR:
            for featureNum in range(num_features):
                selectedAgent = random.randint(0, num_agents - 1)
                newHarmony[0, featureNum] = harmonyMemory[selectedAgent, featureNum]

        else:
            for featureNum in range(num_features):
                newHarmony[0, featureNum] = random.randint(0, 1)

        fitnessHarmony = obj_function(newHarmony, data.train_X, data.val_X, data.train_Y, data.val_Y)

        if fitness[num_agents-1] < fitnessHarmony:
            harmonyMemory[num_agents-1, :] = newHarmony
            fitness[num_agents-1] = fitnessHarmony

        # sort harmony memory
        harmonyMemory, fitness = sort_agents(harmonyMemory, obj_function, data)
        if fitness[0] > Leader_fitness:
            Leader_agent = harmonyMemory[0].copy()
            Leader_fitness = fitness[0].copy()

        # update 
        convergence_curve['fitness'][iterCount] = Leader_fitness
        convergence_curve['feature_count'][iterCount] = int(np.sum(Leader_agent))

        display(harmonyMemory, fitness, agent_name)
    
    # compute final accuracy
    Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy, data)
    harmonyMemory, accuracy = sort_agents(harmonyMemory, compute_accuracy, data)

    print('\n================================================================================')
    print('                                    Final Result                                  ')
    print('================================================================================\n')
    print('Leader ' + agent_name + ' Dimension : {}'.format(int(np.sum(Leader_agent))))
    print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
    print('Leader ' + agent_name + ' Classification Accuracy : {}'.format(Leader_accuracy))
    print('\n================================================================================\n')



    # stop timer
    end_time = time.time()
    exec_time = end_time - start_time

    # plot convergence curves
    iters = np.arange(max_iter)+1
    fig, axes = plt.subplots(2, 1)
    fig.tight_layout(pad=5)
    fig.suptitle('Convergence Curves')

    axes[0].set_title('Convergence of Fitness over Iterations')
    axes[0].set_xlabel('Iteration')
    axes[0].set_ylabel('Fitness')
    axes[0].plot(iters, convergence_curve['fitness'])

    axes[1].set_title('Convergence of Feature Count over Iterations')
    axes[1].set_xlabel('Iteration')
    axes[1].set_ylabel('Number of Selected Features')
    axes[1].plot(iters, convergence_curve['feature_count'])

    if(save_conv_graph):
        plt.savefig('convergence_graph_'+ short_name + '.jpg')
    plt.show()


    solution.best_agent = Leader_agent
    solution.best_fitness = Leader_fitness
    solution.best_accuracy = Leader_accuracy
    solution.convergence_curve = convergence_curve
    solution.final_population = harmonyMemory
    solution.final_fitness = fitness
    solution.final_accuracy = accuracy
    solution.execution_time = exec_time


    return solution
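Data and Solution are plain containers shared by all of these wrappers. A minimal sketch, with the attribute names taken from how the code above assigns them:

class Data:
    # holds the train/validation split produced by train_test_split
    def __init__(self):
        self.train_X, self.val_X = None, None
        self.train_Y, self.val_Y = None, None

class Solution:
    # aggregates everything a wrapper returns to the caller
    def __init__(self):
        self.num_agents = None
        self.max_iter = None
        self.num_features = None
        self.obj_function = None
        self.best_agent = None
        self.best_fitness = None
        self.best_accuracy = None
        self.convergence_curve = None
        self.final_population = None
        self.final_fitness = None
        self.final_accuracy = None
        self.execution_time = None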
Example 7
def MA(num_agents, max_iter, train_data, train_label, obj_function=compute_fitness, trans_function_shape='s',  prob_mut=0.2,  save_conv_graph=False):
    
    # Mayfly Algorithm
    ############################### Parameters ####################################
    #                                                                             #
    #   num_agents: number of mayflies                                            #
    #   max_iter: maximum number of generations                                   #
    #   train_data: training samples of data                                      #
    #   train_label: class labels for the training samples                        #                
    #   obj_function: the function to maximize while doing feature selection      #
    #   prob_mut: probability of mutation                                         #
    #   trans_function_shape: shape of the transfer function used                 #
    #   save_conv_graph: boolean value for saving convergence graph               #
    #                                                                             #
    ###############################################################################

    short_name = 'MA'
    agent_name = 'Mayfly'
    train_data, train_label = np.array(train_data), np.array(train_label)
    num_features = train_data.shape[1]
    trans_function = get_trans_function(trans_function_shape)
    
    # control parameters
    a1 = 1
    a2 = 1.5
    d = 0.1
    fl = 0.1
    g = 0.8
    beta = 2
    delta = 0.9
    
    # initialize positions and velocities of male and female mayflies and the Leader (the agent with the max fitness)
    male_pos = initialize(num_agents, num_features)
    female_pos = initialize(num_agents, num_features)
    male_vel = np.random.uniform(low = -1, high = 1, size = (num_agents, num_features))
    female_vel = np.random.uniform(low = -1, high = 1, size = (num_agents, num_features))
    male_fitness = np.zeros((num_agents))
    male_accuracy = np.zeros(num_agents)
    female_fitness = np.zeros((num_agents))
    Leader_agent = np.zeros((num_features))
    Leader_fitness = float("-inf")
    Leader_accuracy = float("-inf")
    male_personal_best = np.zeros((num_agents, num_features))
    male_offspring = np.zeros((num_agents, num_features))
    female_offspring = np.zeros((num_agents, num_features))
    vmax_male = np.zeros((num_features))
    vmax_female = np.zeros((num_features))
    
    # initialize convergence curves
    convergence_curve = {}
    convergence_curve['fitness'] = np.zeros(max_iter)
    convergence_curve['feature_count'] = np.zeros(max_iter)
    
    data = Data()
    data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(train_data, train_label, stratify=train_label, test_size=0.2)
    
    # create a solution object
    solution = Solution()
    solution.num_agents = num_agents
    solution.max_iter = max_iter
    solution.num_features = num_features
    solution.obj_function = obj_function
    
    # rank initial population
    male_pos, male_fitness = sort_agents(male_pos, obj_function, data)
    female_pos, female_fitness = sort_agents(female_pos, obj_function, data)
    
    # start timer
    start_time = time.time()
    
    # main loop
    for iter_no in range(max_iter):
        print('\n================================================================================')
        print('                          Iteration - {}'.format(iter_no+1))
        print('================================================================================\n')
        
        #updating velocity limits
        vmax_male, vmax_female = update_max_velocity(male_pos, female_pos)
        
        for agent in range(num_agents):
            
            #updating Leader fitness and personal best fitnesses
            if male_fitness[agent] > Leader_fitness:
                Leader_fitness = male_fitness[agent]
                Leader_agent = male_pos[agent]
            
            if male_fitness[agent] > obj_function(male_personal_best[agent], data.train_X, data.val_X, data.train_Y, data.val_Y):
                male_personal_best[agent] = male_pos[agent]

            #update velocities of male and female mayflies
            male_vel[agent], female_vel[agent] = update_velocity(male_pos[agent], female_pos[agent], male_vel[agent], female_vel[agent], Leader_agent, male_personal_best[agent], a1, a2, d, fl, g, beta, agent, data, obj_function)
            
            #check boundary condition of velocities of male and female mayflies
            male_vel[agent], female_vel[agent] = check_velocity_limits(male_vel[agent], female_vel[agent], vmax_male, vmax_female)
            
            #applying transfer functions to update positions of male and female mayflies
            #the update is based on their respective velocity values
            for j in range(num_features):
                trans_value = trans_function(male_vel[agent][j])
                if trans_value > np.random.random():    # compare against a uniform draw, as for the females below
                    male_pos[agent][j]=1
                else:
                    male_pos[agent][j]=0

                trans_value = trans_function(female_vel[agent][j])
                if trans_value > np.random.random():
                    female_pos[agent][j]=1
                else:
                    female_pos[agent][j]=0
        
        #sorting 
        male_pos, male_fitness = sort_agents(male_pos, obj_function, data)
        female_pos, female_fitness = sort_agents(female_pos, obj_function, data)
        
        for agent in range(num_agents):
            
            #generation of offspring by crossover and mutation between male and female parent mayflies
            male_offspring[agent], female_offspring[agent] = cross_mut(male_pos[agent], female_pos[agent])
            
        #comparing parents and offsprings and replacing parents wherever necessary
        male_pos = compare_and_replace(male_pos, male_offspring, male_fitness, data, obj_function)
        female_pos = compare_and_replace(female_pos, female_offspring, female_fitness, data, obj_function)
        
        #updating fitness values
        male_pos, male_fitness = sort_agents(male_pos, obj_function, data)
        female_pos, female_fitness = sort_agents(female_pos, obj_function, data)
        
        #updating values of nuptial dance
        d = d * delta
        fl = fl * delta
        
        #update final information
        display(male_pos, male_fitness, agent_name)
        if(male_fitness[0] > Leader_fitness):
            Leader_agent = male_pos[0].copy()
            Leader_fitness = male_fitness[0].copy()
        convergence_curve['fitness'][iter_no] = Leader_fitness
        convergence_curve['feature_count'][iter_no] = int(np.sum(Leader_agent))
    
    # compute final accuracy
    Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy, data)
    male_pos, male_accuracy = sort_agents(male_pos, compute_accuracy, data)

    print('\n================================================================================')
    print('                                    Final Result                                  ')
    print('================================================================================\n')
    print('Leader ' + agent_name + ' Dimension : {}'.format(int(np.sum(Leader_agent))))
    print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
    print('Leader ' + agent_name + ' Classification Accuracy : {}'.format(Leader_accuracy))
    print('\n================================================================================\n')

    # stop timer
    end_time = time.time()
    exec_time = end_time - start_time
    
    # plot convergence curves
    iters = np.arange(max_iter)+1
    fig, axes = plt.subplots(2, 1)
    fig.tight_layout(pad = 5) 
    fig.suptitle('Convergence Curves')
    
    axes[0].set_title('Convergence of Fitness over Iterations')
    axes[0].set_xlabel('Iteration')
    axes[0].set_ylabel('Fitness')
    axes[0].plot(iters, convergence_curve['fitness'])

    axes[1].set_title('Convergence of Feature Count over Iterations')
    axes[1].set_xlabel('Iteration')
    axes[1].set_ylabel('Number of Selected Features')
    axes[1].plot(iters, convergence_curve['feature_count'])

    if(save_conv_graph):
        plt.savefig('convergence_graph_'+ short_name + '.jpg')

    plt.show()
    
    # update attributes of solution
    solution.best_agent = Leader_agent
    solution.best_fitness = Leader_fitness
    solution.best_accuracy = Leader_accuracy
    solution.convergence_curve = convergence_curve
    solution.final_population = male_pos
    solution.final_fitness = male_fitness
    solution.final_accuracy = male_accuracy
    solution.execution_time = exec_time

    return solution
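
Note: the helper get_trans_function used by the mayfly wrapper above (and by the other wrappers below) is not part of this listing. A minimal sketch, assuming the common s-shaped (sigmoid) and v-shaped transfer functions used in binary metaheuristics; the actual Py_FS helper may differ:

import numpy as np

def get_trans_function_sketch(shape='s'):
    # return a transfer function mapping a real-valued velocity to a probability in [0, 1]
    if shape == 's':
        # s-shaped (sigmoid)
        return lambda x: 1.0 / (1.0 + np.exp(-x))
    if shape == 'v':
        # v-shaped: |tanh(x)|
        return lambda x: np.abs(np.tanh(x))
    raise ValueError('unknown transfer function shape: {}'.format(shape))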
Example n. 8
def PSO(num_agents,
        max_iter,
        train_data,
        train_label,
        obj_function=compute_fitness,
        trans_func_shape='s',
        save_conv_graph=False):

    # Particle Swarm Optimizer
    ############################### Parameters ####################################
    #                                                                             #
    #   num_agents: number of particles                                           #
    #   max_iter: maximum number of generations                                   #
    #   train_data: training samples of data                                      #
    #   train_label: class labels for the training samples                        #
    #   obj_function: the function to maximize while doing feature selection      #
    #   trans_function_shape: shape of the transfer function used                 #
    #   save_conv_graph: boolean value for saving convergence graph               #
    #                                                                             #
    ###############################################################################

    short_name = 'PSO'
    agent_name = 'Particle'
    train_data, train_label = np.array(train_data), np.array(train_label)
    num_features = train_data.shape[1]
    trans_function = get_trans_function(trans_func_shape)

    # setting up the objectives
    weight_acc = None
    if (obj_function == compute_fitness):
        weight_acc = float(
            input('Weight for the classification accuracy [0-1]: '))
    obj = (obj_function, weight_acc)
    compute_accuracy = (
        compute_fitness, 1
    )  # compute_accuracy is just compute_fitness with accuracy weight as 1

    # initialize particles and Leader (the agent with the max fitness)
    particles = initialize(num_agents, num_features)
    fitness = np.zeros(num_agents)
    accuracy = np.zeros(num_agents)
    Leader_agent = np.zeros((1, num_features))
    Leader_fitness = float("-inf")
    Leader_accuracy = float("-inf")

    # initialize convergence curves
    convergence_curve = {}
    convergence_curve['fitness'] = np.zeros(max_iter)

    # initialize data class
    data = Data()
    val_size = float(
        input('Enter the percentage of data wanted for validation [0, 100]: ')
    ) / 100
    data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(
        train_data, train_label, stratify=train_label, test_size=val_size)

    # create a solution object
    solution = Solution()
    solution.num_agents = num_agents
    solution.max_iter = max_iter
    solution.num_features = num_features
    solution.obj_function = obj_function

    # rank initial particles
    particles, fitness = sort_agents(particles, obj, data)

    # start timer
    start_time = time.time()

    # initialize global and local best particles
    globalBestParticle = [0 for i in range(num_features)]
    globalBestFitness = float("-inf")
    localBestParticle = [[0 for i in range(num_features)]
                         for j in range(num_agents)]
    localBestFitness = [float("-inf") for i in range(num_agents)]
    weight = 1.0
    velocity = [[0.0 for i in range(num_features)] for j in range(num_agents)]

    for iter_no in range(max_iter):
        print(
            '\n================================================================================'
        )
        print('                          Iteration - {}'.format(iter_no + 1))
        print(
            '================================================================================\n'
        )

        # update weight
        weight = 1.0 - (iter_no / max_iter)

        # update the velocity
        for i in range(num_agents):
            for j in range(num_features):
                velocity[i][j] = (weight * velocity[i][j])
                r1, r2 = np.random.random(2)
                velocity[i][j] = velocity[i][j] + (
                    r1 * (localBestParticle[i][j] - particles[i][j]))
                velocity[i][j] = velocity[i][j] + (
                    r2 * (globalBestParticle[j] - particles[i][j]))

        # updating position of particles
        for i in range(num_agents):
            for j in range(num_features):
                trans_value = trans_function(velocity[i][j])
                if (np.random.random() < trans_value):
                    particles[i][j] = 1
                else:
                    particles[i][j] = 0

        # updating fitness of particles
        particles, fitness = sort_agents(particles, obj, data)
        display(particles, fitness, agent_name)

        # updating the global best and local best particles
        for i in range(num_agents):
            if fitness[i] > localBestFitness[i]:
                localBestFitness[i] = fitness[i]
                localBestParticle[i] = particles[i].copy()   # copy, not a view into particles

            if fitness[i] > globalBestFitness:
                globalBestFitness = fitness[i]
                globalBestParticle = particles[i].copy()

        # update Leader (best agent)
        if globalBestFitness > Leader_fitness:
            Leader_agent = globalBestParticle.copy()
            Leader_fitness = globalBestFitness.copy()

        convergence_curve['fitness'][iter_no] = np.mean(fitness)

    # compute final accuracy
    Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy,
                                                data)
    particles, accuracy = sort_agents(particles, compute_accuracy, data)

    print(
        '\n================================================================================'
    )
    print(
        '                                    Final Result                                  '
    )
    print(
        '================================================================================\n'
    )
    print('Leader ' + agent_name +
          ' Dimension : {}'.format(int(np.sum(Leader_agent))))
    print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
    print('Leader ' + agent_name +
          ' Classification Accuracy : {}'.format(Leader_accuracy))
    print(
        '\n================================================================================\n'
    )

    # stop timer
    end_time = time.time()
    exec_time = end_time - start_time

    # plot convergence graph
    fig, axes = Conv_plot(convergence_curve)
    if (save_conv_graph):
        plt.savefig('convergence_graph_' + short_name + '.jpg')
    plt.show()

    # update attributes of solution
    solution.best_agent = Leader_agent
    solution.best_fitness = Leader_fitness
    solution.best_accuracy = Leader_accuracy
    solution.convergence_curve = convergence_curve
    solution.final_particles = particles
    solution.final_fitness = fitness
    solution.final_accuracy = accuracy
    solution.execution_time = exec_time

    return solution
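
A possible way to call the PSO wrapper above on a toy dataset is sketched below. The commented import path is an assumption (adjust it to your installation), and note that this wrapper prompts interactively for the accuracy weight and the validation split:

from sklearn.datasets import load_breast_cancer
# from Py_FS.wrapper.nature_inspired import PSO   # assumed import path

dataset = load_breast_cancer()
sol = PSO(num_agents=20, max_iter=30,
          train_data=dataset.data, train_label=dataset.target,
          save_conv_graph=False)
print('selected features:', int(sol.best_agent.sum()))
print('best fitness:', sol.best_fitness)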
Example n. 9
def WOA(num_agents,
        max_iter,
        train_data,
        train_label,
        obj_function=compute_fitness,
        trans_function_shape='s',
        save_conv_graph=False):

    # Whale Optimization Algorithm
    ############################### Parameters ####################################
    #                                                                             #
    #   num_agents: number of whales                                              #
    #   max_iter: maximum number of generations                                   #
    #   train_data: training samples of data                                      #
    #   train_label: class labels for the training samples                        #
    #   obj_function: the function to maximize while doing feature selection      #
    #   trans_function_shape: shape of the transfer function used                 #
    #   save_conv_graph: boolean value for saving convergence graph               #
    #                                                                             #
    ###############################################################################

    short_name = 'WOA'
    agent_name = 'Whale'
    train_data, train_label = np.array(train_data), np.array(train_label)
    num_features = train_data.shape[1]
    cross_limit = 5
    trans_function = get_trans_function(trans_function_shape)

    # setting up the objectives
    weight_acc = None
    if (obj_function == compute_fitness):
        weight_acc = float(
            input('Weight for the classification accuracy [0-1]: '))
    obj = (obj_function, weight_acc)
    compute_accuracy = (
        compute_fitness, 1
    )  # compute_accuracy is just compute_fitness with accuracy weight as 1

    # initialize whales and Leader (the agent with the max fitness)
    whales = initialize(num_agents, num_features)
    fitness = np.zeros(num_agents)
    accuracy = np.zeros(num_agents)
    Leader_agent = np.zeros((1, num_features))
    Leader_fitness = float("-inf")
    Leader_accuracy = float("-inf")

    # initialize convergence curves
    convergence_curve = {}
    convergence_curve['fitness'] = np.zeros(max_iter)
    convergence_curve['feature_count'] = np.zeros(max_iter)

    # format the data
    data = Data()
    data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(
        train_data, train_label, stratify=train_label, test_size=0.2)

    # create a solution object
    solution = Solution()
    solution.num_agents = num_agents
    solution.max_iter = max_iter
    solution.num_features = num_features
    solution.obj_function = obj_function

    # rank initial population
    whales, fitness = sort_agents(whales, obj, data)

    # start timer
    start_time = time.time()

    # main loop
    for iter_no in range(max_iter):
        print(
            '\n================================================================================'
        )
        print('                          Iteration - {}'.format(iter_no + 1))
        print(
            '================================================================================\n'
        )

        a = 2 - iter_no * (2 / max_iter)  # a decreases linearly from 2 to 0
        # update the position of each whale
        for i in range(num_agents):
            # update the parameters
            r = np.random.random()  # r is a random number in [0, 1]
            A = (2 * a * r) - a  # Eq. (3)
            C = 2 * r  # Eq. (4)
            l = -1 + (np.random.random() * 2
                      )  # l is a random number in [-1, 1]
            p = np.random.random()  # p is a random number in [0, 1]
            b = 1  # defines shape of the spiral

            if p < 0.5:
                # Shrinking Encircling mechanism
                if abs(A) >= 1:
                    rand_agent_index = np.random.randint(0, num_agents)
                    rand_agent = whales[rand_agent_index, :]
                    mod_dist_rand_agent = abs(C * rand_agent - whales[i, :])
                    whales[i, :] = rand_agent - (A * mod_dist_rand_agent
                                                 )  # Eq. (9)

                else:
                    mod_dist_Leader = abs(C * Leader_agent - whales[i, :])
                    whales[i, :] = Leader_agent - (A * mod_dist_Leader
                                                   )  # Eq. (2)

            else:
                # Spiral-Shaped Attack mechanism
                dist_Leader = abs(Leader_agent - whales[i, :])
                whales[i, :] = dist_Leader * np.exp(b * l) * np.cos(
                    l * 2 * np.pi) + Leader_agent

            # Apply transformation function on the updated whale
            for j in range(num_features):
                trans_value = trans_function(whales[i, j])
                if (np.random.random() < trans_value):
                    whales[i, j] = 1
                else:
                    whales[i, j] = 0

        # update final information
        whales, fitness = sort_agents(whales, obj, data)
        display(whales, fitness, agent_name)
        if fitness[0] > Leader_fitness:
            Leader_agent = whales[0].copy()
            Leader_fitness = fitness[0].copy()
        convergence_curve['fitness'][iter_no] = Leader_fitness
        convergence_curve['feature_count'][iter_no] = int(np.sum(Leader_agent))

    # compute final accuracy
    Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy,
                                                data)
    whales, accuracy = sort_agents(whales, compute_accuracy, data)

    print(
        '\n================================================================================'
    )
    print(
        '                                    Final Result                                  '
    )
    print(
        '================================================================================\n'
    )
    print('Leader ' + agent_name +
          ' Dimension : {}'.format(int(np.sum(Leader_agent))))
    print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
    print('Leader ' + agent_name +
          ' Classification Accuracy : {}'.format(Leader_accuracy))
    print(
        '\n================================================================================\n'
    )

    # stop timer
    end_time = time.time()
    exec_time = end_time - start_time

    # plot convergence curves
    iters = np.arange(max_iter) + 1
    fig, axes = plt.subplots(2, 1)
    fig.tight_layout(pad=5)
    fig.suptitle('Convergence Curves')

    axes[0].set_title('Convergence of Fitness over Iterations')
    axes[0].set_xlabel('Iteration')
    axes[0].set_ylabel('Fitness')
    axes[0].plot(iters, convergence_curve['fitness'])

    axes[1].set_title('Convergence of Feature Count over Iterations')
    axes[1].set_xlabel('Iteration')
    axes[1].set_ylabel('Number of Selected Features')
    axes[1].plot(iters, convergence_curve['feature_count'])

    if (save_conv_graph):
        plt.savefig('convergence_graph_' + short_name + '.jpg')
    plt.show()

    # update attributes of solution
    solution.best_agent = Leader_agent
    solution.best_fitness = Leader_fitness
    solution.best_accuracy = Leader_accuracy
    solution.convergence_curve = convergence_curve
    solution.final_population = whales
    solution.final_fitness = fitness
    solution.final_accuracy = accuracy
    solution.execution_time = exec_time

    return solution
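
sort_agents is called throughout these examples but is not shown. It is assumed to evaluate every agent with the objective function and return the population ordered by decreasing fitness, handling a single agent as a special case. A rough sketch, assuming an objective with signature obj_function(agent, data, weight_acc); this is not the library's actual code:

import numpy as np

def sort_agents_sketch(agents, obj, data):
    # unpack (objective, accuracy weight) tuples; bare callables get weight None
    obj_function, weight_acc = obj if isinstance(obj, tuple) else (obj, None)

    # single agent: just evaluate it
    if agents.ndim == 1:
        return agents, obj_function(agents, data, weight_acc)

    # population: evaluate every agent and sort by decreasing fitness
    fitness = np.array([obj_function(a, data, weight_acc) for a in agents])
    order = np.argsort(-fitness)
    return agents[order], fitness[order]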
Example n. 10
def GWO(num_agents,
        max_iter,
        train_data,
        train_label,
        obj_function=compute_fitness,
        trans_func_shape='s',
        save_conv_graph=False):

    # Grey Wolf Optimizer
    ############################### Parameters ####################################
    #                                                                             #
    #   num_agents: number of greywolves                                          #
    #   max_iter: maximum number of generations                                   #
    #   train_data: training samples of data                                      #
    #   train_label: class labels for the training samples                        #
    #   obj_function: the function to maximize while doing feature selection      #
    #   trans_function_shape: shape of the transfer function used                 #
    #   save_conv_graph: boolean value for saving convergence graph               #
    #                                                                             #
    ###############################################################################

    short_name = 'GWO'
    agent_name = 'Greywolf'
    train_data, train_label = np.array(train_data), np.array(train_label)
    num_features = train_data.shape[1]
    trans_function = get_trans_function(trans_func_shape)

    # setting up the objectives
    weight_acc = None
    if (obj_function == compute_fitness):
        weight_acc = float(
            input('Weight for the classification accuracy [0-1]: '))
    obj = (obj_function, weight_acc)
    compute_accuracy = (
        compute_fitness, 1
    )  # compute_accuracy is just compute_fitness with accuracy weight as 1

    # initialize greywolves and Leader (the agent with the max fitness)
    greywolves = initialize(num_agents, num_features)
    fitness = np.zeros(num_agents)
    accuracy = np.zeros(num_agents)
    Leader_agent = np.zeros((1, num_features))
    Leader_fitness = float("-inf")
    Leader_accuracy = float("-inf")

    # initialize convergence curves
    convergence_curve = {}
    convergence_curve['fitness'] = np.zeros(max_iter)
    convergence_curve['feature_count'] = np.zeros(max_iter)

    # initialize data class
    data = Data()
    val_size = float(
        input('Enter the percentage of data wanted for validation [0, 100]: ')
    ) / 100
    data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(
        train_data, train_label, stratify=train_label, test_size=val_size)

    # create a solution object
    solution = Solution()
    solution.num_agents = num_agents
    solution.max_iter = max_iter
    solution.num_features = num_features
    solution.obj_function = obj_function

    # rank initial greywolves
    greywolves, fitness = sort_agents(greywolves, obj, data)

    # start timer
    start_time = time.time()

    # initialize the alpha, beta and delta grey wolves and their fitness
    # (kept 1-D so that per-feature indexing like alpha[j] works below)
    alpha, beta, delta = np.zeros(num_features), np.zeros(num_features), np.zeros(num_features)
    alpha_fit, beta_fit, delta_fit = float("-inf"), float("-inf"), float("-inf")

    for iter_no in range(max_iter):
        print(
            '\n================================================================================'
        )
        print('                          Iteration - {}'.format(iter_no + 1))
        print(
            '================================================================================\n'
        )

        # update the alpha, beta and delta grey wolves
        for i in range(num_agents):

            # update alpha, beta, delta
            if fitness[i] > alpha_fit:
                delta_fit = beta_fit
                delta = beta.copy()
                beta_fit = alpha_fit
                beta = alpha.copy()
                alpha_fit = fitness[i]
                alpha = greywolves[i, :].copy()

            # update beta, delta
            elif fitness[i] > beta_fit:
                delta_fit = beta_fit
                delta = beta.copy()
                beta_fit = fitness[i]
                beta = greywolves[i, :].copy()

            # update delta
            elif fitness[i] > delta_fit:
                delta_fit = fitness[i]
                delta = greywolves[i, :].copy()

        # a decreases linearly from 2 to 0
        a = 2 - iter_no * (2 / max_iter)

        for i in range(num_agents):
            for j in range(num_features):

                # calculate distance between alpha and current agent
                r1 = np.random.random()  # r1 is a random number in [0,1]
                r2 = np.random.random()  # r2 is a random number in [0,1]
                A1 = (2 * a * r1) - a  # calculate A1
                C1 = 2 * r2  # calculate C1
                D_alpha = abs(C1 * alpha[j] -
                              greywolves[i, j])  # find distance from alpha
                X1 = alpha[j] - (A1 * D_alpha)  # Eq. (3.6)

                # calculate distance between beta and current agent
                r1 = np.random.random()  # r1 is a random number in [0,1]
                r2 = np.random.random()  # r2 is a random number in [0,1]
                A2 = (2 * a * r1) - a  # calculate A2
                C2 = 2 * r2  # calculate C2
                D_beta = abs(C2 * beta[j] -
                             greywolves[i, j])  # find distance from beta
                X2 = beta[j] - (A2 * D_beta)  # Eq. (3.6)

                # calculate distance between delta and current agent
                r1 = np.random.random()  # r1 is a random number in [0,1]
                r2 = np.random.random()  # r2 is a random number in [0,1]
                A3 = (2 * a * r1) - a  # calculate A3
                C3 = 2 * r2  # calculate C3
                D_delta = abs(C3 * delta[j] -
                              greywolves[i, j])  # find distance from delta
                X3 = delta[j] - A3 * D_delta  # Eq. (3.6)

                # update the position of current agent
                greywolves[i, j] = (X1 + X2 + X3) / 3  # Eq. (3.7)

            # Apply transformation function on the updated greywolf
            for j in range(num_features):
                trans_value = trans_function(greywolves[i, j])
                if (np.random.random() < trans_value):
                    greywolves[i, j] = 1
                else:
                    greywolves[i, j] = 0

        # update final information
        greywolves, fitness = sort_agents(greywolves, obj, data)
        display(greywolves, fitness, agent_name)

        # update Leader (best agent)
        if fitness[0] > Leader_fitness:
            Leader_agent = greywolves[0].copy()
            Leader_fitness = fitness[0].copy()

        if alpha_fit > Leader_fitness:
            Leader_fitness = alpha_fit
            Leader_agent = alpha.copy()

        convergence_curve['fitness'][iter_no] = Leader_fitness
        convergence_curve['feature_count'][iter_no] = int(np.sum(Leader_agent))

    # compute final accuracy
    Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy,
                                                data)
    greywolves, accuracy = sort_agents(greywolves, compute_accuracy, data)

    print(
        '\n================================================================================'
    )
    print(
        '                                    Final Result                                  '
    )
    print(
        '================================================================================\n'
    )
    print('Leader ' + agent_name +
          ' Dimension : {}'.format(int(np.sum(Leader_agent))))
    print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
    print('Leader ' + agent_name +
          ' Classification Accuracy : {}'.format(Leader_accuracy))
    print(
        '\n================================================================================\n'
    )

    # stop timer
    end_time = time.time()
    exec_time = end_time - start_time

    # plot convergence curves
    iters = np.arange(max_iter) + 1
    fig, axes = plt.subplots(2, 1)
    fig.tight_layout(pad=5)
    fig.suptitle('Convergence Curves')

    axes[0].set_title('Convergence of Fitness over Iterations')
    axes[0].set_xlabel('Iteration')
    axes[0].set_ylabel('Fitness')
    axes[0].plot(iters, convergence_curve['fitness'])

    axes[1].set_title('Convergence of Feature Count over Iterations')
    axes[1].set_xlabel('Iteration')
    axes[1].set_ylabel('Number of Selected Features')
    axes[1].plot(iters, convergence_curve['feature_count'])

    if (save_conv_graph):
        plt.savefig('convergence_graph_' + short_name + '.jpg')
    plt.show()

    # update attributes of solution
    solution.best_agent = Leader_agent
    solution.best_fitness = Leader_fitness
    solution.best_accuracy = Leader_accuracy
    solution.convergence_curve = convergence_curve
    solution.final_greywolves = greywolves
    solution.final_fitness = fitness
    solution.final_accuracy = accuracy
    solution.execution_time = exec_time

    return solution
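
initialize is another assumed helper: it creates a random binary population in which every agent keeps at least one feature. A minimal sketch:

import numpy as np

def initialize_sketch(num_agents, num_features):
    # random binary matrix; re-sample any all-zero row so every agent selects >= 1 feature
    agents = np.random.randint(low=0, high=2, size=(num_agents, num_features))
    for i in range(num_agents):
        if not agents[i].any():
            agents[i, np.random.randint(num_features)] = 1
    return agents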
Example n. 11
def CS (num_agents, max_iter, train_data, train_label, obj_function=compute_fitness, trans_function_shape='s', save_conv_graph=False):
    
    # Cuckoo Search Algorithm
    ############################### Parameters ####################################
    #                                                                             #
    #   num_agents: number of agents                                              #
    #   max_iter: maximum number of generations                                   #
    #   train_data: training samples of data                                      #
    #   train_label: class labels for the training samples                        #                
    #   obj_function: the function to maximize while doing feature selection      #
    #   trans_function_shape: shape of the transfer function used                 #
    #   save_conv_graph: boolean value for saving convergence graph               #
    #                                                                             #
    ###############################################################################

    short_name = 'CS'
    agent_name = 'Agent'
    num_features = train_data.shape[1]
    trans_function = get_trans_function(trans_function_shape)

    # setting up the objectives
    weight_acc = None
    if(obj_function==compute_fitness):
        weight_acc = float(input('Weight for the classification accuracy [0-1]: '))
    obj = (obj_function, weight_acc)
    compute_accuracy = (compute_fitness, 1) # compute_accuracy is just compute_fitness with accuracy weight as 1

    # initializing cuckoo and host nests
    levy_flight = np.random.uniform(low=-2, high=2, size=(num_features))
    cuckoo = np.random.randint(low=0, high=2, size=(num_features))
    nest = initialize(num_agents, num_features)
    fitness = np.zeros(num_agents)
    nest_accuracy = np.zeros(num_agents)
    cuckoo_fitness = float("-inf")
    Leader_agent = np.zeros((num_features))
    Leader_fitness = float("-inf")
    Leader_accuracy = float("-inf")
    p_a = 0.25  # fraction of nests to be replaced

    # initialize convergence curves
    convergence_curve = {}
    convergence_curve['fitness'] = np.zeros(max_iter)

    # initialize data class
    data = Data()
    val_size = float(input('Enter the percentage of data wanted for validation [0, 100]: '))/100
    data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(train_data, train_label, stratify=train_label, test_size=val_size)

    # create a solution object
    solution = Solution()
    solution.num_agents = num_agents
    solution.max_iter = max_iter
    solution.num_features = num_features
    solution.obj_function = obj_function

    # rank initial nests
    nest, fitness = sort_agents(nest, obj, data)
    cuckoo, cuckoo_fitness = sort_agents(cuckoo, obj, data)

    # start timer
    start_time = time.time()

    # main loop
    for iter_no in range(max_iter):
        print('\n================================================================================')
        print('                          Iteration - {}'.format(iter_no+1))
        print('================================================================================\n')

        # updating leader nest
        if fitness[0] > Leader_fitness:
            Leader_agent = nest[0].copy()
            Leader_fitness = fitness[0]

        # get new cuckoo
        levy_flight = get_cuckoo(levy_flight)
        for j in range(num_features):
            if trans_function(levy_flight[j]) > np.random.random():
                cuckoo[j] = 1
            else:
                cuckoo[j] = 0
        cuckoo, cuckoo_fitness = sort_agents(cuckoo, obj, data)
        
        # check if a nest needs to be replaced
        j = np.random.randint(0,num_agents)
        if cuckoo_fitness > fitness[j]:
            nest[j] = cuckoo.copy()
            fitness[j] = cuckoo_fitness

        nest, fitness = sort_agents(nest, obj, data)

        # eliminate the worst nests and generate new ones
        nest = replace_worst(nest, p_a)

        nest, fitness = sort_agents(nest, obj, data)

        # update final information
        display(nest, fitness, agent_name)

        if fitness[0]>Leader_fitness:
            Leader_agent = nest[0].copy()
            Leader_fitness = fitness[0].copy()

        convergence_curve['fitness'][iter_no] = np.mean(fitness)

    # compute final accuracy
    Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy, data)
    nest, nest_accuracy = sort_agents(nest, compute_accuracy, data)

    print('\n================================================================================')
    print('                                    Final Result                                  ')
    print('================================================================================\n')
    print('Leader ' + agent_name + ' Dimension : {}'.format(int(np.sum(Leader_agent))))
    print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
    print('Leader ' + agent_name + ' Classification Accuracy : {}'.format(Leader_accuracy))
    print('\n================================================================================\n')

    # stop timer
    end_time = time.time()
    exec_time = end_time - start_time

    # plot convergence graph
    fig, axes = Conv_plot(convergence_curve)
    if(save_conv_graph):
        plt.savefig('convergence_graph_'+ short_name + '.jpg')
    plt.show()


    # update attributes of solution
    solution.best_agent = Leader_agent
    solution.best_fitness = Leader_fitness
    solution.best_accuracy = Leader_accuracy
    solution.convergence_curve = convergence_curve
    solution.final_population = nest
    solution.final_fitness = fitness
    solution.final_accuracy = nest_accuracy
    solution.execution_time = exec_time

    return solution
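
get_cuckoo in the Cuckoo Search example above is assumed to perturb the Levy-flight vector with a heavy-tailed random step; Mantegna's algorithm is one common way to draw such a step. An illustrative sketch (the step scale and the exact behaviour of the library helper are assumptions):

import numpy as np
from math import gamma, sin, pi

def levy_step(size, beta=1.5):
    # Mantegna's algorithm for a Levy-distributed step with index beta
    sigma = (gamma(1 + beta) * sin(pi * beta / 2) /
             (gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2))) ** (1 / beta)
    u = np.random.normal(0, sigma, size)
    v = np.random.normal(0, 1, size)
    return u / np.abs(v) ** (1 / beta)

def get_cuckoo_sketch(levy_flight, step_scale=0.01):
    # move the cuckoo by a scaled Levy step
    return levy_flight + step_scale * levy_step(levy_flight.shape)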
Example n. 12
File: CS.py Project: ssen110/Py_FS
def CS(num_agents,
       max_iter,
       train_data,
       train_label,
       obj_function=compute_fitness,
       trans_function_shape='s',
       save_conv_graph=False):

    # Cuckoo Search Algorithm
    ############################### Parameters ####################################
    #                                                                             #
    #   num_agents: number of agents                                              #
    #   max_iter: maximum number of generations                                   #
    #   train_data: training samples of data                                      #
    #   train_label: class labels for the training samples                        #
    #   obj_function: the function to maximize while doing feature selection      #
    #   trans_function_shape: shape of the transfer function used                 #
    #   save_conv_graph: boolean value for saving convergence graph               #
    #                                                                             #
    ###############################################################################

    short_name = 'CS'
    agent_name = 'Agent'
    num_features = train_data.shape[1]
    trans_function = get_trans_function(trans_function_shape)

    # initializing cuckoo and host nests
    levy_flight = np.random.uniform(low=-2, high=2, size=(num_features))
    cuckoo = np.random.randint(low=0, high=2, size=(num_features))
    nest = initialize(num_agents, num_features)
    nest_fitness = np.zeros(num_agents)
    nest_accuracy = np.zeros(num_agents)
    cuckoo_fitness = float("-inf")
    Leader_agent = np.zeros((num_features))
    Leader_fitness = float("-inf")
    Leader_accuracy = float("-inf")
    p_a = 0.25  # fraction of nests to be replaced

    # initialize convergence curves
    convergence_curve = {}
    convergence_curve['fitness'] = np.zeros(max_iter)
    convergence_curve['feature_count'] = np.zeros(max_iter)

    data = Data()
    data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(
        train_data, train_label, stratify=train_label, test_size=0.2)

    # create a solution object
    solution = Solution()
    solution.num_agents = num_agents
    solution.max_iter = max_iter
    solution.num_features = num_features
    solution.obj_function = obj_function

    # rank initial nests
    nest, nest_fitness = sort_agents(nest, obj_function, data)

    # start timer
    start_time = time.time()

    # main loop
    for iter_no in range(max_iter):
        print(
            '\n================================================================================'
        )
        print('                          Iteration - {}'.format(iter_no + 1))
        print(
            '================================================================================\n'
        )

        # updating leader nest
        if nest_fitness[0] > Leader_fitness:
            Leader_agent = nest[0].copy()
            Leader_fitness = nest_fitness[0]

        # get new cuckoo and evaluate its fitness
        levy_flight = get_cuckoo(levy_flight)
        for j in range(num_features):
            if trans_function(levy_flight[j]) > np.random.random():
                cuckoo[j] = 1
            else:
                cuckoo[j] = 0
        cuckoo, cuckoo_fitness = sort_agents(cuckoo, obj_function, data)

        # check if a nest needs to be replaced
        j = np.random.randint(0, num_agents)
        if cuckoo_fitness > nest_fitness[j]:
            nest[j] = cuckoo.copy()
            nest_fitness[j] = cuckoo_fitness

        nest, nest_fitness = sort_agents(nest, obj_function, data)

        # eliminate the worst nests and generate new ones
        nest = replace_worst(nest, p_a)

        nest, nest_fitness = sort_agents(nest, obj_function, data)

        # update final information
        display(nest, nest_fitness, agent_name)

        if nest_fitness[0] > Leader_fitness:
            Leader_agent = nest[0].copy()
            Leader_fitness = nest_fitness[0].copy()

        convergence_curve['fitness'][iter_no] = Leader_fitness
        convergence_curve['feature_count'][iter_no] = int(np.sum(Leader_agent))

    # compute final accuracy
    Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy,
                                                data)
    nest, nest_accuracy = sort_agents(nest, compute_accuracy, data)

    print(
        '\n================================================================================'
    )
    print(
        '                                    Final Result                                  '
    )
    print(
        '================================================================================\n'
    )
    print('Leader ' + agent_name +
          ' Dimension : {}'.format(int(np.sum(Leader_agent))))
    print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
    print('Leader ' + agent_name +
          ' Classification Accuracy : {}'.format(Leader_accuracy))
    print(
        '\n================================================================================\n'
    )

    # stop timer
    end_time = time.time()
    exec_time = end_time - start_time

    # plot convergence curves
    iters = np.arange(max_iter) + 1
    fig, axes = plt.subplots(2, 1)
    fig.tight_layout(pad=5)
    fig.suptitle('Convergence Curves')

    axes[0].set_title('Convergence of Fitness over Iterations')
    axes[0].set_xlabel('Iteration')
    axes[0].set_ylabel('Fitness')
    axes[0].plot(iters, convergence_curve['fitness'])

    axes[1].set_title('Convergence of Feature Count over Iterations')
    axes[1].set_xlabel('Iteration')
    axes[1].set_ylabel('Number of Selected Features')
    axes[1].plot(iters, convergence_curve['feature_count'])

    if (save_conv_graph):
        plt.savefig('convergence_graph_' + short_name + '.jpg')
    plt.show()

    # update attributes of solution
    solution.best_agent = Leader_agent
    solution.best_fitness = Leader_fitness
    solution.best_accuracy = Leader_accuracy
    solution.convergence_curve = convergence_curve
    solution.final_population = nest
    solution.final_fitness = nest_fitness
    solution.final_accuracy = nest_accuracy
    solution.execution_time = exec_time

    return solution
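
replace_worst is assumed to discard a fraction p_a of the worst nests (the population is already sorted best-first when it is called) and re-initialise them at random. A sketch under that assumption:

import numpy as np

def replace_worst_sketch(nest, p_a):
    # nest is sorted by decreasing fitness, so the worst nests are the last rows
    num_agents, num_features = nest.shape
    num_replace = int(np.ceil(p_a * num_agents))
    if num_replace > 0:
        nest[-num_replace:] = np.random.randint(0, 2, size=(num_replace, num_features))
    return nest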
Example n. 13
def GA(num_agents,
       max_iter,
       train_data,
       train_label,
       obj_function=compute_fitness,
       prob_cross=0.4,
       prob_mut=0.3,
       save_conv_graph=False,
       seed=0):

    # Genetic Algorithm
    ############################### Parameters ####################################
    #                                                                             #
    #   num_agents: number of chromosomes                                         #
    #   max_iter: maximum number of generations                                   #
    #   train_data: training samples of data                                      #
    #   train_label: class labels for the training samples                        #
    #   obj_function: the function to maximize while doing feature selection      #
    #   prob_cross: probability of crossover                                      #
    #   prob_mut: probability of mutation                                         #
    #   save_conv_graph: boolean value for saving convergence graph               #
    #                                                                             #
    ###############################################################################

    short_name = 'GA'
    agent_name = 'Chromosome'
    train_data, train_label = np.array(train_data), np.array(train_label)
    num_features = train_data.shape[1]
    cross_limit = 5
    np.random.seed(seed)

    # setting up the objectives
    weight_acc = None
    if (obj_function == compute_fitness):
        weight_acc = float(
            input('Weight for the classification accuracy [0-1]: '))

    obj = (obj_function, weight_acc)
    compute_accuracy = (
        compute_fitness, 1
    )  # compute_accuracy is just compute_fitness with accuracy weight as 1

    # initialize chromosomes and Leader (the agent with the max fitness)
    chromosomes = initialize(num_agents, num_features)
    fitness = np.zeros(num_agents)
    accuracy = np.zeros(num_agents)
    Leader_agent = np.zeros((1, num_features))
    Leader_fitness = float("-inf")
    Leader_accuracy = float("-inf")

    # initialize convergence curves
    convergence_curve = {}
    convergence_curve['fitness'] = np.zeros(max_iter)

    # initialize data class
    data = Data()
    val_size = float(
        input('Enter the percentage of data wanted for validation [0, 100]: ')
    ) / 100
    data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(
        train_data, train_label, stratify=train_label, test_size=val_size)

    # create a solution object
    solution = Solution()
    solution.num_agents = num_agents
    solution.max_iter = max_iter
    solution.num_features = num_features
    solution.obj_function = obj_function

    # rank initial population
    chromosomes, fitness = sort_agents(chromosomes, obj, data)

    # start timer
    start_time = time.time()

    # main loop
    for iter_no in range(max_iter):
        print(
            '\n================================================================================'
        )
        print('                          Iteration - {}'.format(iter_no + 1))
        print(
            '================================================================================\n'
        )

        # perform crossover, mutation and replacement
        chromosomes, fitness = cross_mut(chromosomes, fitness, obj, data,
                                         prob_cross, cross_limit, prob_mut)

        # update final information
        chromosomes, fitness = sort_agents(chromosomes, obj, data, fitness)
        display(chromosomes, fitness, agent_name)

        if fitness[0] > Leader_fitness:
            Leader_agent = chromosomes[0].copy()
            Leader_fitness = fitness[0].copy()

        convergence_curve['fitness'][iter_no] = np.mean(fitness)

    # compute final accuracy
    Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy,
                                                data)
    chromosomes, accuracy = sort_agents(chromosomes, compute_accuracy, data)

    print(
        '\n================================================================================'
    )
    print(
        '                                    Final Result                                  '
    )
    print(
        '================================================================================\n'
    )
    print('Leader ' + agent_name +
          ' Dimension : {}'.format(int(np.sum(Leader_agent))))
    print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
    print('Leader ' + agent_name +
          ' Classification Accuracy : {}'.format(Leader_accuracy))
    print(
        '\n================================================================================\n'
    )

    # stop timer
    end_time = time.time()
    exec_time = end_time - start_time

    # plot convergence graph
    fig, axes = Conv_plot(convergence_curve)
    if (save_conv_graph):
        plt.savefig('convergence_graph_' + short_name + '.jpg')
    plt.show()

    # update attributes of solution
    solution.best_agent = Leader_agent
    solution.best_fitness = Leader_fitness
    solution.best_accuracy = Leader_accuracy
    solution.convergence_curve = convergence_curve
    solution.final_population = chromosomes
    solution.final_fitness = fitness
    solution.final_accuracy = accuracy
    solution.execution_time = exec_time

    return solution
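
cross_mut in the GA example above performs crossover and mutation over the whole population. A simplified single-pair sketch (uniform crossover plus bit-flip mutation) is shown below purely to illustrate the two operators; it is not the library's implementation:

import numpy as np

def uniform_crossover(parent1, parent2, prob_cross=0.4):
    # swap each gene between the two parents with probability prob_cross
    child1, child2 = parent1.copy(), parent2.copy()
    mask = np.random.random(parent1.shape) < prob_cross
    child1[mask], child2[mask] = parent2[mask], parent1[mask]
    return child1, child2

def bit_flip_mutation(chromosome, prob_mut=0.3):
    # flip each bit independently with probability prob_mut
    mutated = chromosome.copy()
    mask = np.random.random(chromosome.shape) < prob_mut
    mutated[mask] = 1 - mutated[mask]
    return mutated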
Example n. 14
File: GA.py Project: ssen110/Py_FS
def GA(num_agents, max_iter, train_data, train_label, obj_function=compute_fitness, prob_cross=0.4, prob_mut=0.3, save_conv_graph=False):

    # Genetic Algorithm
    ############################### Parameters ####################################
    #                                                                             #
    #   num_agents: number of chromosomes                                         #
    #   max_iter: maximum number of generations                                   #
    #   train_data: training samples of data                                      #
    #   train_label: class labels for the training samples                        #                
    #   obj_function: the function to maximize while doing feature selection      #
    #   prob_cross: probability of crossover                                      #
    #   prob_mut: probability of mutation                                         #
    #   save_conv_graph: boolean value for saving convergence graph               #
    #                                                                             #
    ###############################################################################

    short_name = 'GA'
    agent_name = 'Chromosome'
    train_data, train_label = np.array(train_data), np.array(train_label)
    num_features = train_data.shape[1]
    cross_limit = 5

    # initialize chromosomes and Leader (the agent with the max fitness)
    chromosomes = initialize(num_agents, num_features)
    fitness = np.zeros(num_agents)
    accuracy = np.zeros(num_agents)
    Leader_agent = np.zeros((1, num_features))
    Leader_fitness = float("-inf")
    Leader_accuracy = float("-inf")

    # initialize convergence curves
    convergence_curve = {}
    convergence_curve['fitness'] = np.zeros(max_iter)
    convergence_curve['feature_count'] = np.zeros(max_iter)

    # format the data 
    data = Data()
    data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(train_data, train_label, stratify=train_label, test_size=0.2)

    # create a solution object
    solution = Solution()
    solution.num_agents = num_agents
    solution.max_iter = max_iter
    solution.num_features = num_features
    solution.obj_function = obj_function

    # rank initial population
    chromosomes, fitness = sort_agents(chromosomes, obj_function, data)

    # start timer
    start_time = time.time()

    # main loop
    for iter_no in range(max_iter):
        print('\n================================================================================')
        print('                          Iteration - {}'.format(iter_no+1))
        print('================================================================================\n')

        # perform crossover, mutation and replacement
        cross_mut(chromosomes, fitness, obj_function, data, prob_cross, cross_limit, prob_mut)

        # update final information
        chromosomes, fitness = sort_agents(chromosomes, obj_function, data)
        display(chromosomes, fitness, agent_name)
        if fitness[0]>Leader_fitness:
            Leader_agent = chromosomes[0].copy()
            Leader_fitness = fitness[0].copy()
        convergence_curve['fitness'][iter_no] = Leader_fitness
        convergence_curve['feature_count'][iter_no] = int(np.sum(Leader_agent))

    # compute final accuracy
    Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy, data)
    chromosomes, accuracy = sort_agents(chromosomes, compute_accuracy, data)

    print('\n================================================================================')
    print('                                    Final Result                                  ')
    print('================================================================================\n')
    print('Leader ' + agent_name + ' Dimension : {}'.format(int(np.sum(Leader_agent))))
    print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
    print('Leader ' + agent_name + ' Classification Accuracy : {}'.format(Leader_accuracy))
    print('\n================================================================================\n')

    # stop timer
    end_time = time.time()
    exec_time = end_time - start_time
    
    # plot convergence curves
    iters = np.arange(max_iter)+1
    fig, axes = plt.subplots(2, 1)
    fig.tight_layout(pad = 5) 
    fig.suptitle('Convergence Curves')
    
    axes[0].set_title('Convergence of Fitness over Iterations')
    axes[0].set_xlabel('Iteration')
    axes[0].set_ylabel('Fitness')
    axes[0].plot(iters, convergence_curve['fitness'])

    axes[1].set_title('Convergence of Feature Count over Iterations')
    axes[1].set_xlabel('Iteration')
    axes[1].set_ylabel('Number of Selected Features')
    axes[1].plot(iters, convergence_curve['feature_count'])
    
    if(save_conv_graph):
        plt.savefig('convergence_graph_'+ short_name + '.jpg')
    plt.show()

    # update attributes of solution
    solution.best_agent = Leader_agent
    solution.best_fitness = Leader_fitness
    solution.best_accuracy = Leader_accuracy
    solution.convergence_curve = convergence_curve
    solution.final_population = chromosomes
    solution.final_fitness = fitness
    solution.final_accuracy = accuracy
    solution.execution_time = exec_time

    return solution
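
compute_fitness, the default objective in all of these wrappers, is assumed to combine validation accuracy with a reward for dropping features, weighted by weight_acc. A sketch using a KNN classifier (the classifier, the signature and the weighting are assumptions, not the library's exact objective):

import numpy as np
from sklearn.neighbors import KNeighborsClassifier

def compute_fitness_sketch(agent, train_X, val_X, train_Y, val_Y, weight_acc=0.9):
    # weighted sum of validation accuracy and the fraction of features discarded
    cols = np.flatnonzero(agent)
    if cols.size == 0:
        return 0.0
    clf = KNeighborsClassifier(n_neighbors=5)
    clf.fit(train_X[:, cols], train_Y)
    acc = clf.score(val_X[:, cols], val_Y)
    feat_reduction = (agent.size - cols.size) / agent.size
    return weight_acc * acc + (1 - weight_acc) * feat_reduction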
Example n. 15
def Name_of_the_wrapper(num_agents,
                        max_iter,
                        train_data,
                        train_label,
                        obj_function=compute_fitness,
                        trans_func_shape='s',
                        save_conv_graph=False):

    # Name of the optimizer
    ############################### Parameters ####################################
    #                                                                             #
    #   num_agents: number of agents                                              #
    #   max_iter: maximum number of generations                                   #
    #   train_data: training samples of data                                      #
    #   train_label: class labels for the training samples                        #
    #   obj_function: the function to maximize while doing feature selection      #
    #   trans_function_shape: shape of the transfer function used                 #
    #   save_conv_graph: boolean value for saving convergence graph               #
    #                                                                             #
    ###############################################################################

    short_name = ''
    agent_name = ''
    train_data, train_label = np.array(train_data), np.array(train_label)
    num_features = train_data.shape[1]
    trans_function = get_trans_function(trans_func_shape)

    # setting up the objectives
    weight_acc = None
    if (obj_function == compute_fitness):
        weight_acc = float(
            input('Weight for the classification accuracy [0-1]: '))
    obj = (obj_function, weight_acc)
    compute_accuracy = (
        compute_fitness, 1
    )  # compute_accuracy is just compute_fitness with accuracy weight as 1

    # initialize agents and Leader (the agent with the max fitness)
    agents = initialize(num_agents, num_features)
    fitness = np.zeros(num_agents)
    accuracy = np.zeros(num_agents)
    Leader_agent = np.zeros((1, num_features))
    Leader_fitness = float("-inf")
    Leader_accuracy = float("-inf")

    # initialize convergence curves
    convergence_curve = {}
    convergence_curve['fitness'] = np.zeros(max_iter)
    convergence_curve['feature_count'] = np.zeros(max_iter)

    # format the data
    data = Data()
    data.train_X, data.val_X, data.train_Y, data.val_Y = train_test_split(
        train_data, train_label, stratify=train_label, test_size=0.2)

    # create a solution object
    solution = Solution()
    solution.num_agents = num_agents
    solution.max_iter = max_iter
    solution.num_features = num_features
    solution.obj_function = obj_function

    # rank initial agents
    agents, fitness = sort_agents(agents, obj, data)

    # start timer
    start_time = time.time()

    for iter_no in range(max_iter):
        print(
            '\n================================================================================'
        )
        print('                          Iteration - {}'.format(iter_no + 1))
        print(
            '================================================================================\n'
        )

        ################ write your main position update code here ################

        ###########################################################################

        # update final information
        agents, fitness = sort_agents(agents, obj, data)
        display(agents, fitness, agent_name)

        # update Leader (best agent)
        if fitness[0] > Leader_fitness:
            Leader_agent = agents[0].copy()
            Leader_fitness = fitness[0].copy()

        convergence_curve['fitness'][iter_no] = Leader_fitness
        convergence_curve['feature_count'][iter_no] = int(np.sum(Leader_agent))

    # compute final accuracy
    Leader_agent, Leader_accuracy = sort_agents(Leader_agent, compute_accuracy,
                                                data)
    agents, accuracy = sort_agents(agents, compute_accuracy, data)

    print(
        '\n================================================================================'
    )
    print(
        '                                    Final Result                                  '
    )
    print(
        '================================================================================\n'
    )
    print('Leader ' + agent_name +
          ' Dimension : {}'.format(int(np.sum(Leader_agent))))
    print('Leader ' + agent_name + ' Fitness : {}'.format(Leader_fitness))
    print('Leader ' + agent_name +
          ' Classification Accuracy : {}'.format(Leader_accuracy))
    print(
        '\n================================================================================\n'
    )

    # stop timer
    end_time = time.time()
    exec_time = end_time - start_time

    # plot convergence graph
    fig, axes = Conv_plot(convergence_curve)
    if (save_conv_graph):
        plt.savefig('convergence_graph_' + short_name + '.jpg')
    plt.show()

    # update attributes of solution
    solution.best_agent = Leader_agent
    solution.best_fitness = Leader_fitness
    solution.best_accuracy = Leader_accuracy
    solution.convergence_curve = convergence_curve
    solution.final_agents = agents
    solution.final_fitness = fitness
    solution.final_accuracy = accuracy
    solution.execution_time = exec_time

    return solution
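
Finally, the Data and Solution containers and the Conv_plot helper referenced by every wrapper are assumed to be small utilities along the following lines (a sketch, not the library's exact code):

import numpy as np
import matplotlib.pyplot as plt

class Data:
    # plain container for the train/validation split
    def __init__(self):
        self.train_X = self.val_X = self.train_Y = self.val_Y = None

class Solution:
    # plain container for the results returned by each wrapper
    def __init__(self):
        self.best_agent = None
        self.best_fitness = None
        self.best_accuracy = None
        self.convergence_curve = {}
        self.execution_time = None

def Conv_plot(convergence_curve):
    # plot the fitness convergence curve and return the figure and axes
    iters = np.arange(len(convergence_curve['fitness'])) + 1
    fig, axes = plt.subplots()
    axes.set_title('Convergence of Fitness over Iterations')
    axes.set_xlabel('Iteration')
    axes.set_ylabel('Fitness')
    axes.plot(iters, convergence_curve['fitness'])
    return fig, axes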