Example #1
from opfunu.cec_basic.cec2014_nobias import *  # provides the F5 benchmark function used below
from mealpy.evolutionary_based.GA import BaseGA

## Setting parameters
obj_func = F5
verbose = False
epoch = 500
pop_size = 50

# A - Different way to provide lower bound and upper bound. Here are some examples:

## 1. When you have a different lower bound and upper bound for each parameter
lb1 = [-3, -5, 1, -10]
ub1 = [5, 10, 100, 30]

md1 = BaseGA(obj_func, lb1, ub1, verbose, epoch, pop_size)
best_pos1, best_fit1, list_loss1 = md1.train()
print(md1.solution[1])  # solution holds the [position, fitness] pair, so this prints the best fitness

## 2. When you have the same lower bound and upper bound for every parameter, you can use:
##      + an int or float: then you also need to specify your problem size (number of dimensions)
problemSize = 10
lb2 = -5
ub2 = 10
md2 = BaseGA(obj_func,
             lb2,
             ub2,
             verbose,
             epoch,
             pop_size,
             problem_size=problemSize)  # Remember the keyword "problem_size"
best_pos2, best_fit2, list_loss2 = md2.train()
Example #2
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 10:11, 16/03/2020                                                        %
#                                                                                                       %
#       Email:      [email protected]                                                           %
#       Homepage:   https://www.researchgate.net/profile/Thieu_Nguyen6                                  %
#       Github:     https://github.com/thieu1995                                                        %
#-------------------------------------------------------------------------------------------------------%

from opfunu.cec_basic.cec2014_nobias import *
from mealpy.evolutionary_based.GA import BaseGA


## Setting parameters
obj_func = F5
verbose = False
epoch = 10
pop_size = 50

lb1 = [-3, -5, 1, -10]
ub1 = [5, 10, 100, 30]

md1 = BaseGA(obj_func, lb1, ub1, verbose, epoch, pop_size)
best_pos1, best_fit1, list_loss1 = md1.train()
print(md1.solution[1])


Example #3
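## This example references names that the snippet itself does not define. A minimal,
## illustrative setup is sketched below: BaseGA is imported from mealpy as in the other
## examples, export_convergence_chart is assumed to live in mealpy's visualize utilities,
## and obj_function is a hypothetical stand-in returning three objective values to match
## obj_weight=[0.2, 0.5, 0.3] further down.
import numpy as np
from mealpy.evolutionary_based.GA import BaseGA
from mealpy.utils.visualize import export_convergence_chart  # assumed location of the chart helper


def obj_function(solution):
    ## Hypothetical 3-objective function; each returned value is combined via obj_weight.
    return [np.sum(solution ** 2), np.sum(np.abs(solution)), np.max(np.abs(solution))]
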
verbose = True
epoch = 100
pop_size = 50

lb1 = [-10, -5, -15, -20]
ub1 = [10, 5, 15, 20]

optimizer = BaseGA(obj_function,
                   lb1,
                   ub1,
                   "max",
                   verbose,
                   epoch,
                   pop_size,
                   obj_weight=[0.2, 0.5, 0.3])
best_position, best_fitness, g_best_fit_list, c_best_fit_list = optimizer.train()
print(best_position)

export_convergence_chart(
    optimizer.history_list_g_best_fit, title='Global Best Fitness'
)  # Draw global best fitness found so far in previous generations
export_convergence_chart(
    optimizer.history_list_c_best_fit, title='Local Best Fitness'
)  # Draw current best fitness in each previous generation
export_convergence_chart(optimizer.history_list_epoch_time,
                         title='Runtime chart',
                         y_label="Second")  # Draw runtime for each generation

## On the exploration and exploitation in popular swarm-based metaheuristic algorithms

# The exploration/exploitation chart should be drawn for a single algorithm on a single fitness function,
# as in the sketch below
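
## A minimal sketch of how such a chart can be produced, assuming you record the whole
## population at every generation. It uses the median-based, dimension-wise diversity
## measure from the paper named above: exploration% = Div / Div_max * 100 and
## exploitation% = |Div - Div_max| / Div_max * 100. All names here are illustrative,
## not part of the mealpy API.
import numpy as np
import matplotlib.pyplot as plt


def diversity(pop):
    ## pop: (n_agents, n_dims) array of positions for one generation.
    ## Mean absolute distance to the per-dimension median, averaged over dimensions.
    med = np.median(pop, axis=0)
    return np.mean(np.abs(pop - med))


def plot_explore_exploit(pop_per_epoch):
    ## pop_per_epoch: list of (n_agents, n_dims) arrays, one per generation.
    div = np.array([diversity(p) for p in pop_per_epoch])
    div_max = div.max()
    explore = 100.0 * div / div_max
    exploit = 100.0 * np.abs(div - div_max) / div_max
    plt.plot(explore, label="Exploration %")
    plt.plot(exploit, label="Exploitation %")
    plt.xlabel("Generation")
    plt.ylabel("Percentage")
    plt.title("Exploration vs Exploitation")
    plt.legend()
    plt.show()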
Example #4
from numpy import array
from opfunu.cec_basic.cec2014_nobias import *  # provides F5
from mealpy.evolutionary_based.GA import BaseGA

## Setting parameters
problem1 = {
    "obj_func": F5,
    "lb": [-3, -5, 1, -10],
    "ub": [5, 10, 100, 30],
    "minmax": "min",
    "verbose": True,
}

# A - Different way to provide lower bound and upper bound. Here are some examples:

## 1. When you have a different lower bound and upper bound for each parameter
md1 = BaseGA(problem1, epoch=10, pop_size=50)
best_pos1, best_fit1 = md1.train()
print(md1.solution[1])

## 2. When you have the same lower bound and upper bound for every parameter, you can use:
##      + an int or float: then you also need to specify your problem size (number of dimensions)
problem2 = {
    "obj_func": F5,
    "lb": -10,
    "ub": 30,
    "minmax": "min",
    "verbose": True,
    "problem_size": 30,  # Remember the keyword "problem_size"
}
md2 = BaseGA(problem2, epoch=10, pop_size=50)
best_pos2, best_fit2 = md2.train()
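
## As with md1 above, the result can also be read back from the trained model itself;
## solution is assumed to hold the same [position, fitness] pair used in the earlier examples.
print(md2.solution[0])  # best position found
print(md2.solution[1])  # its fitness value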