Example No. 1
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 10:11, 16/03/2020                                                        %
#                                                                                                       %
#       Email:      [email protected]                                                           %
#       Homepage:   https://www.researchgate.net/profile/Thieu_Nguyen6                                  %
#       Github:     https://github.com/thieunguyen5991                                                  %
#-------------------------------------------------------------------------------------------------------%

from opfunu.type_based.uni_modal import Functions
from mealpy.evolutionary_based.GA import BaseGA

t1 = Functions()

root_paras = {
    "problem_size": 30,
    "domain_range": [-15, 15],
    "print_train": True,
    "objective_func": t1._sum_squres__
}

## Setting parameters
epoch = 100
pop_size = 50
pc = 0.95
pm = 0.025

md = BaseGA(root_paras, epoch, pop_size, pc, pm)
best_position, best_fit, list_loss = md._train__()
print(best_fit)
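
## Optional follow-up (not part of the original example): "list_loss" appears to hold the best
## fitness recorded at each epoch, so it can be visualized with matplotlib (assumed installed):
import matplotlib.pyplot as plt

plt.plot(list_loss)                    # best fitness per epoch
plt.xlabel("Epoch")
plt.ylabel("Best fitness")
plt.title("GA convergence on the sum-squares benchmark")
plt.show()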
Example No. 2
from opfunu.cec_basic.cec2014_nobias import *
from mealpy.evolutionary_based.GA import BaseGA

## Setting parameters
obj_func = F5
verbose = False
epoch = 500
pop_size = 50

# A - Different ways to provide the lower bound and upper bound. Here are some examples:

## 1. When you have a different lower bound and upper bound for each parameter
lb1 = [-3, -5, 1, -10]
ub1 = [5, 10, 100, 30]

md1 = BaseGA(obj_func, lb1, ub1, verbose, epoch, pop_size)
best_pos1, best_fit1, list_loss1 = md1.train()
print(md1.solution[1])

## 2. When you have the same lower bound and upper bound for every parameter, you can use:
##      + an int or a float: then you need to specify your problem size (number of dimensions)
problemSize = 10
lb2 = -5
ub2 = 10
md2 = BaseGA(obj_func,
             lb2,
             ub2,
             verbose,
             epoch,
             pop_size,
             problem_size=problemSize)  # Remember the keyword "problem_size"
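
## A hedged completion (not part of the original snippet): md2 can be trained exactly like md1.
best_pos2, best_fit2, list_loss2 = md2.train()   # same return signature as md1.train() above
print(md2.solution[1])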
Example No. 3
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 09:33, 17/03/2020                                                        %
#                                                                                                       %
#       Email:      [email protected]                                                           %
#       Homepage:   https://www.researchgate.net/profile/Thieu_Nguyen6                                  %
#       Github:     https://github.com/thieunguyen5991                                                  %
# -------------------------------------------------------------------------------------------------------%

from numpy import sum
from mealpy.evolutionary_based.GA import BaseGA


def my_objective_function(solution):
    # Sum of squares (Sphere function); "solution" arrives as a 1-D numpy array
    return sum(solution**2)


## Setting parameters
objective_func = my_objective_function
problem_size = 30
domain_range = [-15, 15]
log = True
epoch = 100
pop_size = 50
pc = 0.95
pm = 0.025

md = BaseGA(objective_func, problem_size, domain_range, log, epoch, pop_size,
            pc, pm)
best_position, best_fit, list_loss = md._train__()
print(best_fit)
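
## Any Python callable that maps a 1-D numpy array to a scalar can be plugged in the same way.
## A second, purely illustrative objective (not part of the original example):
def my_abs_objective(solution):
    return sum(abs(solution))   # numpy "sum" imported above; builtin abs works element-wise on arrays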
Example No. 4
## NOTE: the original snippet is truncated here. The imports and the opening of the
## multi-objective function are reconstructed; t1 and t2 are illustrative placeholders only.
from numpy import sum
from mealpy.evolutionary_based.GA import BaseGA
from mealpy.utils.visualize import export_convergence_chart   # assumed import path


def obj_function(solution):
    t1 = sum(solution**2)        # placeholder objective 1
    t2 = sum(abs(solution))      # placeholder objective 2
    t3 = 0
    for i in range(len(solution)):
        t3 += (1 + solution[i]**2)**0.5
    return [t1, t2, t3]


## Setting parameters
verbose = True
epoch = 100
pop_size = 50

lb1 = [-10, -5, -15, -20]
ub1 = [10, 5, 15, 20]

optimizer = BaseGA(obj_function,
                   lb1,
                   ub1,
                   "max",
                   verbose,
                   epoch,
                   pop_size,
                   obj_weight=[0.2, 0.5, 0.3])
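## Note (an assumption, not stated in the snippet): "max" is presumably the minmax argument, and with
## obj_weight=[0.2, 0.5, 0.3] mealpy appears to collapse the three objectives into one scalar fitness
## via the weighted sum 0.2*t1 + 0.5*t2 + 0.3*t3, which the GA then maximizes.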
best_position, best_fitness, g_best_fit_list, c_best_fit_list = optimizer.train()
print(best_position)

export_convergence_chart(
    optimizer.history_list_g_best_fit, title='Global Best Fitness'
)  # Draw global best fitness found so far in previous generations
export_convergence_chart(
    optimizer.history_list_c_best_fit, title='Local Best Fitness'
)  # Draw current best fitness in each previous generation
export_convergence_chart(optimizer.history_list_epoch_time,
                         title='Runtime chart')  # Draw runtime per epoch (the original call is truncated; closed here without further arguments)
Example No. 5
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 10:11, 16/03/2020                                                        %
#                                                                                                       %
#       Email:      [email protected]                                                           %
#       Homepage:   https://www.researchgate.net/profile/Thieu_Nguyen6                                  %
#       Github:     https://github.com/thieu1995                                                  %
#-------------------------------------------------------------------------------------------------------%

from opfunu.cec_basic.cec2014_nobias import *
from mealpy.evolutionary_based.GA import BaseGA


## Setting parameters
obj_func = F5
verbose = False
epoch = 10
pop_size = 50

lb1 = [-3, -5, 1, -10]
ub1 = [5, 10, 100, 30]

md1 = BaseGA(obj_func, lb1, ub1, verbose, epoch, pop_size)
best_pos1, best_fit1, list_loss1 = md1.train()
print(md1.solution[1])


Example No. 6
## NOTE: the original snippet is truncated here; the opening import lines below are reconstructed
## (the import paths and the first chart-helper names are assumptions).
from opfunu.cec_basic.cec2014_nobias import *
from mealpy.evolutionary_based.GA import BaseGA
from mealpy.utils.visualize import export_convergence_chart, export_explore_exploit_chart, \
    export_diversity_chart, export_objectives_chart, export_trajectory_chart
from numpy import array

## Setting parameters
problem1 = {
    "obj_func": F5,
    "lb": [-3, -5, 1, -10],
    "ub": [5, 10, 100, 30],
    "minmax": "min",
    "verbose": True,
}

# A - Different ways to provide the lower bound and upper bound. Here are some examples:

## 1. When you have a different lower bound and upper bound for each parameter
md1 = BaseGA(problem1, epoch=10, pop_size=50)
best_pos1, best_fit1 = md1.train()
print(md1.solution[1])

## 2. When you have the same lower bound and upper bound for every parameter, you can use:
##      + an int or a float: then you need to specify your problem size (number of dimensions)
problem2 = {
    "obj_func": F5,
    "lb": -10,
    "ub": 30,
    "minmax": "min",
    "verbose": True,
    "problem_size": 30,  # Remember the keyword "problem_size"
}
md2 = BaseGA(problem2, epoch=10, pop_size=50)
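
## A hedged completion (not in the original snippet, which is cut off here): train md2 as well.
best_pos2, best_fit2 = md2.train()
print(md2.solution[1])     # print the best solution's fitness component, mirroring md1 above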
Example No. 7
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 10:11, 16/03/2020                                                        %
#                                                                                                       %
#       Email:      [email protected]                                                           %
#       Homepage:   https://www.researchgate.net/profile/Thieu_Nguyen6                                  %
#       Github:     https://github.com/thieu1995                                                  %
#-------------------------------------------------------------------------------------------------------%

from opfunu.cec_basic.cec2014_nobias import *
from mealpy.evolutionary_based.GA import BaseGA

## Setting parameters
obj_func = F1
# lb = [-15, -10, -3, -15, -10, -3, -15, -10, -3, -15, -10, -3, -15, -10, -3]
# ub = [15, 10, 3, 15, 10, 3, 15, 10, 3, 15, 10, 3, 15, 10, 3]
lb = [-100]
ub = [100]
problem_size = 100   # with single-element lb/ub, the bounds are presumably repeated across all 100 dimensions
batch_size = 25
verbose = True
epoch = 1000
pop_size = 50

md1 = BaseGA(obj_func, lb, ub, problem_size, batch_size, verbose, epoch,
             pop_size, 0.85, 0.05)   # the trailing 0.85 and 0.05 are presumably pc (crossover) and pm (mutation) rates
best_pos1, best_fit1, list_loss1 = md1.train()
print(md1.solution[0])
print(md1.solution[1])
print(md1.loss_train)
Example No. 8
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 22:08, 22/05/2020                                                        %
#                                                                                                       %
#       Email:      [email protected]                                                           %
#       Homepage:   https://www.researchgate.net/profile/Thieu_Nguyen6                                  %
#       Github:     https://github.com/thieunguyen5991                                                  %
#-------------------------------------------------------------------------------------------------------%

from mealpy.evolutionary_based.GA import BaseGA
from opfunu.cec_basic.cec2014_nobias import *

## Setting parameters
objective_func = F1
problem_size = 100
domain_range = [-100, 100]
log = True

epoch = 100
pop_size = 50

md1 = BaseGA(objective_func, problem_size, domain_range, log, epoch, pop_size)
best_pos1, best_fit1, list_loss1 = md1._train__()
print(best_fit1)