Example #1
from opfunu.cec_basic.cec2014 import *          # provides the F18 benchmark function used below
from mealpy.swarm_based.WOA import BaseWOA
from mealpy.swarm_based.SpaSA import BaseSpaSA
from mealpy.evolutionary_based.GA import BaseGA
from mealpy.swarm_based.GWO import BaseGWO
from mealpy.human_based.TLO import BaseTLO
from mealpy.human_based.QSA import ImprovedQSA
from mealpy.physics_based.EFO import BaseEFO

## Setting parameters
func = F18
epoch = 100
problem_size = 100

# Teaching-Learning-based Optimization (TLO)
temp1 = BaseTLO(func,
                problem_size=problem_size,
                domain_range=(-100, 100),
                log=True,
                epoch=epoch,
                pop_size=50)
temp1._train__()

# Sparrow Search Algorithm (SpaSA)
temp1 = BaseSpaSA(func,
                  problem_size=problem_size,
                  domain_range=(-100, 100),
                  log=True,
                  epoch=epoch,
                  pop_size=50)
temp1._train__()

# Electromagnetic Field Optimization (EFO)
temp2 = BaseEFO(func,
                problem_size=problem_size,
                domain_range=(-100, 100),
                log=True,
                epoch=epoch,
                pop_size=50)
temp2._train__()
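
# The remaining imported optimizers can be run the same way. A minimal sketch,
# assuming BaseGWO (Grey Wolf Optimizer) accepts the same constructor arguments
# as the calls above:
temp3 = BaseGWO(func,
                problem_size=problem_size,
                domain_range=(-100, 100),
                log=True,
                epoch=epoch,
                pop_size=50)
temp3._train__()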
Example #2
from opfunu.cec_basic.cec2014 import *
from mealpy.swarm_based.WOA import BaseWOA
from mealpy.human_based.TLO import BaseTLO
from mealpy.evolutionary_based.GA import BaseGA
from mealpy.physics_based.HGSO import BaseHGSO, LevyHGSO  # assumed module path, mirroring the other mealpy imports

## Setting parameters
problem_size = 30
# func = ObjFunc14(problem_size)   # unused below; the F18 benchmark from opfunu is passed directly
domain_range = [-15, 15]
log = True
epoch = 500
pop_size = 50

# pc = 0.95
# pm = 0.025
# md = BaseGA(func.F1, problem_size, domain_range, log, epoch, pop_size, pc, pm)
# best_position, best_fit, list_loss = md._train__()
# print(best_fit)

md2 = BaseWOA(F18, problem_size, domain_range, log, epoch, pop_size)
best_position2, best_fit2, list_loss2 = md2._train__()
print(best_fit2)

md3 = BaseTLO(F18, problem_size, domain_range, log, epoch, pop_size)
best_position3, best_fit3, list_loss3 = md3._train__()
print(best_fit3)

# Henry Gas Solubility Optimization (HGSO)
md4 = BaseHGSO(F18, problem_size, domain_range, log, epoch, pop_size)
best_position4, best_fit4, list_loss4 = md4._train__()
print(best_fit4)

# HGSO variant with Levy flights
md5 = LevyHGSO(F18, problem_size, domain_range, log, epoch, pop_size)
best_position5, best_fit5, list_loss5 = md5._train__()
print(best_fit5)
Example #3

from opfunu.cec_basic.cec2014_nobias import *
from mealpy.human_based.TLO import BaseTLO, OriginalTLO

## Setting parameters
obj_func = F3
# lb = [-15, -10, -3, -15, -10, -3, -15, -10, -3, -15, -10, -3, -15, -10, -3]
# ub = [15, 10, 3, 15, 10, 3, 15, 10, 3, 15, 10, 3, 15, 10, 3]
lb = [-100]
ub = [100]
problem_size = 2000
batch_size = 25
verbose = True
epoch = 1000
pop_size = 50

md1 = OriginalTLO(obj_func, lb, ub, problem_size, batch_size, verbose, epoch,
                  pop_size)
best_pos1, best_fit1, list_loss1 = md1.train()
print(md1.solution[0])
print(md1.solution[1])
print(md1.loss_train)

md1 = BaseTLO(obj_func, lb, ub, problem_size, batch_size, verbose, epoch,
              pop_size)
best_pos1, best_fit1, list_loss1 = md1.train()
print(md1.solution[0])
print(md1.solution[1])
print(md1.loss_train)
Example #4
from opfunu.cec_basic.cec2014_nobias import *
from mealpy.human_based.TLO import BaseTLO, OriginalTLO

# Setting parameters
obj_func = F5
verbose = False
epoch = 10
pop_size = 50

# A - Different ways to provide the lower bound and upper bound. Here are some examples:

## 1. When you have a different lower bound and upper bound for each parameter
lb1 = [-3, -5, 1]
ub1 = [5, 10, 100]

md1 = BaseTLO(obj_func, lb1, ub1, verbose, epoch, pop_size)
best_pos1, best_fit1, list_loss1 = md1.train()
print(md1.solution[1])

## 2. When you have the same lower bound and upper bound for each parameter, you can use:
##      + an int or float: then you need to specify your problem size (number of dimensions); see the sketch after this block
problemSize = 10
lb2 = -5
ub2 = 10
md2 = BaseTLO(obj_func,
              lb2,
              ub2,
              verbose,
              epoch,
              pop_size,
              problem_size=problemSize)  # Remember the keyword "problem_size"
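
# A minimal usage sketch (not in the original snippet), mirroring the md1 calls above:
# train the model the same way and inspect the best solution found.
best_pos2, best_fit2, list_loss2 = md2.train()
print(md2.solution[1])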