#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 19:05, 29/05/2020                                                         %
#                                                                                                        %
#       Email:      [email protected]                                                           %
#       Homepage:   https://www.researchgate.net/profile/Thieu_Nguyen6                                   %
#       Github:     https://github.com/thieunguyen5991                                                   %
# -------------------------------------------------------------------------------------------------------%

from opfunu.cec_basic.cec2014_nobias import *
from mealpy.swarm_based.SpaSA import BaseSpaSA

## Setting parameters
objective_func = F21
problem_size = 3000
domain_range = [-150, 150]
log = True
epoch = 100
pop_size = 50

md1 = BaseSpaSA(objective_func, problem_size, domain_range, log, epoch, pop_size)
best_pos1, best_fit1, list_loss1 = md1._train__()
print(best_fit1)
print("========================================================")
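# Optional follow-up sketch (not part of the original script): _train__() returns the
# per-epoch best-fitness history in list_loss1, so it can be visualised with matplotlib.
# Assumption: matplotlib is installed in the same environment.
import matplotlib.pyplot as plt

plt.plot(list_loss1)                         # one best-fitness value per epoch
plt.xlabel("Epoch")
plt.ylabel("Best fitness")
plt.title("Convergence of BaseSpaSA on F21")
plt.show()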
from opfunu.cec_basic.cec2014_nobias import *
from mealpy.swarm_based.GWO import BaseGWO
from mealpy.swarm_based.SpaSA import BaseSpaSA
from mealpy.human_based.TLO import BaseTLO
from mealpy.human_based.QSA import ImprovedQSA
from mealpy.physics_based.EFO import BaseEFO

## Setting parameters
## NOTE: func, problem_size and epoch were not defined in the original snippet;
## the values below are placeholders chosen to match the other examples in this repo.
func = F1
problem_size = 100
epoch = 100

temp1 = BaseTLO(func, problem_size=problem_size, domain_range=(-100, 100), log=True, epoch=epoch, pop_size=50)
temp1._train__()

temp1 = BaseSpaSA(func, problem_size=problem_size, domain_range=(-100, 100), log=True, epoch=epoch, pop_size=50)
temp1._train__()

temp2 = BaseEFO(func, problem_size=problem_size, domain_range=(-100, 100), log=True, epoch=epoch, pop_size=50)
temp2._train__()

temp2 = ImprovedQSA(func, problem_size=problem_size, domain_range=(-100, 100), log=True, epoch=epoch, pop_size=50)
temp2._train__()
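# A small comparison sketch building on the runs above (an illustration, not part of the
# original snippet): each _train__() call returns (best_position, best_fitness, loss_history),
# so the optimizers can be compared on the final best fitness they reach.
results = {}
for name, model_class in [("TLO", BaseTLO), ("SpaSA", BaseSpaSA), ("EFO", BaseEFO), ("ImprovedQSA", ImprovedQSA)]:
    model = model_class(func, problem_size=problem_size, domain_range=(-100, 100),
                        log=False, epoch=epoch, pop_size=50)
    _, best_fit, _ = model._train__()
    results[name] = best_fit

for name, fit in results.items():
    print(f"{name}: {fit}")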
# -------------------------------------------------------------------------------------------------------%

from opfunu.cec_basic.cec2014_nobias import *
from mealpy.swarm_based.SpaSA import BaseSpaSA, OriginalSpaSA

## Setting parameters
obj_func = F19
# lb = [-15, -10, -3, -15, -10, -3, -15, -10, -3, -15, -10, -3, -15, -10, -3]
# ub = [15, 10, 3, 15, 10, 3, 15, 10, 3, 15, 10, 3, 15, 10, 3]
lb = [-100]
ub = [100]
problem_size = 1000
batch_size = 25
verbose = True
epoch = 100
pop_size = 50

md1 = OriginalSpaSA(obj_func, lb, ub, problem_size, batch_size, verbose, epoch, pop_size)
best_pos1, best_fit1, list_loss1 = md1.train()
print(md1.solution[0])
print(md1.solution[1])
print(md1.loss_train)

md1 = BaseSpaSA(obj_func, lb, ub, problem_size, batch_size, verbose, epoch, pop_size)
best_pos1, best_fit1, list_loss1 = md1.train()
print(md1.solution[0])
print(md1.solution[1])
print(md1.loss_train)
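# Optional follow-up sketch (the variable names below are new, introduced only for
# illustration): keeping each trained model in its own variable avoids overwriting md1
# and makes it easy to compare the two SpaSA variants after training.
spasa_original = OriginalSpaSA(obj_func, lb, ub, problem_size, batch_size, verbose, epoch, pop_size)
spasa_base = BaseSpaSA(obj_func, lb, ub, problem_size, batch_size, verbose, epoch, pop_size)

for name, model in (("OriginalSpaSA", spasa_original), ("BaseSpaSA", spasa_base)):
    model.train()
    print(name, model.solution[1])    # solution[1] holds the best fitness found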
import numpy as np

from opfunu.cec_basic.cec2014_nobias import *
from mealpy.swarm_based.SpaSA import BaseSpaSA
from mealpy.swarm_based.WOA import BaseWOA

list_funcs = [F30, ]
problem_size = 10
solution = np.ones(problem_size)
for F in list_funcs:
    result1 = F(solution)
    print(result1)

# test1 = F14(solution, shift_num=1, rotate_num=1)
# print(test1)

temp = BaseSpaSA(F23, problem_size=10, domain_range=(-100, 100), log=True, epoch=1000, pop_size=100)
temp._train__()

temp = BaseWOA(F23, problem_size=10, domain_range=(-100, 100), log=True, epoch=1000, pop_size=100)
temp._train__()
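# Illustrative comparison (an assumption-based sketch, not part of the original script):
# _train__() returns (best_position, best_fitness, loss_history), so the two runs above
# can be repeated with their return values captured and compared directly.
_, spasa_fit, _ = BaseSpaSA(F23, problem_size=10, domain_range=(-100, 100),
                            log=False, epoch=1000, pop_size=100)._train__()
_, woa_fit, _ = BaseWOA(F23, problem_size=10, domain_range=(-100, 100),
                        log=False, epoch=1000, pop_size=100)._train__()
print("BaseSpaSA best fitness on F23:", spasa_fit)
print("BaseWOA   best fitness on F23:", woa_fit)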
from opfunu.cec_basic.cec2014_nobias import *
from mealpy.swarm_based.SpaSA import BaseSpaSA, OriginalSpaSA

# Setting parameters
obj_func = F5
verbose = False
epoch = 500
pop_size = 50

# A - Different ways to provide the lower bound and upper bound. Here are some examples:

## 1. When you have a different lower bound and upper bound for each parameter
lb1 = [-3, -5, 1]
ub1 = [5, 10, 100]
md1 = BaseSpaSA(obj_func, lb1, ub1, verbose, epoch, pop_size)
best_pos1, best_fit1, list_loss1 = md1.train()
print(md1.solution[1])

## 2. When you have the same lower bound and upper bound for every parameter, you can use:
## + int or float: then you need to specify your problem size (number of dimensions)
problemSize = 10
lb2 = -5
ub2 = 10
md2 = BaseSpaSA(obj_func, lb2, ub2, verbose, epoch, pop_size, problem_size=problemSize)  # Remember the keyword "problem_size"
best_pos1, best_fit1, list_loss1 = md2.train()
print(md2.solution[1])

## + array: 2 ways
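## The code for the "array" case was cut off above; the two forms below are a hedged
## sketch of what it likely looked like, following the same pattern as examples 1 and 2.
## Way 1 (assumption): a single-entry list plus the "problem_size" keyword
lb3 = [-5]
ub3 = [10]
md3 = BaseSpaSA(obj_func, lb3, ub3, verbose, epoch, pop_size, problem_size=problemSize)  # Remember the keyword "problem_size"
best_pos3, best_fit3, list_loss3 = md3.train()
print(md3.solution[1])

## Way 2 (assumption): a full-length list with one bound per dimension, no "problem_size" needed
lb4 = [-5] * problemSize
ub4 = [10] * problemSize
md4 = BaseSpaSA(obj_func, lb4, ub4, verbose, epoch, pop_size)
best_pos4, best_fit4, list_loss4 = md4.train()
print(md4.solution[1])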