def test_FA_iters_fine(self):
    task = Task(D=10, nGEN=1000, optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
    algo = FireflyAlgorithm(task=task, NP=25)
    algo.run()
    iters = algo.task.iters()
    self.assertEqual(iters, 1000)
def test_FA_evals_fine(self):
    task = Task(D=10, nFES=1000, optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
    algo = FireflyAlgorithm(task=task, NP=25)
    algo.run()
    evals = algo.task.evals()
    self.assertEqual(evals, 1000)
class FATestCase(TestCase):

    def setUp(self):
        self.fa = FireflyAlgorithm(10, 20, 1000, 0.5, 0.2, 1.0, MyBenchmark())
        self.fa_griewank = FireflyAlgorithm(10, 20, 1000, 0.5, 0.2, 1.0, 'griewank')

    def test_works_fine(self):
        self.assertTrue(self.fa.run())

    def test_griewank_works_fine(self):
        self.assertTrue(self.fa_griewank.run())
def RunAlgorithm(self, NUM_RUNS, D, Np, nFES, BenchFunction):
    rawData = np.zeros(NUM_RUNS)
    for i in range(NUM_RUNS):
        task = StoppingTask(D=D, nFES=nFES, optType=OptimizationType.MINIMIZATION, benchmark=BenchFunction)
        algo = FireflyAlgorithm(NP=Np, alpha=0.5, betamin=0.2, gamma=1.0)
        best = algo.run(task=task)
        rawData[i] = best[1]
        print(rawData[i])
    return rawData
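# Usage sketch (not from the original source): a minimal way to drive RunAlgorithm
# above, assuming NumPy and the NiaPy Sphere benchmark are available. Since the
# method body never touches self, None is passed in its place here; the run count
# and evaluation budget below are illustrative values only.
import numpy as np

from NiaPy.benchmarks import Sphere

raw = RunAlgorithm(None, NUM_RUNS=5, D=10, Np=25, nFES=10000, BenchFunction=Sphere())
print('mean best fitness over runs:', np.mean(raw), 'std:', np.std(raw))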
def optimize(bench, algo):
    average_mfo = 0
    average_de = 0
    average_abc = 0
    average_pso = 0
    average_ba = 0
    average_fa = 0
    average_ga = 0
    for i in np.arange(epoch):
        mfo = MothFlameOptimizer(D=dim, NP=pop, nGEN=maxIter, benchmark=bench)
        de = DifferentialEvolution(D=dim, NP=pop, nGEN=maxIter, benchmark=bench)
        abc = ArtificialBeeColonyAlgorithm(D=dim, NP=pop, nFES=maxIter, benchmark=bench)
        pso = ParticleSwarmAlgorithm(D=dim, NP=pop, nGEN=maxIter, benchmark=bench)
        ba = BatAlgorithm(D=dim, NP=pop, nFES=maxIter, benchmark=bench)
        fa = FireflyAlgorithm(D=dim, NP=pop, nFES=maxIter, benchmark=bench)
        ga = GeneticAlgorithm(D=dim, NP=pop, nFES=maxIter, benchmark=bench)
        gen, best_de = de.run()
        gen, best_mfo = mfo.run()
        gen, best_abc = abc.run()
        gen, best_pso = pso.run()
        gen, best_ba = ba.run()
        gen, best_fa = fa.run()
        gen, best_ga = ga.run()
        # accumulate each algorithm's best value against its own running average
        average_mfo += best_mfo / epoch
        average_de += best_de / epoch
        average_abc += best_abc / epoch
        average_pso += best_pso / epoch
        average_ba += best_ba / epoch
        average_fa += best_fa / epoch
        average_ga += best_ga / epoch
    print(algo, ': DE Average of Bests over', epoch, 'runs:', average_de)
    print(algo, ': MFO Average of Bests over', epoch, 'runs:', average_mfo)
    print(algo, ': ABC Average of Bests over', epoch, 'runs:', average_abc)
    print(algo, ': PSO Average of Bests over', epoch, 'runs:', average_pso)
    print(algo, ': BA Average of Bests over', epoch, 'runs:', average_ba)
    print(algo, ': FA Average of Bests over', epoch, 'runs:', average_fa)
    print(algo, ': GA Average of Bests over', epoch, 'runs:', average_ga)
    return [average_de, average_mfo, average_abc, average_pso, average_ba, average_fa, average_ga]
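# Note (not from the original source): optimize() above reads dim, pop, maxIter and
# epoch from module scope, but this excerpt never defines them. The assignments and
# benchmark choices below are assumptions, included only to show how the function
# might be driven; Sphere and Griewank are taken from NiaPy.benchmarks.
import numpy as np

from NiaPy.benchmarks import Griewank, Sphere

dim = 10        # problem dimensionality shared by all algorithms
pop = 30        # population size (NP)
maxIter = 1000  # budget passed as nGEN or nFES, depending on the algorithm
epoch = 5       # number of independent runs averaged per algorithm

sphere_averages = optimize(Sphere(), 'Sphere')
griewank_averages = optimize(Griewank(), 'Griewank')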
def run_default():
    # global_vector is filled by the benchmark during evaluation and reset here,
    # so it must be declared global before it is reassigned
    global global_vector
    for i in range(10):
        Algorithm = FireflyAlgorithm(D=10, NP=20, nFES=50000, alpha=0.5, betamin=0.2, gamma=1.0, benchmark=MyBenchmark())
        Best = Algorithm.run()
        plt.plot(global_vector)
        global_vector = []
        logger.info(Best)
    plt.xlabel('Number of evaluations')
    plt.ylabel('Fitness function value')
    plt.title('Convergence plot')
    plt.show()
def logging_example(D=10, nFES=50000, seed=None, optType=OptimizationType.MINIMIZATION, optFunc=MinMB, **no):
    task = TaskConvPrint(D=D, nFES=nFES, nGEN=50000, optType=optType, benchmark=optFunc())
    algo = FireflyAlgorithm(NP=20, alpha=0.5, betamin=0.2, gamma=1.0, seed=seed, task=task)
    best = algo.run()
    logger.info('%s %s' % (best[0], best[1]))
def simple_example(runs=10, D=10, nFES=50000, seed=None, optType=OptimizationType.MINIMIZATION, optFunc=MinMB, **no):
    for i in range(runs):
        algo = FireflyAlgorithm(D=D, NP=20, nFES=nFES, alpha=0.5, betamin=0.2, gamma=1.0, seed=seed, optType=optType, benchmark=optFunc())
        Best = algo.run()
        logger.info('%s %s' % (Best[0], Best[1]))
# encoding=utf8
# This is a temporary fix to import the module from the parent folder.
# It will be removed when the package is published on PyPI.
import sys
sys.path.append('../')
# End of fix

from NiaPy.algorithms.basic import FireflyAlgorithm
from NiaPy.util import StoppingTask, OptimizationType
from NiaPy.benchmarks import Sphere

# we will run Firefly Algorithm for 5 independent runs
for i in range(5):
    task = StoppingTask(D=10, nFES=1000, optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
    algo = FireflyAlgorithm(NP=20, alpha=0.5, betamin=0.2, gamma=1.0)
    best = algo.run(task=task)
    print('%s -> %s' % (best[0], best[1]))
import logging

from NiaPy.algorithms.basic import FireflyAlgorithm

logging.basicConfig()
logger = logging.getLogger('examples')
logger.setLevel('INFO')


class MyBenchmark(object):
    def __init__(self):
        self.Lower = -11
        self.Upper = 11

    def function(self):
        # return a fitness callback that sums the squares of the solution vector
        def evaluate(D, sol):
            val = 0.0
            for i in range(D):
                val = val + sol[i] * sol[i]
            return val
        return evaluate


for i in range(10):
    Algorithm = FireflyAlgorithm(10, 20, 10000, 0.5, 0.2, 1.0, MyBenchmark())
    Best = Algorithm.run()
    logger.info(Best)