def test_DE_iters_fine(self):
    task = Task(D=10,
                nGEN=1000,
                optType=OptimizationType.MINIMIZATION,
                benchmark=Sphere())
    algo = DifferentialEvolution(task=task, NP=40, CR=0.9, F=0.5)
    algo.run()
    iters = algo.task.iters()
    self.assertEqual(iters, 1000)
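# A possible companion check for the evaluation-based stopping criterion.
# This is a sketch, not part of the original suite: it assumes the task object
# also exposes an evals() counter, mirroring the iters() call used above.
def test_DE_evals_fine(self):
    task = Task(D=10,
                nFES=1000,
                optType=OptimizationType.MINIMIZATION,
                benchmark=Sphere())
    algo = DifferentialEvolution(task=task, NP=40, CR=0.9, F=0.5)
    algo.run()
    evals = algo.task.evals()  # assumed counter, analogous to iters() above
    self.assertEqual(evals, 1000)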
Example #2
def optimize(bench, algo):
    average_mfo = 0
    average_de = 0
    average_abc = 0
    average_pso = 0
    average_ba = 0
    average_fa = 0
    average_ga = 0

    for i in np.arange(epoch):
        mfo = MothFlameOptimizer(D=dim, NP=pop, nGEN=maxIter, benchmark=bench)
        de = DifferentialEvolution(D=dim,
                                   NP=pop,
                                   nGEN=maxIter,
                                   benchmark=bench)
        abc = ArtificialBeeColonyAlgorithm(D=dim,
                                           NP=pop,
                                           nFES=maxIter,
                                           benchmark=bench)
        pso = ParticleSwarmAlgorithm(D=dim,
                                     NP=pop,
                                     nGEN=maxIter,
                                     benchmark=bench)
        ba = BatAlgorithm(D=dim, NP=pop, nFES=maxIter, benchmark=bench)
        fa = FireflyAlgorithm(D=dim, NP=pop, nFES=maxIter, benchmark=bench)
        ga = GeneticAlgorithm(D=dim, NP=pop, nFES=maxIter, benchmark=bench)

        gen, best_de = de.run()
        gen, best_mfo = mfo.run()
        gen, best_abc = abc.run()
        gen, best_pso = pso.run()
        gen, best_ba = ba.run()
        gen, best_fa = fa.run()
        gen, best_ga = ga.run()

        average_mfo += best_mfo / epoch  # accumulate running average per algorithm
        average_de += best_de / epoch
        average_abc += best_abc / epoch
        average_pso += best_pso / epoch
        average_ba += best_ba / epoch
        average_fa += best_fa / epoch
        average_ga += best_ga / epoch

    print(algo, ': DE Average of Bests over', epoch, 'runs:', average_de)
    print(algo, ': MFO Average of Bests over', epoch, 'runs:', average_mfo)
    print(algo, ': ABC Average of Bests over', epoch, 'runs:', average_abc)
    print(algo, ': PSO Average of Bests over', epoch, 'runs:', average_pso)
    print(algo, ': BA Average of Bests over', epoch, 'runs:', average_ba)
    print(algo, ': FA Average of Bests over', epoch, 'runs:', average_fa)
    print(algo, ': GA Average of Bests over', epoch, 'runs:', average_ga)

    return [
        average_de, average_mfo, average_abc, average_pso, average_ba,
        average_fa, average_ga
    ]
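# Minimal usage sketch for optimize() above (not part of the original snippet).
# It assumes the older NiaPy constructor-style interface used in the function,
# that the algorithm classes live in NiaPy.algorithms.basic (as in the other
# examples here), and that dim, pop, maxIter and epoch are module-level
# settings; the values below are illustrative only.
import numpy as np
from NiaPy.benchmarks import Sphere, Griewank
from NiaPy.algorithms.basic import (MothFlameOptimizer, DifferentialEvolution,
                                    ArtificialBeeColonyAlgorithm,
                                    ParticleSwarmAlgorithm, BatAlgorithm,
                                    FireflyAlgorithm, GeneticAlgorithm)

dim = 10        # problem dimensionality
pop = 40        # population size
maxIter = 1000  # nGEN / nFES budget passed to each algorithm
epoch = 10      # number of independent runs to average over

sphere_results = optimize(Sphere(), 'Sphere')
griewank_results = optimize(Griewank(), 'Griewank')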
Example #3
# encoding=utf8
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
import sys
sys.path.append('../')
# End of fix

from NiaPy.algorithms.basic import DifferentialEvolution
from NiaPy.util import StoppingTask, OptimizationType
from NiaPy.benchmarks import Sphere

# we will run Differential Evolution for 5 independent runs
for i in range(5):
    task = StoppingTask(D=10, nFES=5000, optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
    algo = DifferentialEvolution(NP=50, F=0.5, CR=0.9)
    best = algo.run(task=task)
    print('%s -> %s' % (best[0].x, best[1]))

Example #4
# encoding=utf8
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
import sys
sys.path.append('../')
# End of fix

import random
import logging
from NiaPy.util import StoppingTask, OptimizationType
from NiaPy.algorithms.basic import DifferentialEvolution
from NiaPy.benchmarks import Griewank, Sphere

#1 Number of function evaluations (nFES) as a stopping criterion
for i in range(10):
    task = StoppingTask(D=10,
                        nFES=10000,
                        optType=OptimizationType.MINIMIZATION,
                        benchmark=Sphere())
    algo = DifferentialEvolution(NP=40, CR=0.9, F=0.5)
    best = algo.run(task)
    print(best)

print('---------------------------------------')

#2 Number of generations (iterations) as a stopping criterion
for i in range(10):
    task = StoppingTask(D=10,
                        nGEN=1000,
                        optType=OptimizationType.MINIMIZATION,
                        benchmark=Sphere())
    algo = DifferentialEvolution(NP=40, CR=0.9, F=0.5)
    best = algo.run(task)
    print(best)

print('---------------------------------------')
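# Sketch (not part of the original example): the same nFES-based stopping
# criterion applied to the Griewank benchmark, which is imported above but
# never used. The construction simply mirrors the Sphere runs.
for i in range(10):
    task = StoppingTask(D=10,
                        nFES=10000,
                        optType=OptimizationType.MINIMIZATION,
                        benchmark=Griewank())
    algo = DifferentialEvolution(NP=40, CR=0.9, F=0.5)
    best = algo.run(task)
    print(best)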
Example #5
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
import sys
sys.path.append('../')
# End of fix

import random
import logging
import numpy as np
from NiaPy import Runner
from NiaPy.util import Task, TaskConvPrint, TaskConvPlot, OptimizationType, getDictArgs
from NiaPy.algorithms.modified import SelfAdaptiveDifferentialEvolution
from NiaPy.algorithms.basic import DifferentialEvolution, MonkeyKingEvolutionV3
from NiaPy.benchmarks import Griewank, Sphere
from NiaPy.algorithms.statistics import BasicStatistics

NUM_RUNS = 10  # define number of runs
stats = np.zeros(NUM_RUNS)

for i in range(NUM_RUNS):
    task = Task(
        D=10,
        nFES=10000,
        optType=OptimizationType.MINIMIZATION,
        benchmark=Sphere())
    print ("Working on run: " + str(i+1))
    algo = DifferentialEvolution(task=task, NP=40, CR=0.9, F=0.5)
    best = algo.run()
    stats[i] = best[1]  # save best


stat = BasicStatistics(stats)
print(stat.generate_standard_report())  # show basic stats
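# Optional sanity check (not in the original): summarize the collected best
# fitness values with plain numpy, independent of BasicStatistics. stats is
# the array filled in the loop above.
print('min: %g, max: %g, mean: %g, std: %g' % (
    stats.min(), stats.max(), stats.mean(), stats.std()))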