def setUp(self):
    """Build a base task plus a scaled task derived from its bounds."""
    self.D, self.nFES, self.nGEN = 10, 10, 10
    self.t = Task(D=self.D, nFES=self.nFES, nGEN=self.nGEN, benchmark=MyBenchmark())
    # Scaled bounds: lower edge at the midpoint of the base range,
    # upper edge 20% of the base range above it.
    mid = self.t.bcLower() + self.t.bcRange() / 2
    span = self.t.bcRange() * 0.2
    L, U = mid, mid + span
    self.tc = ScaledTask(self.t, L, U)
def add_algorithm(self, name: str) -> None:
    """Add a new genetic algorithm to the simulation.

    Args:
        name: Name of an algorithm class resolvable via ``globals()``.

    Raises:
        InvalidAlgorithmName: If ``name`` is empty/None or does not
            resolve to a constructible algorithm class.
    """
    if not name:
        raise InvalidAlgorithmName(
            'Invalid algorithm name "{0}"'.format(name))
    try:
        alg_type = globals()[str(name)]
        alg_obj = alg_type(
            seed=random.randint(1, 9999),
            task=Task(
                D=self._dataset.total_packages,
                nFES=self.n_fes,
                benchmark=BenchmarkC(dataset=self._dataset),
                optType=OptimizationType.MINIMIZATION),
            NP=self.np)
    except Exception as exc:
        # Chain the original exception so the real cause (bad lookup,
        # constructor failure, ...) is not hidden from the caller.
        raise InvalidAlgorithmName(
            'Invalid algorithm name "{0}"'.format(name)) from exc
    # Bookkeeping kept outside the try: a failure here is not a naming error.
    self._algorithms.append(alg_obj)
    self.logger.console_log('added algorithm {0}'.format(name))
def main(file_name, algorithm, iterations, population_size, phenotype_coding):
    """Wire together the main parts of the project.

    Deletes previously created image directories, parses the dataset
    file, builds an optimization task from the parsed objects, runs the
    selected evolutionary algorithm on it and finally prints the overall
    best instance.

    Args:
        file_name: Name of the dataset file to read.
        algorithm: NiaPy algorithm class used for the optimization.
        iterations: Number of repetitions (function evaluations).
        population_size: Number of instances created per generation.
        phenotype_coding: Enum selecting the genotype-to-phenotype coding.

    Returns:
        None.
    """
    directory.Directory().delete_directories()
    objects = file.File.read('../datasets/' + file_name)
    benchmark = evaluation.Evaluation(
        objects, iterations, population_size, phenotype_coding)
    task = Task(D=len(objects[1]), nFES=iterations, benchmark=benchmark,
                optType=OptimizationType.MINIMIZATION)
    solver = algorithm(seed=randint(1000, 10000), task=task, NP=population_size)
    result, fitness = solver.run()
    print_result(evaluation.Evaluation.find_overall_best_instance(fitness))
def setUp(self):
    """Create a task and three individuals used by the tests."""
    self.D = 20
    self.x = rnd.uniform(-100, 100, self.D)
    self.task = Task(self.D, 230, inf, MyBenchmark())
    self.s1 = Individual(x=self.x, e=False)
    self.s2 = Individual(task=self.task, rand=rnd)
    self.s3 = Individual(task=self.task)
def test_init_population_individual_fine(self):
    r"""Custom individual-based population initialization yields all-zero members."""
    algo = Algorithm(NP=10, InitPopFunc=init_pop_individual, itype=Individual)
    task = Task(D=20, benchmark=MyBenchmark())
    expected = Individual(x=full(task.D, 0.0), task=task)
    pop, fpop, d = algo.initPopulation(task)
    for member in pop:
        self.assertEqual(expected, member)
def simple_example(alg, cec, fnum=1, runs=10, D=10, nFES=50000, nGEN=5000,
                   seed=None, optType=OptimizationType.MINIMIZATION,
                   optFunc=MinMB, wout=False, sr=(-100, 100), **kwu):
    """Run ``alg`` on a CEC benchmark function for a number of independent runs.

    Args:
        alg: Algorithm class instantiated once per run.
        cec: CEC suite identifier passed to ``getCecBench``.
        fnum: Benchmark function number.
        runs: Number of independent runs.
        D: Problem dimensionality.
        nFES: Function-evaluation budget per run.
        nGEN: Generation budget per run.
        seed: Optional sequence of seeds cycled over the runs
            (``None`` means every run gets a random seed).
        optType: Optimization type (minimization/maximization).
        optFunc: Benchmark wrapper class.
        wout: If True, save best positions and values to text files.
        sr: Search-range bounds ``(lower, upper)``.
    """
    # BUG FIX: the defaults were mutable lists ([None], [-100, 100]) shared
    # across calls; normalize to immutable defaults instead.
    seeds = list(seed) if seed is not None else [None]
    bests, func = list(), getCecBench(cec, D)
    for i in range(runs):
        task = Task(D=D, nFES=nFES, nGEN=nGEN, optType=optType,
                    benchmark=optFunc(func, sr[0], sr[1], fnum))
        algo = alg(seed=seeds[i % len(seeds)], task=task)
        best = algo.run()
        logger.info('%s %s' % (best[0], best[1]))
        bests.append(best)
    # Guard on bests: with runs == 0 the original referenced an unbound `algo`.
    if wout and bests:
        bpos = asarray([x[0] for x in bests])
        bval = asarray([x[1] for x in bests])
        savetxt('%s_%d_%d_p' % (algo.Name[-1], fnum, D), bpos)
        savetxt('%s_%d_%d_v' % (algo.Name[-1], fnum, D), bval)
def __init__(self, **kwargs):
    r"""Initialize algorithm and create name for an algorithm.

    **Arguments:**

    name {string} -- full name of algorithm

    shortName {string} -- short name of algorithm

    NP {integer} -- population size

    D {integer} -- dimension of the problem

    nGEN {integer} -- number of generations/iterations

    nFES {integer} -- number of function evaluations

    benchmark {object} -- benchmark implementation object

    task {Task} -- optimization task to perform

    **Raises:**

    TypeError -- raised when given benchmark function does not exist

    **See**:
    Algorithm.setParameters(self, **kwargs)
    """
    # Pop 'task' and 'seed' first so they are never forwarded to setParameters.
    task, self.Rand = kwargs.pop('task', None), rand.RandomState(kwargs.pop('seed', None))
    # NOTE(review): the D/nFES/nGEN/benchmark/optType pops execute only when no
    # task was supplied (the else arm of the conditional expression). When a
    # task IS supplied, those keys remain in kwargs and reach setParameters —
    # preserve this ordering if the code is ever refactored.
    self.task = task if task is not None else Task(kwargs.pop('D', 10), nFES=kwargs.pop('nFES', inf), nGEN=kwargs.pop('nGEN', inf), benchmark=kwargs.pop('benchmark', 'ackley'), optType=kwargs.pop('optType', OptimizationType.MINIMIZATION))
    # Remaining kwargs are algorithm-specific parameters.
    self.setParameters(**kwargs)
def test_setBenchmark(self):
    """Attaching a benchmark via a task yields an Algorithm instance."""
    task = Task(D=10, nFES=10, nGEN=10,
                optType=OptimizationType.MINIMIZATION, benchmark=MyBenchmark())
    # NOTE: 'setBechmark' (sic) is the name exposed by the library under test.
    result = self.a.setBechmark(task)
    self.assertIsInstance(result, Algorithm)
def test_FA_evals_fine(self):
    """Firefly algorithm consumes exactly the allowed evaluation budget."""
    task = Task(D=10, nFES=1000,
                optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
    algorithm = FireflyAlgorithm(task=task, NP=25)
    algorithm.run()
    self.assertEqual(algorithm.task.evals(), 1000)
def test_DE_iters_fine(self):
    """Differential evolution stops after exactly nGEN iterations."""
    task = Task(D=10, nGEN=1000,
                optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
    algorithm = DifferentialEvolution(task=task, NP=40, CR=0.9, F=0.5)
    algorithm.run()
    self.assertEqual(algorithm.task.iters(), 1000)
def setUp(self):
    """Build a task and three monkey-king solutions for the tests."""
    self.D = 20
    self.x = rnd.uniform(-2, 2, self.D)
    self.task = Task(self.D, 230, inf, MyBenchmark())
    self.sol1 = MkeSolution(x=self.x, e=False)
    self.sol2 = MkeSolution(task=self.task)
    self.sol3 = MkeSolution(x=self.x, e=False)
def test_FA_iters_fine(self):
    """Firefly algorithm stops after exactly nGEN iterations."""
    task = Task(D=10, nGEN=1000,
                optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
    algorithm = FireflyAlgorithm(task=task, NP=25)
    algorithm.run()
    self.assertEqual(algorithm.task.iters(), 1000)
def test_BA_iters_to_fes(self):
    """A generation-limited bat algorithm run performs 10010 evaluations.

    With NP=10 and nGEN=1000 the total is 10010 — presumably the initial
    population evaluation plus one per individual per generation.
    """
    task = Task(D=10, nGEN=1000,
                optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
    algorithm = BatAlgorithm(task=task, NP=10)
    algorithm.run()
    self.assertEqual(algorithm.task.evals(), 10010)
def setUp(self):
    """Prepare jDE solutions with explicit F and CR control parameters."""
    self.D, self.F, self.CR = 10, 0.9, 0.3
    self.x = rnd.uniform(10, 50, self.D)
    self.task = Task(self.D, 230, None, MyBenchmark())
    self.s1 = SolutionjDE(task=self.task, e=False)
    self.s2 = SolutionjDE(x=self.x, CR=self.CR, F=self.F)
def setUp(self):
    """Prepare aging jDE individuals with explicit F and CR parameters."""
    self.D, self.F, self.CR = 10, 0.9, 0.3
    self.x = rnd.uniform(10, 50, self.D)
    self.task = Task(self.D, nFES=230, nGEN=None, benchmark=MyBenchmark())
    self.s1 = AgingIndividualJDE(task=self.task, e=False)
    self.s2 = AgingIndividualJDE(x=self.x, CR=self.CR, F=self.F)
def simple_example(alg, runs=10, D=10, nFES=50000, nGEN=10000, seed=None,
                   optType=OptimizationType.MINIMIZATION, optFunc=MinMB, **kn):
    """Run the given algorithm several times and log each best result."""
    for run in range(runs):
        task = Task(D=D, nFES=nFES, nGEN=nGEN, optType=optType,
                    benchmark=optFunc())
        algorithm = alg(seed=seed, task=task)
        best = algorithm.run()
        logger.info('%s %s' % (best[0], best[1]))
class ScaledTaskTestCase(TestCase):
    r"""Tests that a ScaledTask shares stop conditions and counters with its base Task."""

    def setUp(self):
        # self.tc wraps self.t, so evaluations/iterations on either are
        # observed by both.
        self.D, self.nFES, self.nGEN = 10, 10, 10
        self.t = Task(self.D, self.nFES, self.nGEN, MyBenchmark())
        d1, d2 = self.t.bcLower() + self.t.bcRange() / 2, self.t.bcRange() * 0.2
        L, U = d1, d1 + d2
        self.tc = ScaledTask(self.t, L, U)

    def test_isFeasible_fine(self):
        # Points inside [-10, 10] are feasible for both tasks; outside are not
        # (MyBenchmark's bounds appear to be [-10, 10] — TODO confirm).
        x = full(self.D, 10)
        self.assertTrue(self.t.isFeasible(x))
        self.assertTrue(self.tc.isFeasible(x))
        x = full(self.D, -10)
        self.assertTrue(self.t.isFeasible(x))
        self.assertTrue(self.tc.isFeasible(x))
        x = rnd.uniform(-10, 10, self.D)
        self.assertTrue(self.t.isFeasible(x))
        self.assertTrue(self.tc.isFeasible(x))
        x = full(self.D, -20)
        self.assertFalse(self.t.isFeasible(x))
        self.assertFalse(self.tc.isFeasible(x))
        x = full(self.D, 20)
        self.assertFalse(self.t.isFeasible(x))
        self.assertFalse(self.tc.isFeasible(x))

    def test_nextIter_fine(self):
        # Advancing the base task trips the scaled task's stop condition too.
        for i in range(self.nGEN):
            self.assertFalse(self.t.stopCond())
            self.assertFalse(self.tc.stopCond())
            self.t.nextIter()
        self.assertTrue(self.t.stopCond())
        self.assertTrue(self.tc.stopCond())

    def test_nextIter_two_fine(self):
        # Iterations advanced through either wrapper accumulate on the
        # shared counter (two per loop pass, nGEN/2 passes).
        for i in range(int(self.nGEN / 2)):
            self.assertFalse(self.t.stopCond())
            self.assertFalse(self.tc.stopCond())
            self.tc.nextIter()
            self.t.nextIter()
        self.assertTrue(self.t.stopCond())
        self.assertTrue(self.tc.stopCond())

    def test_stopCondI(self):
        # stopCondI() advances the iteration counter as a side effect.
        for i in range(int(self.nGEN / 2)):
            self.assertFalse(self.t.stopCondI())
            self.assertFalse(self.tc.stopCondI())
        self.assertTrue(self.t.stopCondI())
        self.assertTrue(self.tc.stopCondI())

    def test_eval_fine(self):
        x = full(self.D, 0.0)
        # Each loop pass spends two evaluations (one per wrapper).
        for i in range(int(self.nFES / 2)):
            self.assertAlmostEqual(self.t.eval(x), 0.0, msg='Error at %s iteration!!!' % (i))
            self.assertAlmostEqual(self.tc.eval(x), 0.0, msg='Error at %s iteration!!!' % (i))
        # Once the evaluation budget is exhausted, eval() reports inf.
        self.assertEqual(self.t.eval(x), inf)
        self.assertEqual(self.tc.eval(x), inf)

    def test_eval_over_nFES_fine(self):
        x = full(self.D, 0.0)
        for i in range(int(self.nFES / 2)):
            self.t.eval(x)
            self.tc.eval(x)
        self.assertEqual(self.t.eval(x), inf)
        self.assertEqual(self.tc.eval(x), inf)

    def test_eval_over_nGEN_fine(self):
        x = full(self.D, 0.0)
        for i in range(int(self.nGEN / 2)):
            self.t.nextIter()
            self.tc.nextIter()
        self.assertEqual(self.t.eval(x), inf)
        self.assertEqual(self.tc.eval(x), inf)

    def test_nFES_count_fine(self):
        # Both wrappers report the same shared evaluation count.
        x = full(self.D, 0.0)
        for i in range(self.nFES):
            self.t.eval(x)
            self.assertEqual(self.t.evals(), 2 * i + 1, 'Error at %s. evaluation' % (i + 1))
            self.tc.eval(x)
            self.assertEqual(self.tc.evals(), 2 * i + 2, 'Error at %s. evaluation' % (i + 1))

    def test_nGEN_count_fine(self):
        # Both wrappers report the same shared iteration count.
        x = full(self.D, 0.0)
        for i in range(int((self.nFES - 1 if self.nFES % 2 > 0 else self.nFES - 2) / 2)):
            self.t.nextIter()
            self.assertEqual(self.t.iters(), 2 * i + 1, 'Error at %s. iteration' % (i + 1))
            self.tc.nextIter()
            self.assertEqual(self.tc.iters(), 2 * i + 2, 'Error at %s. iteration' % (i + 1))

    def test_stopCond_evals_fine(self):
        # Spend all but the last two evaluations in the loop, then confirm
        # the stop condition flips only after the final two.
        x = full(self.D, 0.0)
        for i in range(int((self.nFES - 1 if self.nFES % 2 > 0 else self.nFES - 2) / 2)):
            self.assertFalse(self.t.stopCond())
            self.t.eval(x)
            self.assertFalse(self.tc.stopCond())
            self.tc.eval(x)
        self.assertFalse(self.t.stopCond())
        self.t.eval(x)
        self.assertFalse(self.tc.stopCond())
        self.tc.eval(x)
        self.assertTrue(self.t.stopCond())
        self.assertTrue(self.tc.stopCond())

    def test_stopCond_iters_fine(self):
        # Same pattern as above, but driven by iterations instead of evals.
        x = full(self.D, 0.0)
        for i in range(int((self.nGEN - 1 if self.nGEN % 2 > 0 else self.nGEN - 2) / 2)):
            self.assertFalse(self.t.stopCond())
            self.t.nextIter()
            self.assertFalse(self.tc.stopCond())
            self.tc.nextIter()
        self.assertFalse(self.t.stopCond())
        self.t.nextIter()
        self.assertFalse(self.tc.stopCond())
        self.tc.nextIter()
        self.assertTrue(self.t.stopCond())
        self.assertTrue(self.tc.stopCond())
def setUp(self):
    """Create the task shared by the tests (positional Task arguments)."""
    self.D = 10
    self.nFES = 10
    self.nGEN = 10
    self.t = Task(self.D, self.nFES, self.nGEN, MyBenchmark())
class TaskTestCase(TestCase):
    r"""Tests for Task feasibility checks, counters and stop conditions."""

    def setUp(self):
        self.D, self.nFES, self.nGEN = 10, 10, 10
        self.t = Task(self.D, self.nFES, self.nGEN, MyBenchmark())

    def test_isFeasible_fine(self):
        # Points inside [-10, 10] are feasible; outside are not
        # (MyBenchmark's bounds appear to be [-10, 10] — TODO confirm).
        x = full(self.D, 10)
        self.assertTrue(self.t.isFeasible(x))
        x = full(self.D, -10)
        self.assertTrue(self.t.isFeasible(x))
        x = rnd.uniform(-10, 10, self.D)
        self.assertTrue(self.t.isFeasible(x))
        x = full(self.D, -20)
        self.assertFalse(self.t.isFeasible(x))
        x = full(self.D, 20)
        self.assertFalse(self.t.isFeasible(x))

    def test_nextIter_fine(self):
        for i in range(self.nGEN):
            self.assertFalse(self.t.stopCond())
            self.t.nextIter()
        self.assertTrue(self.t.stopCond())

    def test_stopCondI(self):
        # stopCondI() advances the iteration counter as a side effect.
        for i in range(self.nGEN):
            self.assertFalse(self.t.stopCondI())
        self.assertTrue(self.t.stopCondI())

    def test_eval_fine(self):
        x = full(self.D, 0.0)
        for i in range(self.nFES):
            self.assertAlmostEqual(self.t.eval(x), 0.0, msg='Error at %s iteration!!!' % (i))
        # Once the evaluation budget is exhausted, eval() reports inf.
        self.assertEqual(self.t.eval(x), inf)

    def test_eval_over_nFES_fine(self):
        x = full(self.D, 0.0)
        for i in range(self.nFES):
            self.t.eval(x)
        self.assertEqual(self.t.eval(x), inf)

    def test_eval_over_nGEN_fine(self):
        x = full(self.D, 0.0)
        for i in range(self.nGEN):
            self.t.nextIter()
        self.assertEqual(self.t.eval(x), inf)

    def test_nFES_count_fine(self):
        # Evals counter increments by one per evaluation.
        x = full(self.D, 0.0)
        for i in range(self.nFES):
            self.t.eval(x)
            self.assertEqual(self.t.Evals, i + 1, 'Error at %s. evaluation' % (i + 1))

    def test_nGEN_count_fine(self):
        # Iters counter increments by one per nextIter() call.
        x = full(self.D, 0.0)
        for i in range(self.nGEN):
            self.t.nextIter()
            self.assertEqual(self.t.Iters, i + 1, 'Error at %s. iteration' % (i + 1))

    def test_stopCond_evals_fine(self):
        # Stop condition flips only on the final evaluation.
        x = full(self.D, 0.0)
        for i in range(self.nFES - 1):
            self.t.eval(x)
            self.assertFalse(self.t.stopCond())
        self.t.eval(x)
        self.assertTrue(self.t.stopCond())

    def test_stopCond_iters_fine(self):
        # Stop condition flips only on the final iteration.
        x = full(self.D, 0.0)
        for i in range(self.nGEN - 1):
            self.t.nextIter()
            self.assertFalse(self.t.stopCond())
        self.t.nextIter()
        self.assertTrue(self.t.stopCond())

    def test_unused_evals(self):
        # Evaluations attempted past the budget are counted as "unused".
        x = full(self.D, 0.0)
        for i in range(self.nFES + 10):
            self.t.eval(x)
        self.assertEqual(self.t.unused_evals(), 10)
def setUp(self):
    """Create the task shared by the tests (keyword Task arguments)."""
    self.D = 10
    self.nFES = 10
    self.nGEN = 10
    self.t = Task(D=self.D, nFES=self.nFES, nGEN=self.nGEN, benchmark=MyBenchmark())
def test_init_population_numpy_fine(self):
    r"""Custom numpy-based population initialization yields an all-zero array."""
    algo = Algorithm(NP=10, InitPopFunc=init_pop_numpy)
    task = Task(D=20, benchmark=MyBenchmark())
    pop = algo.initPopulation(task)[0]
    self.assertTrue(array_equal(full((10, task.D), 0.0), pop))
sys.path.append('../')
# End of fix
import random
import logging
import numpy as np
from NiaPy import Runner
from NiaPy.util import Task, TaskConvPrint, TaskConvPlot, OptimizationType, getDictArgs
from NiaPy.algorithms.modified import SelfAdaptiveDifferentialEvolution
from NiaPy.algorithms.basic import DifferentialEvolution, MonkeyKingEvolutionV3
from NiaPy.benchmarks import Griewank, Sphere
from NiaPy.algorithms.statistics import BasicStatistics

NUM_RUNS = 10  # define number of runs

# Collect the best fitness value of every run.
stats = np.zeros(NUM_RUNS)
for i in range(NUM_RUNS):
    task = Task(D=10, nFES=10000,
                optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
    print("Working on run: " + str(i + 1))
    algo = DifferentialEvolution(task=task, NP=40, CR=0.9, F=0.5)
    best = algo.run()
    stats[i] = best[1]  # save best fitness

stat = BasicStatistics(stats)
# BUG FIX: 'print stat...' was Python 2 print-statement syntax (a SyntaxError
# under Python 3); use the print() function like the rest of the script.
print(stat.generate_standard_report())  # show basic stats
sys.path.append('../')
# End of fix
import random
import logging
from NiaPy import Runner
from NiaPy.util import Task, TaskConvPrint, TaskConvPlot, OptimizationType, getDictArgs
from NiaPy.algorithms.modified import SelfAdaptiveDifferentialEvolution
from NiaPy.algorithms.basic import DifferentialEvolution, MonkeyKingEvolutionV3
from NiaPy.benchmarks import Griewank, Sphere

# 1) Number of function evaluations (nFES) as a stopping criterion.
for run in range(10):
    task = Task(D=10, nFES=10000,
                optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
    algo = DifferentialEvolution(task=task, NP=40, CR=0.9, F=0.5)
    best = algo.run()
    print(best)
    print('---------------------------------------')

# 2) Number of generations (iterations) as a stopping criterion.
for run in range(10):
    task = Task(D=10, nGEN=100,
                optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
    algo = DifferentialEvolution(task=task, NP=40, CR=0.5)
    best = algo.run()