Example #1
from pybrain.optimization import MultiObjectiveGA
from pybrain.rl.environments.functions.multiobjective import Deb, Pol
import pylab
from scipy import zeros, array

# The Deb function
#f = Deb()
# The Pol function
f = Pol()

# start at the origin
x0 = zeros(f.indim)

# ... overridden here: start at the lower bound of each input dimension instead
x0 = array([min_ for min_, max_ in f.xbound])

# set up the optimization, to be run for a maximum of 30 generations
n = MultiObjectiveGA(f,
                     x0,
                     storeAllEvaluations=True,
                     populationSize=50,
                     eliteProportion=1.0,
                     topProportion=1.0,
                     mutationProb=0.5,
                     mutationStdDev=0.1,
                     storeAllPopulations=True,
                     allowEquality=False)
print('Start Learning')
n.learn(30)
print('End Learning')

# plot the results (blue = all evaluated points, red = resulting Pareto front)
print('Plotting the Results')
print('All Evaluations')
for x in n._allEvaluations:
    pylab.plot([x[0]], [x[1]], 'b.')
for x in n.bestEvaluation:
    pylab.plot([x[0]], [x[1]], 'ro')
pylab.show()
Example #2
#!/usr/bin/env python
""" An illustration of using the NSGA-II multi-objective optimization algorithm
on a simple standard benchmark function. """

__author__ = 'Tom Schaul, [email protected]'

from pybrain.optimization import MultiObjectiveGA
from pybrain.rl.environments.functions.multiobjective import KurBenchmark
import pylab
from scipy import zeros

# The benchmark function
f = KurBenchmark()

# start at the origin
x0 = zeros(f.indim)

# run the optimization for a maximum of 25 generations
n = MultiObjectiveGA(f, x0, storeAllEvaluations=True)
n.learn(25)

# plotting the results (blue = all evaluated points, red = resulting Pareto front)
for x in n._allEvaluations:
    pylab.plot([x[1]], [x[0]], 'b+')
for x in n.bestEvaluation:
    pylab.plot([x[1]], [x[0]], 'ro')
pylab.show()
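
The snippet above only visualizes the objective values. If you want to extract a non-dominated set yourself from the raw points gathered with storeAllEvaluations=True, a small Pareto filter over n._allEvaluations is enough. The sketch below is purely illustrative and not part of PyBrain's API: pareto_front is a hypothetical helper, and it assumes both objectives are treated as maximized (the sense MultiObjectiveGA optimizes in) and that each entry of n._allEvaluations is a tuple of objective values, as the plotting loops above already presume.

# Hypothetical helper, not part of PyBrain: keep only the non-dominated
# points of a list of objective tuples, assuming every objective is maximized.
def pareto_front(points):
    front = []
    for p in points:
        # p is dominated if some other point is at least as good in every
        # objective and strictly better in at least one
        dominated = any(all(q[i] >= p[i] for i in range(len(p)))
                        and any(q[i] > p[i] for i in range(len(p)))
                        for q in points)
        if not dominated:
            front.append(p)
    return front

# compare the filtered front against the one reported by the optimizer
front = pareto_front([tuple(x) for x in n._allEvaluations])
print('%d non-dominated points out of %d evaluations' % (len(front), len(n._allEvaluations)))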
Example #3
from pybrain.optimization import MultiObjectiveGA
from pybrain.rl.environments.functions.multiobjective import Deb, Pol
import pylab
from scipy import zeros, array

# The Deb function
#f = Deb()
# The Pol function
f = Pol()

# start at the origin
x0 = zeros(f.indim)

# ... overridden here: start at the lower bound of each input dimension instead
x0 = array([min_ for min_, max_ in f.xbound])

# set up the optimization, to be run for a maximum of 30 generations
n = MultiObjectiveGA(f, x0, storeAllEvaluations=True, populationSize=50, eliteProportion=1.0,
                     topProportion=1.0, mutationProb=0.5, mutationStdDev=0.1,
                     storeAllPopulations=True, allowEquality=False)
print('Start Learning')
n.learn(30)
print('End Learning')

# plot the results (blue = all evaluated points, red = resulting Pareto front)
print('Plotting the Results')
print('All Evaluations')
for x in n._allEvaluations:
    pylab.plot([x[0]], [x[1]], 'b.')
for x in n.bestEvaluation:
    pylab.plot([x[0]], [x[1]], 'ro')
pylab.show()
print('Pareto Front')
for x in n.bestEvaluation:
    pylab.plot([x[0]], [x[1]], 'ro')
pylab.show()
print('===========')
print('= Results =')
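
The listing breaks off right after the results banner, and what the original printed there is not shown. As a purely illustrative sketch (not the original continuation), the objective values of the final Pareto front could be listed from the same n.bestEvaluation attribute the plotting loops rely on:

# Hypothetical sketch, not the original code: enumerate the objective
# values of the points on the final Pareto front.
for i, fitness in enumerate(n.bestEvaluation):
    print('point %d: objectives = %s' % (i, str(tuple(fitness))))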