# --- Example 1 ---
__author__ = 'Tom Schaul, [email protected]'

from pybrain.rl.tasks.capturegame import CaptureGameTask
from pybrain.structure.evolvables.cheaplycopiable import CheaplyCopiable
from pybrain.rl.learners import ES
from pybrain.utilities import storeCallResults
from pybrain.rl.agents.capturegameplayers.killing import KillingPlayer

# task settings: opponent, averaging to reduce noise, board size, etc.
size = 5
simplenet = False
task = CaptureGameTask(size, averageOverGames=40, opponent=KillingPlayer)

# keep track of evaluations for plotting
res = storeCallResults(task)

if simplenet:
    # simple network
    from pybrain.tools.shortcuts import buildNetwork
    from pybrain import SigmoidLayer
    net = buildNetwork(task.outdim, task.indim, outclass=SigmoidLayer)
else:
    # specialized mdrnn variation
    from pybrain.structure.networks.custom.capturegame import CaptureGameNetwork
    net = CaptureGameNetwork(size=size, hsize=2, simpleborders=True)

net = CheaplyCopiable(net)
print net.name, 'has', net.paramdim, 'trainable parameters.'

learner = ES(task, net, mu=5, lambada=5, verbose=True, noisy=True)
# --- Example 2 ---
""" An illustration of using the NSGA-II multi-objective optimization algorithm 
on a simple standard benchmark function. """

__author__ = 'Tom Schaul, [email protected]'

from pybrain.rl.learners.blackboxoptimizers.evolution.nsga2 import MultiObjectiveGA
from pybrain.rl.environments.functions.multiobjective import KurBenchmark
from pybrain.utilities import storeCallResults
import pylab
from scipy import zeros
       
# The benchmark function
f = KurBenchmark()

# keep track of all evaluations
res = storeCallResults(f)

# start at the origin
x0 = zeros(f.indim)

# the optimization for a maximum of 2500 function evaluations
n = MultiObjectiveGA(f, x0)
n.learn(2500)

# plotting the results (blue = all evaluated points, red = resulting pareto front)
for x in res: pylab.plot([x[1]], [x[0]], 'b+')
for x in n.bestEvaluation: pylab.plot([x[1]], [x[0]], 'ro')
pylab.show()
# --- Example 3 ---
__author__ = 'Tom Schaul, [email protected]'

from pybrain.rl.environments.twoplayergames import CaptureGameTask
from pybrain.structure.evolvables.cheaplycopiable import CheaplyCopiable
from pybrain.optimization import ES
from pybrain.utilities import storeCallResults
from pybrain.rl.environments.twoplayergames.capturegameplayers.killing import KillingPlayer

# task settings: opponent, averaging to reduce noise, board size, etc.
size = 5
simplenet = False
task = CaptureGameTask(size, averageOverGames = 40, opponent = KillingPlayer)

# keep track of evaluations for plotting
res = storeCallResults(task)

if simplenet:
    # simple network
    from pybrain.tools.shortcuts import buildNetwork
    from pybrain import SigmoidLayer
    net = buildNetwork(task.outdim, task.indim, outclass = SigmoidLayer)
else:
    # specialized mdrnn variation
    from pybrain.structure.networks.custom.capturegame import CaptureGameNetwork
    net = CaptureGameNetwork(size = size, hsize = 2, simpleborders = True)

net = CheaplyCopiable(net)
print net.name, 'has', net.paramdim, 'trainable parameters.'

learner = ES(task, net, mu = 5, lambada = 5,
# --- Example 4 ---
""" An illustration of using the NSGA-II multi-objective optimization algorithm 
on a simple standard benchmark function. """

__author__ = 'Tom Schaul, [email protected]'

from pybrain.rl.learners.blackboxoptimizers.evolution.nsga2 import MultiObjectiveGA
from pybrain.rl.environments.functions.multiobjective import KurBenchmark
from pybrain.utilities import storeCallResults
import pylab
from scipy import zeros

# The benchmark function
f = KurBenchmark()

# keep track of all evaluations
res = storeCallResults(f)

# start at the origin
x0 = zeros(f.indim)

# the optimization for a maximum of 2500 function evaluations
n = MultiObjectiveGA(f, x0)
n.learn(2500)

# plotting the results (blue = all evaluated points, red = resulting pareto front)
for x in res:
    pylab.plot([x[1]], [x[0]], 'b+')
for x in n.bestEvaluation:
    pylab.plot([x[1]], [x[0]], 'ro')
pylab.show()