Example #1
    def fitnessBattle(self, particle):
        """
        Evaluate a particle by head-to-head games: the particle's current
        position plays its personal best (as red, then as blue) and then the
        swarm's best (as red, then as blue). Losing to the personal best
        returns bestKnownVal - 1, losing to the swarm best returns
        swarm.bestKnownVal, and surviving all four games returns
        swarm.bestKnownVal + 20.
        """
        options = capture.readCommand(self.flags)
        verbose = options.pop('verbose') # pop it off since runGame can't handle it
        #options['display'] = None
        options['numGames'] = 1
        options['record'] = False
        options['numTraining'] = False
        for agent in options['agents']:
            agent.setWeights(particle.position if agent.isRed else particle.bestKnown)
        games = capture.runGames(**options)
        assert(len(games) == 1)
        if(games[0].state.data.score < 0): #Blue wins
            return particle.bestKnownVal - 1

        options = capture.readCommand(self.flags)
        verbose = options.pop('verbose') # pop it off since runGame can't handle it
        #options['display'] = None
        options['numGames'] = 1
        options['record'] = False
        options['numTraining'] = False
        for agent in options['agents']:
            agent.setWeights(particle.bestKnown if agent.isRed else particle.position)
        games = capture.runGames(**options)
        assert(len(games) == 1)
        if(games[0].state.data.score > 0): #Red wins
            return particle.bestKnownVal - 1

        bestSwarm = True  # note: assigned but never read in this method

        options = capture.readCommand(self.flags)
        verbose = options.pop('verbose') # pop it off since runGame can't handle it
        #options['display'] = None
        options['numGames'] = 1
        options['record'] = False
        options['numTraining'] = 0
        for agent in options['agents']:
            agent.setWeights(particle.position if agent.isRed else particle.swarm.bestKnown)
        games = capture.runGames(**options)
        assert(len(games) == 1)
        if(games[0].state.data.score < 0): #Blue wins
            return particle.swarm.bestKnownVal

        options = capture.readCommand(self.flags)
        verbose = options.pop('verbose') # pop it off since runGame can't handle it
        #options['display'] = None
        options['numGames'] = 1
        options['record'] = False
        options['numTraining'] = 0
        for agent in options['agents']:
            agent.setWeights(particle.swarm.bestKnown if agent.isRed else particle.position)
        games = capture.runGames(**options)
        assert(len(games) == 1)
        if(games[0].state.data.score > 0): #Red wins
            return particle.swarm.bestKnownVal

        return particle.swarm.bestKnownVal + 20
Example #2
def eval_func(chromosome):
    # arguments for the pacman game
    argv = ["-r", "myTeam", "-b", "baselineTeam", "-l", "RANDOM107", "-Q", "-n", "3"]
    options = readCommand(argv)  # Get game components based on input
    options["chromosome"] = chromosome.genomeList
    
    games = runGames(**options)    

    # scores = [game.state.data.score - len(game.state.getBlueFood().asList()) + 2 for game in games];
    # scores = [game.state.data.score + 100.0 * exp(-len(game.state.getBlueFood().asList()) + 2) for game in games];
    # scores = [50.0 * exp(-len(game.state.getBlueFood().asList()) + 2) for game in games];
    
#    foodEaten = [4 - len(game.state.getBlueFood().asList()) for game in games]
#    foodLost = [4 - len(game.state.getRedFood().asList()) for game in games]
#    pacmanKills = []
#    scores = []
#    for i in range(len(games)):
#        killBalance = (games[i].state.data.score - (foodEaten[i] - foodLost[i]))
#        pacmanKills.append(killBalance)
#       # scores.append(foodEaten[i] - foodLost[i] + killBalance)
#        scores.append(foodEaten[i] - (foodLost[i] * 10) + (killBalance / 10.0))
    
    scores = [game.state.data.score for game in games]
    
    minScore = min(scores)
    avgScore = float(sum(scores)) / len(scores)
    
#    print "Chromosome: ",
#    print chromosome.genomeList,
#    print ""
#    print " got a score of: ",
#    print avgScore
    
    return avgScore
Example #3
def eval_func():
    # arguments for the pacman game
    argv = ["-r", "myTeam", "-b", "baselineTeam", "-Q", "-n", "10"]
    options = readCommand(argv)  # Get game components based on input
    games = runGames(**options)

    scores = [game.state.data.score for game in games]

    average = sum(scores) / 10.0
    maxi = max(scores)
    mini = min(scores)
    return [average, maxi, mini]
Example #4
    def __init__(self, optimizableDelegate, numParticles, flags, fitnessFunction = None):
        self.flags = flags
        opts = capture.readCommand(flags)
        self.verbose = opts['verbose']
        self.optimizableDelegate = optimizableDelegate
        self.numParticles = numParticles
        if(fitnessFunction == None):
            self.fitnessFunction = ParticleSwarmOptimizer.fitnessBattle
            self.fitnessInitializer = lambda x: 0
        else:
            self.fitnessFunction = fitnessFunction
            self.fitnessInitializer = fitnessFunction
        self.swarm = ParticleSwarm(optimizableDelegate, numParticles, self.fitnessInitializer)
Example #5
 def __init__(self,
              optimizableDelegate,
              numParticles,
              flags,
              fitnessFunction=None):
     self.flags = flags
     opts = capture.readCommand(flags)
     self.verbose = opts['verbose']
     self.optimizableDelegate = optimizableDelegate
     self.numParticles = numParticles
     if (fitnessFunction == None):
         self.fitnessFunction = ParticleSwarmOptimizer.fitnessBattle
         self.fitnessInitializer = lambda x: 0
     else:
         self.fitnessFunction = fitnessFunction
         self.fitnessInitializer = fitnessFunction
     self.swarm = ParticleSwarm(optimizableDelegate, numParticles,
                                self.fitnessInitializer)
Example #6
            return particle.swarm.bestKnownVal

        return particle.swarm.bestKnownVal + 20


if __name__ == '__main__':
    """
    Run this like you were running capture.py at the command line
    
    Important Flags to know:
    -n 4 # run 4 iterations
    -x 12 # with 12 particles
    -v # in verbose mode
    """
    flags = sys.argv[1:]
    options = capture.readCommand(flags)
    agents = options['agents']
    assert(len(agents) > 0)
    optimizableDelegate = agents[0]
    numIterations = options['numGames']
    numParticles = options['numTraining']
    verbose = options.pop('verbose')
    if(verbose):
        print("Calling swarm optimizer with numIterations = " + str(numIterations) + " numParticles = " + str(numParticles))
    optimizer = ParticleSwarmOptimizer(optimizableDelegate, numParticles, flags)
    bestValues = optimizer.optimize(numIterations)
    print("Best Values = ")
    print(bestValues)
    
    
    
Example #7
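The original snippet uses A, b, model, loss, x_dataset and y_dataset without defining them. The setup below is a minimal assumed sketch (names and values are illustrative, not part of the original) so that the Adam loop that follows has something to optimize:

import torch
import torch.optim as optim

# Synthetic 1-D regression data: y = 3x + 2 plus a little noise
x_dataset = torch.linspace(0, 10, 100)
y_dataset = 3 * x_dataset + 2 + 0.5 * torch.randn(100)

# Parameters to fit; requires_grad=True lets Adam update them after backward()
A = torch.randn(1, requires_grad=True)
b = torch.randn(1, requires_grad=True)

def model(x):
    # Simple linear model: y_hat = A * x + b
    return A * x + b

def loss(y_predicted, y_true):
    # Mean squared error between predictions and targets
    return ((y_predicted - y_true) ** 2).mean()
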
optimizer = optim.Adam([A, b], lr=0.1)

for t in range(2000):
    # Set the gradients to 0
    optimizer.zero_grad()
    # Compute the current predicted y's from x_dataset
    y_predicted = model(x_dataset)
    # See how far off the prediction is
    current_loss = loss(y_predicted, y_dataset)
    # Compute the gradient of the loss with respect to A and b
    current_loss.backward()
    # Update A and b accordingly
    optimizer.step()
    print "t = %s, loss = %s, A = %s, b = %s" % (t, current_loss,
                                                 A.detach().numpy(), b.item())

if __name__ == '__main__':
    """
    The main function called when pacman.py is run
    from the command line:

    > python capture.py

    See the usage string for more details.

    > python capture.py --help
    """
    options = readCommand(sys.argv[1:])  # Get game components based on input
    games = runGames(**options)
Example #8
	return ret


def qFunc(state, weights):
	s = np.asarray([state])
	w = np.asarray([weights]).T
	q = np.matmul(s, w)[0][0]
	return q
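
# qFunc is just a dot product between a state feature vector and a weight vector.
# Illustrative example (the numbers are made up, not from the original code):
#   qFunc([1.0, 2.0, 3.0], [0.5, -0.1, 0.25])  ->  1*0.5 + 2*(-0.1) + 3*0.25 ≈ 1.05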


if __name__ == '__main__':
	for i in range(30):
		with open("record.txt", "w") as f:
		    pass
		argv = ["-r", "myTeam.py", "-b", "myTeam.py", "-l", "RANDOM", "-n", "2", "-q"]
		options = readCommand(argv)
		runGames(**options)
		argv = ["-r", "myTeam.py", "-b", "shibaReflex.py", "-l", "RANDOM", "-n", "2", "-q"]
		options = readCommand(argv)
		runGames(**options)
		argv = ["-r", "myTeam.py", "-b", "yellowdogReflex.py", "-l", "RANDOM", "-n", "2", "-q"]
		options = readCommand(argv)
		runGames(**options)
		records = readRecords("record.txt")
		with open("weights.txt", "r") as f:
			weights = ast.literal_eval(f.read())
		print(weights)
		new_weights = train(10, records, weights)
		print(new_weights)
		with open("weights.txt", "w") as f:
			f.write(json.dumps(new_weights))
Example #9
            line = f.readline()
            self.learn_step_counter = int(line)
            line = f.readline()
            self.memory_counter = int(line)
            f.close()


if __name__ == "__main__":

    for i_episode in range(TRAINING_SIZE):
        print "PLAYING TRAINING GAME " + str(i_episode) + ":"
        arguments = [
            "-l", "RANDOM", "-r", "kizunaTeam", "-b", "baselineTeam", "-q",
            "--time", "80000"
        ]
        options = readCommand(arguments)
        games = runGames(**options)
        print "\n\n"

    EPSILON = 1.0
    for i_episode in range(TEST_SIZE):
        print "PLAYING TEST GAME " + str(i_episode) + ":"
        arguments = ["-l", "RANDOM", "-r", "kizunaTeam", "-b", "baselineTeam"]
        options = readCommand(arguments)
        games = runGames(**options)
        print "\n\n"

    # print('\nCollecting experience...')
    # for i_episode in range(TRAINING_SIZE):
    #
    #     s = env.reset()
Example #10
    def fitnessBattle(self, particle):
        options = capture.readCommand(self.flags)
        verbose = options.pop(
            'verbose')  # pop it off since runGame can't handle it
        #options['display'] = None
        options['numGames'] = 1
        options['record'] = False
        options['numTraining'] = False
        for agent in options['agents']:
            agent.setWeights(
                particle.position if agent.isRed else particle.bestKnown)
        games = capture.runGames(**options)
        assert (len(games) == 1)
        if (games[0].state.data.score < 0):  #Blue wins
            return particle.bestKnownVal - 1

        options = capture.readCommand(self.flags)
        verbose = options.pop(
            'verbose')  # pop it off since runGame can't handle it
        #options['display'] = None
        options['numGames'] = 1
        options['record'] = False
        options['numTraining'] = False
        for agent in options['agents']:
            agent.setWeights(
                particle.bestKnown if agent.isRed else particle.position)
        games = capture.runGames(**options)
        assert (len(games) == 1)
        if (games[0].state.data.score > 0):  #Red wins
            return particle.bestKnownVal - 1

        bestSwarm = True

        options = capture.readCommand(self.flags)
        verbose = options.pop(
            'verbose')  # pop it off since runGame can't handle it
        #options['display'] = None
        options['numGames'] = 1
        options['record'] = False
        options['numTraining'] = 0
        for agent in options['agents']:
            agent.setWeights(
                particle.position if agent.isRed else particle.swarm.bestKnown)
        games = capture.runGames(**options)
        assert (len(games) == 1)
        if (games[0].state.data.score < 0):  #Blue wins
            return particle.swarm.bestKnownVal

        options = capture.readCommand(self.flags)
        verbose = options.pop(
            'verbose')  # pop it off since runGame can't handle it
        #options['display'] = None
        options['numGames'] = 1
        options['record'] = False
        options['numTraining'] = 0
        for agent in options['agents']:
            agent.setWeights(
                particle.swarm.bestKnown if agent.isRed else particle.position)
        games = capture.runGames(**options)
        assert (len(games) == 1)
        if (games[0].state.data.score > 0):  #Red wins
            return particle.swarm.bestKnownVal

        return particle.swarm.bestKnownVal + 20
Example #11
            return particle.swarm.bestKnownVal

        return particle.swarm.bestKnownVal + 20


if __name__ == '__main__':
    """
    Run this like you were running capture.py at the command line
    
    Important Flags to know:
    -n 4 # run 4 iterations
    -x 12 # with 12 particles
    -v # in verbose mode
    """
    flags = sys.argv[1:]
    options = capture.readCommand(flags)
    agents = options['agents']
    assert (len(agents) > 0)
    optimizableDelegate = agents[0]
    numIterations = options['numGames']
    numParticles = options['numTraining']
    verbose = options.pop('verbose')
    if (verbose):
        print("Calling swarm optimizer with numIterations = " +
              str(numIterations) + " numParticles = " + str(numParticles))
    optimizer = ParticleSwarmOptimizer(optimizableDelegate, numParticles,
                                       flags)
    bestValues = optimizer.optimize(numIterations)
    print("Best Values = ")
    print(bestValues)
Example #12
"""
Author:      XuLin Yang
Student id:  904904
Date:        2020-2-6 23:31:18
Description: code to compare the performance of the agents against each other
"""

import time
import capture
"""
["D:\python\python.exe",
                             "-m",
                              "../capture.py",
                              "--delay 0.0",
                              "-r ./pacman_ai/myTeam.py --redOpts=first=SearchAgent,second=SearchAgent",
                              "-b ../baselineTeam.py --blueOpts=first=DefensiveReflexAgent,second=DefensiveReflexAgent"]
"""

if __name__ == "__main__":
    start_time = time.time()
    options = capture.readCommand([
        "-r", "./teams/pacman_ai/myTeam.py",
        "--redOpts=first=InferenceAgent,second=InferenceAgent", "-b",
        "./baselineTeam.py",
        "--blueOpts=first=DefensiveReflexAgent,second=DefensiveReflexAgent"
    ])
    games = capture.runGames(**options)

    print('\nTotal Time Game: %s' % round(time.time() - start_time, 0))
Example #13
def eval_func2(genome):
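    # Note: the genome argument is not used; this just runs a quiet
    # baseline-vs-baseline game and always reports a fitness of 0.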
    argv = ["-r", "baselineTeam", "-b", "baselineTeam", "-q"]
    options = readCommand(argv)  # Get game components based on input
    runGames(**options)

    return 0