def __init__(self, pacman_agent=agent.HumanAgent()):
    self.pacman_agent = pacman_agent
    pygame.init()
    pygame.font.init()
    self.screen = pygame.display.set_mode(config.SCREEN_SIZE)
    self.game = game.Game(self.pacman_agent)
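# Note: a default argument like agent.HumanAgent() is evaluated once, when the def
# statement runs, so every instance created without an explicit agent shares the same
# HumanAgent object. A common late-bound-default variant (sketch only; the rest of the
# constructor is assumed to stay as above):
def __init__(self, pacman_agent=None):
    # Create a fresh HumanAgent per instance instead of sharing one default object
    self.pacman_agent = pacman_agent if pacman_agent is not None else agent.HumanAgent()
    pygame.init()
    pygame.font.init()
    self.screen = pygame.display.set_mode(config.SCREEN_SIZE)
    self.game = game.Game(self.pacman_agent)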
def getAgent(agentType, playerNum):
    if agentType == 'human':
        return agt.HumanAgent(playerNum)
    elif agentType == 'random':
        return agt.RandomAgent()
    elif agentType == 'simple':
        return agt.SimpleAgent()
    elif agentType == 'reflex':
        return agt.ReflexAgent(playerNum)
    elif agentType == 'simple++':
        return agt.SimpleEnhancedAgent(playerNum)
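# The if/elif chain above can also be written as a lookup table. A minimal sketch,
# assuming the same `agt` module; constructors that take no player number are wrapped
# so every entry has the same call signature, and unknown types raise instead of
# silently returning None.
AGENT_FACTORIES = {
    'human': lambda playerNum: agt.HumanAgent(playerNum),
    'random': lambda playerNum: agt.RandomAgent(),
    'simple': lambda playerNum: agt.SimpleAgent(),
    'reflex': lambda playerNum: agt.ReflexAgent(playerNum),
    'simple++': lambda playerNum: agt.SimpleEnhancedAgent(playerNum),
}

def getAgentFromTable(agentType, playerNum):
    factory = AGENT_FACTORIES.get(agentType)
    if factory is None:
        raise ValueError("Unknown agent type: {}".format(agentType))
    return factory(playerNum)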
def create_player_list(args):
    # Only need board_params and players in args
    board_params = args["board_params"]
    list_players = []
    for i, player_args in enumerate(args["players"]):
        kwargs = removekey(player_args, "agent")
        if player_args["agent"] == "RandomAgent":
            list_players.append(agent.RandomAgent(f"Random_{i}"))
        elif player_args["agent"] == "PeacefulAgent":
            list_players.append(agent.PeacefulAgent(f"Peaceful_{i}"))
        elif player_args["agent"] == "FlatMCPlayer":
            list_players.append(agent.FlatMCPlayer(name=f'flatMC_{i}', **kwargs))
        elif player_args["agent"] == "UCTPlayer":
            list_players.append(agent.UCTPlayer(name=f'UCT_{i}', **kwargs))
        elif player_args["agent"] == "PUCTPlayer":
            world = World(board_params["path_board"])
            board = Board(world, [agent.RandomAgent('Random1'), agent.RandomAgent('Random2')])
            board.setPreferences(board_params)
            puct = load_puct(board, player_args)
            list_players.append(puct)
        elif player_args["agent"] == "NetPlayer":
            world = World(board_params["path_board"])
            board = Board(world, [agent.RandomAgent('Random1'), agent.RandomAgent('Random2')])
            board.setPreferences(board_params)
            netPlayer = load_NetPlayer(board, player_args)
            list_players.append(netPlayer)
        elif player_args["agent"] == "Human":
            hp_name = player_args["name"] if "name" in player_args else "human"
            hp = agent.HumanAgent(name=hp_name)
            list_players.append(hp)
    return list_players
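# Hypothetical configuration for create_player_list (key names come from the function
# above; the board path, the agent-specific kwargs, and the player mix are illustrative
# placeholders, not values from the original project).
example_args = {
    "board_params": {"path_board": "maps/classic_world.json"},  # placeholder path
    "players": [
        {"agent": "Human", "name": "alice"},
        {"agent": "RandomAgent"},
        {"agent": "UCTPlayer", "max_depth": 50, "sims_per_eval": 100},  # forwarded via **kwargs
    ],
}
players = create_player_list(example_args)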
def main(args=None):
    from optparse import OptionParser
    usage = "usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option("-p", "--player1", dest="player1", default="random",
                      help="Choose type of first player")
    (opts, args) = parser.parse_args(args)

    evalArgs = load_weights()
    evalFn = aiAgents.nnetEval

    p1 = None
    if opts.player1 == 'random':
        p1 = agent.RandomAgent(game.Game.TOKENS[0])
    elif opts.player1 == 'reflex':
        p1 = aiAgents.TDAgent(game.Game.TOKENS[0], evalArgs)
    elif opts.player1 == 'expectiminimax':
        p1 = aiAgents.ExpectiMiniMaxAgent(game.Game.TOKENS[0], evalFn, evalArgs)
    elif opts.player1 == 'human':
        p1 = agent.HumanAgent(game.Game.TOKENS[0])

    # p2 = agent.RandomAgent(game.Game.TOKENS[1])
    p2 = aiAgents.ExpectiMiniMaxAgent(game.Game.TOKENS[1], evalFn, evalArgs)

    if p1 is None:
        print("Please specify legitimate player")
        import sys
        sys.exit(1)

    play([p1, p2])
import agent
import GUI
import MCTS

print('input side: \'black\' or \'white\'')
side = input()
print('input time limit for tree move (one number in seconds)')
move_time = float(input())

agent1 = MCTS.MCTS('../weights/gomoku_nn', '../weights/gomoku_nn',
                   sim_number=10000, linear=False, time=move_time, rollout_depth=10)
agent2 = agent.HumanAgent()
if side == 'black':
    agent1, agent2 = agent2, agent1

GUI.run_gui(agent1, agent2, delay=0)
import sys

import agent
from util import printc, bcolors

if len(sys.argv) != 4:
    print("Incorrect number of arguments. Please refer to the Readme file")
    exit()

n = int(sys.argv[3])
depth = 2
width = 4

agents = [None, None]
if sys.argv[1] == "MCTreeSearch":
    agents[0] = agent.MCTreeSearchAgent()
elif sys.argv[1] == "HumanAgent":
    agents[0] = agent.HumanAgent(0)
elif sys.argv[1] == "PureMC":
    agents[0] = agent.PureMCAgent()
elif sys.argv[1] == "Minimax":
    agents[0] = agent.MinimaxAgent()

if sys.argv[2] == "MCTreeSearch":
    agents[1] = agent.MCTreeSearchAgent()
elif sys.argv[2] == "HumanAgent":
    agents[1] = agent.HumanAgent(1)
elif sys.argv[2] == "PureMC":
    agents[1] = agent.PureMCAgent()
elif sys.argv[2] == "Minimax":
    agents[1] = agent.MinimaxAgent()

# agent = [agent.MCTreeSearchAgent(agent.twoPinAwayPolicy, agent.twoPinAwayPolicy, 1000), agent.HumanAgent(1)]
def main(args=None):
    from optparse import OptionParser
    usage = "usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option("-t", "--train", dest="train", action="store_true",
                      default=False, help="Train TD Player")
    parser.add_option("-d", "--draw", dest="draw", action="store_true",
                      default=False, help="Draw game")
    parser.add_option("-n", "--num", dest="numgames", default=1,
                      help="Num games to play")
    parser.add_option("-p", "--player1", dest="player1", default="random",
                      help="Choose type of first player")
    parser.add_option("-e", "--eval", dest="eval", action="store_true",
                      default=False, help="Play with the better eval function for player")
    (opts, args) = parser.parse_args(args)

    weights = None
    if opts.train:
        weights = train()

    if opts.eval:
        weights = load_weights(weights)
        evalFn = submission.logLinearEvaluation
        evalArgs = weights
    else:
        evalFn = submission.simpleEvaluation
        evalArgs = None

    p1 = None
    if opts.player1 == 'random':
        p1 = agent.RandomAgent(game.Game.TOKENS[0])
    elif opts.player1 == 'reflex':
        p1 = submission.ReflexAgent(game.Game.TOKENS[0], evalFn, evalArgs)
    elif opts.player1 == 'expectimax':
        p1 = submission.ExpectimaxAgent(game.Game.TOKENS[0], evalFn, evalArgs)
    elif opts.player1 == 'human':
        p1 = agent.HumanAgent(game.Game.TOKENS[0])

    p2 = agent.RandomAgent(game.Game.TOKENS[1])

    if p1 is None:
        print("Please specify legitimate player")
        import sys
        sys.exit(1)

    test([p1, p2], numGames=int(opts.numgames), draw=opts.draw)
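# Example invocations (sketch; the option strings come from the parser defined above,
# and this main() is assumed to be importable from the surrounding module).
main(['--player1', 'human', '--draw'])               # human vs. random opponent, board drawn
main(['--player1', 'reflex', '--eval', '-n', '10'])  # reflex player with loaded log-linear weights, 10 games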
def main(args=None):
    import sys
    print("Please choose the type of agent: human, TDagent, or random")
    line = sys.stdin.readline()

    from optparse import OptionParser
    usage = "usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option("-d", "--draw", dest="draw", action="store_true",
                      default=False, help="Draw game")
    parser.add_option("-n", "--num", dest="numgames", default=1,
                      help="Num games to play")
    parser.add_option("-p", "--player1", dest="player1", default=str(line.strip()),
                      help="Choose type of first player")
    parser.add_option("-e", "--eval", dest="eval", action="store_true",
                      default=True, help="Play with the better eval function for player")
    (opts, args) = parser.parse_args(args)

    weights = None
    weights1 = None
    if opts.eval:
        weights, weights1 = load_weights(weights, weights1)
    evalArgs = weights
    evalArgs1 = weights1
    evalFn = aiAgents.nnetEval

    print("The chosen agent is: " + str(opts.player1))

    p1 = None
    if str(opts.player1) == 'random':
        p1 = agent.RandomAgent(game.Game.TOKENS[0])
    elif opts.player1 == 'TDagent':
        p1 = aiAgents.TDAgent(game.Game.TOKENS[0], evalArgs1)
    elif opts.player1 == 'expectimax':
        p1 = aiAgents.ExpectimaxAgent(game.Game.TOKENS[0], evalFn, evalArgs)
    elif opts.player1 == 'expectiminimax':
        p1 = aiAgents.ExpectiMiniMaxAgent(game.Game.TOKENS[0], evalFn, evalArgs)
    elif opts.player1 == 'human':
        p1 = agent.HumanAgent(game.Game.TOKENS[0])

    if p1 is None:
        print("Please specify legitimate player")
        sys.exit(1)

    p2 = aiAgents.TDAgent(game.Game.TOKENS[1], evalArgs)
    # p2 = aiAgents.ExpectiMiniMaxAgent(game.Game.TOKENS[1], evalFn, evalArgs)

    if opts.player1 == 'random':
        test([p1, p2], numGames=int(opts.numgames), draw=opts.draw)
        print("o is random")
        print("x is the agent")
    if opts.player1 == 'TDagent':
        # test([p1, p2], numGames=int(opts.numgames), draw=opts.draw)
        play([p1, p2])
    if opts.player1 == 'human':
        play([p1, p2])
        print("o is td(0)")
        print("x is the agent td(0.5)")
def main(args=None):
    # Parse command line arguments
    argNames = ['-trainGames', '-run', '-p', '-test', '-agent', '-opp',
                '-tournament', '-human', '-verbose']
    parser = argparse.ArgumentParser(description='Process input parameters.')
    for arg in argNames:
        parser.add_argument(arg)
    namespace = parser.parse_args()

    numPlayers = int(getattr(namespace, 'p')) if getattr(namespace, 'p') else 3
    numTrainGames = int(getattr(namespace, 'trainGames')) if getattr(namespace, 'trainGames') else 50
    numTestGames = int(getattr(namespace, 'test')) if getattr(namespace, 'test') else 100
    numRuns = int(getattr(namespace, 'run')) if getattr(namespace, 'run') else 10
    agentType = getattr(namespace, 'agent') if getattr(namespace, 'agent') else 'model'
    oppType = getattr(namespace, 'opp') if getattr(namespace, 'opp') else 'simple'
    isTournament = (getattr(namespace, 'tournament') == 'y')
    isHuman = (getattr(namespace, 'human') == 'y')
    verbose = (getattr(namespace, 'verbose') == 'y')

    winRates = []
    for i in range(numRuns):
        # Play a game with a human agent
        if isHuman:
            agents = [agent.HumanAgent(0)]
            for j in range(1, numPlayers):
                if oppType == 'random':
                    agents.append(agent.RandomAgent(j))
                elif oppType == 'simple':
                    agents.append(agent.ReflexAgent(j, evaluation.simpleEvaluation))
                elif oppType == 'honest':
                    agents.append(agent.HonestAgent(j))
                elif oppType == 'dishonest':
                    agents.append(agent.DishonestAgent(j))
                elif oppType == 'bs':
                    agents.append(agent.AlwaysCallBSAgent(j))
            g = game.Game(numPlayers, NUM_DECKS)
            run_game(g, agents)
        # Play a game with a computer agent
        else:
            agents = []
            if agentType == 'random':
                agents.append(agent.RandomAgent(0))
            elif agentType == 'simple':
                agents.append(agent.ReflexAgent(0, evaluation.simpleEvaluation))
            elif agentType == 'honest':
                agents.append(agent.HonestAgent(0))
            elif agentType == 'dishonest':
                agents.append(agent.DishonestAgent(0))
            elif agentType == 'bs':
                agents.append(agent.AlwaysCallBSAgent(0))
            elif agentType == 'reflex':
                # Learn weights for the log-linear evaluation, then build the agent
                if isTournament:
                    w = tournament(numPlayers, numTrainGames)
                else:
                    w = train(numPlayers, numTrainGames)
                agents.append(agent.ReflexAgent(0, logLinearEvaluation, w))
            elif agentType == 'model':
                if isTournament:
                    w = tournament(numPlayers, numTrainGames, isModel=True)
                else:
                    w = train(numPlayers, numTrainGames, isModel=True)
                agents.append(agent.ModelReflexAgent(0, numPlayers, logLinearEvaluation, w))
            for j in range(1, numPlayers):
                if oppType == 'random':
                    agents.append(agent.RandomAgent(j))
                elif oppType == 'simple':
                    agents.append(agent.ReflexAgent(j, evaluation.simpleEvaluation))
                elif oppType == 'honest':
                    agents.append(agent.HonestAgent(j))
                elif oppType == 'dishonest':
                    agents.append(agent.DishonestAgent(j))
                elif oppType == 'bs':
                    agents.append(agent.AlwaysCallBSAgent(j))
            winRates.append(test(agents, numTestGames, verbose))

    if winRates:
        avgWinRate = sum(winRates) / numRuns
        print("Average win rate of agent is: {}".format(avgWinRate))
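# Hypothetical driver for the flags above (the script name is a placeholder; flag
# names come from argNames, and anything left unset falls back to the defaults
# computed in main()).
import sys

sys.argv = [
    "play.py",            # placeholder script name
    "-p", "4",            # four players instead of the default 3
    "-agent", "model",    # model-based reflex agent for player 0
    "-opp", "simple",     # opponents use the simple-evaluation reflex agent
    "-run", "2",          # two independent train/test runs
    "-verbose", "y",      # verbose flag forwarded to test()
]
main()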
print("r was played in the game {} times".format(number_of_r)) dict_of_prs = { "p": number_of_p, "r": number_of_r, "s": number_of_s } most_freq = max(dict_of_prs, key=dict_of_prs.get) print('Most played move: {}'.format(most_freq)) print('=' * 20) def compare(self, move_a1, move_a2): if move_a1 == move_a2: return 0 elif (move_a1 == "r" and move_a2 == "p" or move_a1 == "p" and move_a2 == "s" or move_a1 == "s" and move_a2 == "r"): return 2 else: return 1 human = agent.HumanAgent() Computer = agent.InstructorAgent() game = Game(human, Computer) game.play(5) game.summary()