Example No. 1
	def __init__(self, p):	
		# get the model from the server
		model = self._download_model()

		# create an evaluator with these weights, and an ai agent using it
		evaluator = TutorialEvaluator(p, model)

		# TTT's constructor takes in the piece
		# that STARTS first (not the player's piece)
		self.board = TTT(3, START_PIECE, evaluator)

		# self.agent = AlphaBeta(4)
		self.agent = AlphaBeta()
Example No. 2
class federatedplayer:
	def __init__(self, p):	
		# get the model from the server
		model = self._download_model()

		# create an evaluator with these weights, and an ai agent using it
		evaluator = TutorialEvaluator(p, model)

		# TTT's constructor takes in the piece
		# that STARTS first (not the player's piece)
		self.board = TTT(3, START_PIECE, evaluator)

		# self.agent = AlphaBeta(4)
		self.agent = AlphaBeta()

	def update(self, move):
		# record the opponent's move on the local board
		self._apply(move)

	def move(self):
		# let the agent choose a move, apply it locally, and return it
		move = self._move()
		self._apply(move)
		return move
	
	def _apply(self, move):
		self.board.update(move)

	def _move(self):
		# delegate move selection to the alpha-beta agent
		return self.agent.next_move(self.board)
	
	def _download_model(self):
		# fetch the current model weights from the federated server
		server = flip.flip()
		server.connect()
		server.send_check()
		model = server.recv_model()
		server.disconnect()
		return model
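
A minimal sketch of how this player class might be driven from a game loop. The opponent's move source and the game-over check are assumptions, not shown in the snippet:

# Hypothetical driver: 'X', game_over(), and get_opponent_move() are
# assumptions for illustration, not part of the class above.
player = federatedplayer('X')
while not player.board.game_over():
    my_move = player.move()          # agent picks and applies a move
    opp_move = get_opponent_move()   # assumed source of the opponent's move
    player.update(opp_move)          # mirror it onto the local board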
Example No. 3
def get_agent(self, task_id, board, depth):
    """Return the search agent that matches task_id."""
    if task_id == self.GBFS:
        return GBFS(board)
    elif task_id == self.MINIMAX:
        return MiniMax(board, depth)
    else:
        return AlphaBeta(board, depth)
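
For context, a hedged usage sketch of this factory; the solver object and the agent's get_next_board() method are assumptions borrowed from the other examples on this page (see Example No. 8):

# Hypothetical call site: solver and get_next_board() are assumed names.
agent = solver.get_agent(solver.MINIMAX, board, depth=3)
next_board = agent.get_next_board()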
Example No. 4
def move(self, state):
    """Take the output of the Alpha-Beta Minimax algorithm and use it
    to tell the game where to place its chip."""
    ab = AlphaBeta(state)
    move = ab.next_move(self.difficulty, state, self.chip)
    return move
Example No. 5
from game import Game
from minimax import Minimax
from alpha_beta import AlphaBeta
from math import floor

f = open('input.txt', 'r')
boardSize = int(f.readline())
algo = f.readline().rstrip()
originPlayer = f.readline().rstrip()
searchDepth = int(f.readline())
boardValues = [["*" for i in range(boardSize)] for j in range(boardSize)]
originBoardState = [["*" for i in range(boardSize)] for j in range(boardSize)]
game = Game(boardValues, boardSize)

# Both searchers share the same Game instance; its board values are filled in below
minimax = Minimax(searchDepth, game, originPlayer)
alphabeta = AlphaBeta(searchDepth, game, originPlayer)

# Set boardValues
for i in range(boardSize):
    line = f.readline().rstrip()
    line = line.split(" ")
    for j in range(boardSize):
        boardValues[i][j] = int(line[j])

# Set originBoardState
for i in range(boardSize):
    line = f.readline().rstrip()
    for j in range(boardSize):
        originBoardState[i][j] = line[j]
f.close()
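
The reads above imply a fixed layout for input.txt: the board size, the algorithm name, the player symbol, and the search depth on the first four lines, then boardSize rows of space-separated cell values, then boardSize rows of piece characters. A purely illustrative file for boardSize = 3:

3
ALPHABETA
X
2
1 2 3
4 5 6
7 8 9
X*O
*X*
O**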
Example No. 6
    start = time.time()
    
    # Initialize game board
    gameboard = Board()

    # Initialize red agent
    idx = input('Please choose a type for RED:\n'
                '1. Reflex\n'
                '2. Minimax\n'
                '3. Alpha Beta\n')
    if idx == '1':
        RED = Reflex('red')
    elif idx == '2':
        RED = Minimax('red')
    elif idx == '3':
        RED = AlphaBeta('red')
    else:
        raise ValueError('unrecognized agent type for RED: %r' % idx)

    # Initialize blue agent
    idx = input('Please choose a type for BLUE:\n'
                '1. Reflex\n'
                '2. Minimax\n'
                '3. Alpha Beta\n')
    if idx == '1':
        BLUE = Reflex('blue')
    elif idx == '2':
        BLUE = Minimax('blue')
    elif idx == '3':
        BLUE = AlphaBeta('blue')
    else:
        raise ValueError('unrecognized agent type for BLUE: %r' % idx)

    # Mark the turn order of the two agents
    _red_ = 1
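
The fragment stops before the main loop. A hedged sketch of how the two agents might alternate, assuming each agent exposes a move(gameboard) method and the board reports a winner (neither is shown above):

    # Hypothetical continuation: move() and winner() are assumed names.
    turn = _red_
    while gameboard.winner() is None:
        agent = RED if turn == _red_ else BLUE
        agent.move(gameboard)
        turn = -turn
    print('elapsed: %.2f s' % (time.time() - start))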
Example No. 7
def true_move_value(self, gameboard):
    """Return the value associated with a particular state of the board.

    In reality this value is an estimate: it comes from an alpha-beta
    search with an evaluation function, not an exact solution.
    """
    smartAgent = AlphaBeta(self.color)
    return smartAgent.find_move_value(gameboard, depth=2)
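
find_move_value itself is not shown. As a generic illustration of the technique (not this project's implementation), a depth-limited alpha-beta evaluation could look like this, assuming a board API with legal_moves(), apply(move) returning a new board, and a heuristic evaluate():

# Generic alpha-beta sketch; legal_moves/apply/evaluate are assumed names.
def find_move_value(board, depth, alpha=float('-inf'), beta=float('inf'),
                    maximizing=True):
    if depth == 0 or not board.legal_moves():
        return board.evaluate()              # heuristic score at the horizon
    if maximizing:
        value = float('-inf')
        for m in board.legal_moves():
            value = max(value, find_move_value(board.apply(m), depth - 1,
                                               alpha, beta, False))
            alpha = max(alpha, value)
            if alpha >= beta:                # beta cutoff
                break
        return value
    else:
        value = float('inf')
        for m in board.legal_moves():
            value = min(value, find_move_value(board.apply(m), depth - 1,
                                               alpha, beta, True))
            beta = min(beta, value)
            if beta <= alpha:                # alpha cutoff
                break
        return value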
Example No. 8
def alpha_beta(self):
    # run an alpha-beta search from the current board, emit the chosen
    # next state, and write out the search log
    agent = AlphaBeta(self.board, self.depth)
    board = agent.get_next_board()
    agent.output_next_state(board)
    agent.output_log()