Example #1
    def nextMove(self):
        player = self.turn

        # there are only 42 legal places for pieces on the board
        # exactly one piece is added to the board each turn
        if self.round > 42:
            self.finished = True
            # this would be a stalemate :(
            return
        
        # move is the column that the player wants to play
        move = player.move(self.board)
        if move is None:
            # fall back to a random column that still has room (row 5 is the top row)
            move = random.randint(0, 6)
            while self.board[5][move] != ' ':
                move = random.randint(0, 6)
        for i in range(6):
            if self.board[i][move] == ' ':
                self.board[i][move] = player.color
                # debug output: minimax evaluation of the resulting board
                m = Minimax(self.board)
                print(m.value(self.board, player.color))
                self.switchTurn()
                self.checkForFours()
                self.printState()
                return
        
        # if we get here, then the column is full
        print("Invalid move (column is full)")
        return
Example #2
def play(q_agent, human=0, helper=False, helper_depth=20):
    """
    Play human game against the QAgent.
    `human` can be set to 0 or 1 to specify whether
    human moves first or second.
    """

    connect4 = Connect4()
    helper = Minimax(max_depth=helper_depth) if helper else None

    while True:

        for l in range(0, 42, 7):
            row = ''.join([f"{connect4.board[l + i]}|" for i in range(7)])
            print(row[:13])
            print('-+-+-+-+-+-+-')

        actions = connect4.available_actions(connect4.board)

        if connect4.player == human:
            print("Your Move.")

            while True:
                column = int(input().strip()) - 1

                if column not in actions:
                    print(
                        'That place is already filled or invalid. Still your move.'
                    )
                else:
                    break

            # column, values = helper.get_move(connect4.board)
        else:
            print("QAgent's Move.")

            if helper:
                action, values = helper.get_move(connect4.board)
                if values.count(1000) >= 1 or values.count(-1000) >= 1:
                    column = action
                else:
                    column = q_agent.choose_action(connect4.board,
                                                   epsilon=False)
            else:
                column = q_agent.choose_action(connect4.board, epsilon=False)

            print(f"QAgent put a chip in column {column + 1}.")

        connect4.move(column)

        if connect4.result is not None:
            print("\nGAME OVER\n")

            winner = "Human" if connect4.result == human else "QAgent"
            print(f"Winner is {winner}")

            break

    if input("Play again?\n").lower() == "y":
        play(q_agent)
Example #3
 def _run_minimax(self, max_depth) -> State:
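     # builds a Minimax search over the current grid, wiring in the depth-limited
     # terminal test, child-state generator and evaluation function, and returns
     # the best child state found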
     minimax = Minimax(State(self._grid),
                       terminal_test_func=self._create_terminal_test(max_depth),
                       child_states_func=self._child_states,
                       eval_func=self._eval)
     best_child, _ = minimax.decision()
     return best_child
Example #4
    def move(self, state):
        print("{0}'s turn. ".format(self.name))

        m = Minimax(state)
        best_move, value = m.alphaBetaBestMove(self.difficulty, state, self.color)

        return best_move
Example #5
 def ai_vs_human(self, team):
     self.menubar.add_command(label="AI Go", command=self.ai_move)
     self.ai_team = team
     self.only_allow_valid_moves = True
     # AI time limit defined here
     self.m = Minimax(self.time_limit, True)
     self.master.update()
Example #6
 def __init__(self, game, depth, breadths, evaluate, tag=None):
     assert depth == len(breadths)
     self.game = game
     self.depth = depth
     self.evaluate = evaluate
     self.tag = tag
     self.minimax = Minimax(game, depth, breadths, evaluate)
Example #7
    def move(self, state):
        print("Robot's beurt(" + self.color + ")")

        # Voer minimax-algoritme uit
        m = Minimax(state)
        best_move, value = m.best_move(self.difficulty, state, self.color)
        return best_move
Example #8
class MinimaxPlayer:
    def __init__(self, game, depth, breadths, evaluate, tag=None):
        assert depth == len(breadths)
        self.game = game
        self.depth = depth
        self.evaluate = evaluate
        self.tag = tag
        self.minimax = Minimax(game, depth, breadths, evaluate)

    def choose_move(self):
        self.minimax.run_search()
        move = self.minimax.choose_move()
        self.minimax.reset_tree()
        return move

    def make_move(self, move):
        self.game.make_move(move)

    def __repr__(self):
        my_repr = ''
        my_repr += 'Minimax Player with depth {} ' \
                   'and evaluation function {}'.format(self.depth, self.evaluate.__name__)
        if self.tag:
            my_repr += ' tag: {}.'.format(self.tag)
        return my_repr
Example #9
 def move(self, state):
     print("{0}'s turn.  {0} is {1}, {2}".format(self.name, self.color,
                                                 self.ctr))
     self.ctr += 1
     m = Minimax(state)
     best_move, value = m.bestMove(self.difficulty, state, self.color)
     return best_move
Example #10
def main(args):
    if args.player1 == "human":
        agent1 = Human(1, surface)
    elif args.player1 == "minimax":
        agent1 = Minimax(1, args.minimax_depth[0], args.variant)
    elif args.player1 == "mcts":
        agent1 = MCTS(1, args.mcts_depth[0], args.mcts_rollouts[0],
                      args.variant, args.heuristic_rollouts[0],
                      args.input_file[0] if args.input_file else None,
                      args.output_file[0] if args.output_file else None,
                      args.ucb_const[0])

    if args.player2 == "human":
        agent2 = Human(-1, surface)
    elif args.player2 == "minimax":
        agent2 = Minimax(-1, args.minimax_depth[1], args.variant)
    elif args.player2 == "mcts":
        # player 2 plays as -1, matching the Human and Minimax cases above
        agent2 = MCTS(-1, args.mcts_depth[1], args.mcts_rollouts[1],
                      args.variant, args.heuristic_rollouts[1],
                      args.input_file[1] if len(args.input_file) == 2 else None,
                      args.output_file[1] if len(args.output_file) == 2 else None,
                      args.ucb_const[1])

    for i in range(args.num_games):
        play_game(agent1, agent2, surface, args.variant, args.wait_between)
        if type(agent1) == MCTS:
            agent1.reset(1)
        if type(agent2) == MCTS:
            agent2.reset(-1)
        if args.alternate_sides:
            agent1.switch_sides()
            agent2.switch_sides()
            temp = agent1
            agent1 = agent2
            agent2 = temp
        if type(agent1) == MCTS:
            agent1.store_root()
        if type(agent2) == MCTS:
            agent2.store_root()
Example #11
class AIPlayer(Player):
    """ AIPlayer object that extends Player
        The AI algorithm is minimax, the difficulty parameter is the depth to which
        the search tree is expanded.
    """

    difficulty = None
    def __init__(self, name, color, difficulty=5):
        self.type = "AI"
        self.name = name
        self.color = color
        self.difficulty = difficulty
        self.m = Minimax()


    def move(self, state):
        print "{0}'s turn.  {0} is {1}".format(self.name, self.color)

        # sleeping for about 1 second makes it looks like he's thinking
        #time.sleep(random.randrange(8, 17, 1)/10.0)
        #return random.randint(0, 6)

        self.m.set_board(state)
        best_move, value = self.m.bestMove(self.difficulty, state)
        print(best_move)
        return best_move
Example #12
    def move(self, state):
        super().move(state)
        print("{0}'s turno.  {0} es {1}".format(self.nombre, self.color))

        m = Minimax(state)
        mejor_mov, valor = m.mejorMov(self.dificultad, state, self.color)

        return mejor_mov
Example #13
 async def _prepare_player(self, name):
     async with self._session.post(f'{self._api_url}/game',
                                   params={'team_name': name}) as resp:
         res = (await resp.json())['data']
         self._player_num = 1 if res['color'] == 'RED' else 2
         self._player = {'color': res['color'], 'token': res['token']}
         self.minimax = Minimax(self._player_num)
         print("PLAYER_NUM", self._player_num)
Example #14
def results_stats_memo():
    memo = dict()

    # evaluate the whole game tree
    Minimax.minimax_memo(9, TTT(), memo)

    counter = Counter(memo.values())
    print(counter)
Example #15
 def move(self, state):
     print("{0}'s turn.  {0} is {1}".format(self.name, self.color))
     
     # sleeping for about 1 second makes it looks like he's thinking
     #time.sleep(random.randrange(8, 17, 1)/10.0)
     #return random.randint(0, 6)
     
     m = Minimax(state)
     best_move, value = m.bestMove(self.difficulty, state, self.color)
     return best_move
Example #16
    def move(self, state):
        print("{0}'s turn.  {0} is {1}".format(self.name, self.color))

        m = Minimax(state)
        best_move, _ = m.bestMove(self.difficulty, state, self.color,
                                  self.heuristic)
        #SetMoveMi(best_move)
        print(self.name + ": " + str(best_move))

        return best_move
Example #17
    def move(self, state):
        print("{0}'s turn.  {0} is {1}".format(self.name, self.color))

        # sleeping for about 1 second makes it looks like he's thinking
        #time.sleep(random.randrange(8, 17, 1)/10.0)
        #return random.randint(0, 6)

        m = Minimax(state)
        best_move, value = m.bestMove(self.difficulty, state, self.color)
        return best_move
Example #18
    def test_next_move_gives_the_best_move_with_diagonals(self):
        minimax = Minimax(1)
        game = Game(4, 4)

        game.board = [
            [0, -1, 1, -1],
            [0, 1, -1, -1],
            [0, -1, 1, -1],
            [0, 0, 0, 1],
        ]

        self.assertEqual(minimax.next_move(game), 0)
Example #19
 def ai_vs_ai(self):
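     # plays Minimax against itself: GREEN and RED alternate, each ply is chosen
     # by m.search(), and the board is redrawn until check_win() reports a result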
     m = Minimax(self.time_limit, True)
     i = 0
     while self.board.check_win() == Board.EMPTY:
         if i % 2 == 0:
             team = Board.GREEN
         else:
             team = Board.RED
         self.board = m.search(self.board, team)[0]
         i += 1
         self.display_board(self.board)
         self.master.update()
Example #20
    def test_next_move_gives_the_best_defence_move(self):
        minimax = Minimax(1)
        game = Game(4, 4)

        game.board = [
            [0, 0, 0, 0],
            [0, 0, 0, 0],
            [0, -1, -1, -1],
            [0, 0, 1, 1],
        ]

        self.assertEqual(minimax.next_move(game), 2)
Example #21
    def move(self, state):
        print(f"{self.name}'s turn.  {self.name} is {self.color}")

        # time.sleep(random.randrange(8, 17, 1) / 10.0)
        # return random.randint(0, 6)

        m = Minimax(state)
        best_move, value = m.best_move(state=state,
                                       depth=self.difficulty,
                                       alpha=-math.inf,
                                       beta=math.inf,
                                       maximizing_player=True)

        return best_move
Example #22
    def make_move(self):
        changed_board = self.change_board(settings.board)

        m = Minimax(changed_board)

        x = m.bestMove(self.depth, changed_board,
                       'x' if self.player == 1 else 'o')

        x = x[0]

        output = self.name + " played in column " + str(x) + "."

        print(output)

        return x
Example #23
    def alphaBeta(self, state, alpha, beta, depth):
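        # depth-limited alpha-beta search with a transposition table: previously
        # seen states are looked up in self.table, terminal and cutoff states are
        # scored directly, and interior states cache the best (value, move) pair
        # found under the current alpha/beta window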

        if state in self.table:
            return self.table[state].minimax_value

        state.updateWinCheck()
        if state.isTerminal():
            u = state.stateUtility()
            self.table[state] = Minimax(u, None)
            return u
        elif depth >= self.depth_cutoff:
            e = state.stateEval()
            self.table[state] = Minimax(e, None)
            return e
        elif state.turn == "MAX":
            best_minimax_so_far = -np.inf
            best_move_for_state = None

            children_states = state.genNextBoards()
            for i in range(len(state.moves)):
                if state.moves[i]:
                    child_state = children_states[i]
                    minimax_of_child = self.alphaBeta(child_state, alpha, beta, depth + 1)
                    if minimax_of_child > best_minimax_so_far:
                        best_minimax_so_far = minimax_of_child
                        best_move_for_state = i
                    if best_minimax_so_far >= beta:
                        return best_minimax_so_far
                    alpha = max(alpha, best_minimax_so_far)
            self.table[state] = Minimax(best_minimax_so_far, best_move_for_state)
            return best_minimax_so_far
        else:
            best_minimax_so_far = np.inf
            best_move_for_state = None

            children_states = state.genNextBoards()
            for i in range(len(state.moves)):
                if state.moves[i]:
                    child_state = children_states[i]
                    minimax_of_child = self.alphaBeta(child_state, alpha, beta, depth + 1)
                    if minimax_of_child < best_minimax_so_far:
                        best_minimax_so_far = minimax_of_child
                        best_move_for_state = i
                    if best_minimax_so_far <= alpha:
                        return best_minimax_so_far
                    beta = min(beta, best_minimax_so_far)
            self.table[state] = Minimax(best_minimax_so_far, best_move_for_state)
            return best_minimax_so_far
Example #24
    def move(self, state, timeLimit):
        print("{0}'s turn.  {0} is {1}".format(self.name, self.color))

        if self.difficulty == 6:
            m = AlphaBeta(state)
            start = time.perf_counter()
            best_move, value, depth = m.bestMove(30, state, self.color,
                                                 timeLimit)
            print("Alpha: ", value)
            print("Elapsed:", time.clock() - start)
            print("Depth Reached:", depth)
            return best_move, depth

        elif self.difficulty == 7:
            m = Greedy(state)
            time.sleep(randrange(8, 17, 1) / 10.0)
            best_move = m.best_move(state, self.color)
            print("guess greedy worked")
            return best_move, 1

        else:
            m = Minimax(state)
            start = time.perf_counter()
            best_move, value, depth = m.bestMove(30, state, self.color,
                                                 timeLimit)
            print("Alpha: ", value)
            print("Elapsed:", time.clock() - start)
            print("Depth Reached:", depth)
            return best_move, depth
Example #25
File: c4.py Project: mg515/connect4
 def risanje(self, igralec, p, m):
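     # draws the newly dropped piece: picks the player's colour, evaluates the
     # transposed and flipped copy of the board state with Minimax and prints the
     # value, then animates the piece falling down the canvas column
     # (Slovenian identifiers: stanje = state, platno = canvas)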
     if igralec == 1:
         barva = "steel blue"
     else:
         barva = "chocolate"
     r = deepcopy(self.stanje)
     r = transponiraj(r)
     r = obrni(r)
     minimax1 = Minimax(r)
     s = minimax1.vrednostStanja(r, igralec)
     print(s)
     v = 6
     g=self.platno.create_rectangle(100*p+v, v, 100+100*p-1, 100-1, fill=barva, outline="white")
     for i in range(0, m*100):
         self.platno.move(g, 0, 1)
         self.platno.update()
         time.sleep(1/(50*i+5))
Example #26
	def ai(self,event):
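		# replays a fresh game with the Minimax agent: feed the current board to
		# the agent, apply its move, spawn a new tile after every valid move, and
		# stop when the board reports a loss or an invalid move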
		print "AI"
		self.new_game()
		ai = Minimax()
		while not self.board.has_lost():
			ai.state = self.board
			move = ai.get_move()
			self.board.move(move)
			if self.board.valid_move:
				new_tile = Tile(None)
				new_tile.set_start_value()
				self.board.set_new_tile(new_tile)
				self.update_board()
			else:
				self.update_board()
				break
		self.game_ended()
Example #27
 def __compare_bots(self, iterations):
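     # dispatches the move request to whichever engine the player to move uses:
     # ISMCTS for player1, otherwise the Minimax opponent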
     if self.state.playerToMove == self.player1:
         assert self.state.playerToMove.algorithm == "ISMCTS"
         return self.smart_ismcts.get_move(rootstate=self.state,
                                           itermax=iterations,
                                           verbose=False)
     else:
         assert self.state.playerToMove.algorithm == "Minimax"
         return Minimax.get_move(self.state)
Example #28
 def minimax_player(
     self,
     state,
     depth=2500000,
     team=1,
     heuristic_parameter=True
 ):  # creates first successors to implement minimax algorithm
     new_shape_x = np.asarray(state[1]).shape
     player1 = Minimax(n=new_shape_x,
                       default_team=team,
                       advance_option=heuristic_parameter)
     print('default_team', team, player1.default_team)
     if team == -1:
         state = player1.convert_board_state(state)
     best_move = player1.decision_maker(state, depth)
     chosen_succ, utility = best_move
     if team == -1:
         chosen_succ = player1.convert_board_state(chosen_succ)
     return chosen_succ
Example #29
def results_stats():
    counter = defaultdict(int)

    for index, board in enumerate(gen.all_positions_unique(TTT())):
        hash = str(board)
        evaluation = Minimax.minimax(9, board)
        counter[evaluation] += 1
        print(f"{index+1}: {hash} ==> {evaluation}")

    print(counter)
Example #30
    def __init__(self, render=True, ros_node=None):
        self.board = C4Board(render)
        self.max = Minimax()

        if not ros_node:
            rospy.init_node("connect4", anonymous=True)

        self.ur5_commander = rospy.Publisher("behaviors_cmd",
                                             String,
                                             queue_size=10)
        self.token_grabber = rospy.Publisher("gripper", Int16, queue_size=10)
        self.computer_vision = rospy.Publisher("c4_ready",
                                               Int16,
                                               queue_size=10)
        rospy.Subscriber("arm_status", String, self.status_callback)
        rospy.Subscriber("opponent_move", Int16, self.player_move)

        self.status = True
        self.turn = "Start"
        self.player_action = None
Example #31
 def move(self, state, rounds):
     if self.type.lower() == 'human':
         print("{0}'s turn.  {0} is {1}".format(self.name, self.color))
         column = None
         while column is None:
             try:
                 choice = int(input("Enter a move (1-7): ")) - 1
             except ValueError:
                 choice = None
             if choice is not None and 0 <= choice <= 6:
                 column = choice
             else:
                 print("Invalid choice, try again")
         return column
     else:
         print("{0}'s turn.  {0} is {1}".format(self.name, self.color))
         #return random.randint(0, 6)
         m = Minimax(state)
         best_alpha, alpha = m.findColumn(self.difficulty, state, self.color, rounds)
         return best_alpha
Example #32
File: c4.py Project: mg515/connect4
    def randomai(self, r):
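        # computes the AI move: asks Minimax for the best column on the
        # transposed/flipped board, overrides it with whichever column
        # preverjanje1 flags (if any), drops the piece, redraws it, and then
        # checks for a win or a full-board draw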
        print("---------------")
        g = deepcopy(self.stanje)
        g = transponiraj(g)
        g = obrni(g)
        m = Minimax(g)
        p = m.bestMove(self.tezavnost, g, 2)[0]
        if self.preverjanje1(self.stanje)[0] != -1 or self.preverjanje1(self.stanje)[1] != -1:
            if self.preverjanje1(self.stanje)[0] != -1:
                p = self.preverjanje1(self.stanje)[0]
            else: p = self.preverjanje1(self.stanje)[1]
            
        v = [i for i in range(len(self.stanje[p][:])) if self.stanje[p][i]!=0]
        v.append(6)
        m = min(v)-1


        self.steviloklikov[p] += 1
        self.stanje[p][m] = 2
        self.risanje(2, p, m)
        self.platno.bind("<Button-1>", self.klik)


        if self.zmaga(self.stanje):
            self.aiscore += 1
            self.risanjezmaga()
            m1=tkinter.messagebox.askyesno("Jaesus","You lost! New game?")
            if m1:
                self.zacetek()
                return
            else:
                return

        if sum(self.steviloklikov)==42:
            self.neodloceno+=1
            m1=tkinter.messagebox.askyesno("Tie","It's a tie. New game?")
            if m1:
                self.zacetek()
                return
            else:
                quit()
Example #33
    def __init__(self, mode, square_side, depth, ai_mode, omni):
        self.square_side = square_side
        player1 = Player(1, self.square_side)
        player2 = Player(2, self.square_side)
        self.player_turn = 1
        self.players = [player1, player2]
        self.mode = mode
        self.min_pos = square_side / 2
        self.max_pos = square_side * 8 - square_side / 2
        self.turn = 1

        if self.mode == '3' and omni:
            ai2 = ai_mode[1]
        else:
            ai2 = '3'

        self.ai_player2 = Minimax(depth, ai_mode[0], ai2)

        if self.mode == '3':
            if omni:
                self.ai_player1 = Minimax(depth, ai2, ai_mode[0])
            else:
                self.ai_player1 = Minimax(depth, ai2, '3')
Example #34
class Computadora(object):

    def __init__(self, color, prune=3):
        self.limiteprofundidad = prune
        evaluador = Evaluador()
        self.minimaxObj = Minimax(evaluador.puntuacion)
        self.color = color

    def obtenertableroactual(self, tablero):
        self.tablero_actual = tablero

    def obtenermovida(self):
        return self.minimaxObj.minimax(self.tablero_actual, None, self.limiteprofundidad, self.color, cambiarcolor(self.color))
Example #35
class Computer( object ):
    def __init__( self, color, prune=3 ):
        self.depthLimit = prune
        evaluator = Evaluator()
        self.minimaxObj = Minimax( evaluator.score )
        self.color = color
        
    def get_current_board( self, board ):
        self.currentBoard = board

    def get_move( self ):
        return self.minimaxObj.minimax( self.currentBoard, None, self.depthLimit, self.color, \
                             change_color(self.color) )
Example #36
    def move(self, state):
        print("{0}'s turn.  {0} is {1}".format(self.name, self.color))

        # sleeping for about 1 second makes it looks like he's thinking
        #time.sleep(random.randrange(8, 17, 1)/10.0)
        #return random.randint(0, 6)

        port = serial.Serial("COM12", 9600)
        port.isOpen()

        m = Minimax(state)
        #m = Minimax(super_board)
        best_move, value = m.bestMove(self.difficulty, state, self.color)

        #print(state)
        #input(best_move + 1)
        print(best_move + 1)
        inp = 7 - best_move

        k = str.encode(str(inp))
        #time.sleep(2)
        time.sleep(1)
        #k = bytes(inp)
        port.write(k)
        port.write(b'1')

        #print(k)
        # keep reading from the serial port until the device acknowledges
        # the move with b'9'
        while True:
            response = port.read()
            if response == b'9':
                print(response)
                break

        port.close()
        return best_move
Example #37
    def minimax(self, state):
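        # exhaustive minimax with memoisation: states already in self.table are
        # reused, terminal states get their utility, and each interior state
        # caches its best (value, move) pair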

        if state in self.table:
            return self.table[state].minimax_value

        state.updateWinCheck()
        if state.isTerminal():
            u = state.stateUtility()
            self.table[state] = Minimax(u, None)
            return u
        elif state.turn == "MAX":
            best_minimax_so_far = -np.inf
            best_move_for_state = None

            children_states = state.genNextBoards()
            for i in range(len(state.moves)):
                if state.moves[i]:
                    child_state = children_states[i]
                    minimax_of_child = self.minimax(child_state)
                    if minimax_of_child > best_minimax_so_far:
                        best_minimax_so_far = minimax_of_child
                        best_move_for_state = i
            self.table[state] = Minimax(best_minimax_so_far, best_move_for_state)
            return best_minimax_so_far
        else:
            best_minimax_so_far = np.inf
            best_move_for_state = None

            children_states = state.genNextBoards()
            for i in range(len(state.moves)):
                if state.moves[i]:
                    child_state = children_states[i]
                    minimax_of_child = self.minimax(child_state)
                    if minimax_of_child < best_minimax_so_far:
                        best_minimax_so_far = minimax_of_child
                        best_move_for_state = i
            self.table[state] = Minimax(best_minimax_so_far, best_move_for_state)
            return best_minimax_so_far
Example #38
class Computer(object):
    def __init__(self, color, depth=5, vec=None):
        self.depthLimit = depth
        evaluator = Evaluator(vec)
        self.minimaxObj = Minimax(evaluator.evaluate_all_heuristics)
        self.color = color

    def get_current_board(self, board):
        self.current_board = board

    def get_move(self):
        return self.minimaxObj.minimax(self.current_board, None,
                                       self.depthLimit, self.color,
                                       change_color(self.color))
Example #39
class Computer(object):
    def __init__(self, color, prune=3):
        self.depthLimit = prune
        evaluator = Evaluator()
        self.minimaxObj = Minimax(evaluator.score)
        self.color = color

    def get_current_board(self, board):
        self.current_board = board

    def get_move(self):
        return self.minimaxObj.minimax(self.current_board, None,
                                       self.depthLimit, self.color,
                                       change_color(self.color))
Example #40
 def __init__(self, color, prune=3):
     self.depthLimit = prune
     evaluator = Evaluator()
     self.minimaxObj = Minimax(evaluator.score)
     self.color = color
Example #41
    def move(self, state):
        print("Computer turn")

        m = Minimax(state)
        best_move, value = m.bestMove(self.difficulty, state, self.color)
        return best_move
Example #42
 def __init__(self, color, prune=3):
     self.limiteprofundidad = prune
     evaluador = Evaluador()
     self.minimaxObj = Minimax(evaluador.puntuacion)
     self.color = color
Example #43
 def move(self, state):
     print("{0}'s turn.  {0} is {1}, {2}".format(self.name, self.color, self.ctr))
     self.ctr += 1
     m = Minimax(state)
     best_move, value = m.bestMove(self.difficulty, state, self.color)
     return best_move