Example #1
def get_player_input():
    # The white pieces belong to the computer

    print("Eingabe in Form von SpalteZeile SpalteZeile. Beispiel A7 A6")
    cheesfield = ChessField()
    cheesfield.draw_field()
    minimax = mima.MiniMax(cheesfield)
    need_input = True

    while need_input:
        raw_input = input('Please make your move ')

        try:
            coordinates = cheesfield.convert_input_to_coordinates(raw_input)
            cheesfield.move_figure_to_position(coordinates[0], coordinates[1],
                                               False)
        except BaseException as e:
            print(str(e))
            continue

        winner = cheesfield.get_winner()
        if winner is None:
            minimax.compute()
            winner = cheesfield.get_winner()

        cheesfield.draw_field()
        if winner is not None:
            output_winner(winner)
Example #2
    def __init__(self, board, **kwargs):
        super(UCT, self).__init__(board, **kwargs)
        self.stats = {}

        self.max_depth = 0
        self.data = {}
        time = 30  # should be 1 minute, but kept shorter in case time runs out
        self.calculation_time = float(time)
        # self.calculation_time = float(kwargs.get('time', 3))  # @ST @NOTE calculation_time should be 1 min here
        self.max_actions = int(kwargs.get('max_actions', 64))

        # Exploration constant: increase for more exploratory actions,
        # decrease to prefer actions with known higher win rates.
        self.C = float(kwargs.get('C', 1.96))  # original value: 1.4

        self.plugged_in_minimax = minimax.MiniMax(reversi.Board())
        self.minimax_max_depth = 1
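
The exploration constant C above is the usual UCB1 trade-off knob. A minimal sketch of how such a constant typically enters action selection in UCT is shown below; the stats layout ((action, state) -> plays/wins counts) and the helper names are assumptions for illustration, not the actual fields of this class.

import math
import random

def ucb1_select(stats, state, actions, C=1.96):
    # `stats` is assumed to map (action, state) -> {'plays': int, 'wins': int}.
    # Try every action at least once before trusting the statistics.
    untried = [a for a in actions if (a, state) not in stats]
    if untried:
        return random.choice(untried)

    total_plays = sum(stats[(a, state)]['plays'] for a in actions)
    log_total = math.log(total_plays)

    def ucb1(a):
        s = stats[(a, state)]
        exploit = s['wins'] / s['plays']                 # known win rate
        explore = C * math.sqrt(log_total / s['plays'])  # exploration bonus
        return exploit + explore

    # A higher C weights the exploration term more heavily.
    return max(actions, key=ucb1)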
Example #3
def main():
    # g = game.SuperTicTacToe()
    # g.start()
    # g.play()
    wins = collections.defaultdict(int)
    numTrials = 100
    print('out of ' + str(numTrials) + ' games....')
    for i in range(0, numTrials):
        # game = simulate.Simulate(agent1 = mcts.MonteCarloTreeSearch(g.SuperTicTacToe(verbose = 0)), agent2 = random_agent.RandomAgent(), game = g.SuperTicTacToe(verbose = 0), verbose = 0)
        # result = game.run(trial)
        game = simulate.Simulate(
            # agent1=deep_q_learning.DeepQLearning(),
            agent1=minimax.MiniMax(),
            agent2=random_agent.RandomAgent(),
            game=g.SuperTicTacToe(verbose=0),
            verbose=1,
        )
        result = game.run(i)
        wins[str(result)] += 1
    print('x won ' + str(100.0 * wins['x'] / numTrials) + '% of the time')
    print('o won ' + str(100.0 * wins['o'] / numTrials) + '% of the time')
    print('tie ' + str(100.0 * wins['False'] / numTrials) + '% of the time')
Example #4
        game_status = get_game_status(game_board)
        if game_status == 2:
            print("BLUE WINS!")
        if game_status == 3:
            print("ITS A TIE!")
        if game_status != 0:
            print_char_board(alphabet_board)
            return game_status

        move_number += 1


if __name__ == "__main__":
    alpha_beta_red = alphabeta.AlphaBeta(1, 3)
    alpha_beta_blue = alphabeta.AlphaBeta(2, 3)
    minimax_red = minimax.MiniMax(1, 3)
    minimax_blue = minimax.MiniMax(2, 3)
    reflex_red = reflex.ReflexAgent(1)
    reflex_blue = reflex.ReflexAgent(2)

    print("Alpha-Beta vs MiniMax")
    start_1 = time.time()
    play_game(alpha_beta_red, minimax_blue)
    end_1 = time.time()
    print("total runtime : {0:.3f} seconds".format(end_1 - start_1))
    print("Red nodes expanded: " + str(alpha_beta_red.get_nodes_expanded()))
    alpha_beta_red.reset()
    print("Blue nodes expanded: " + str(minimax_blue.get_nodes_expanded()))
    minimax_blue.reset()

    print("MiniMax vs Alpha-Beta")
Example #5
#Name: Anton Strickland

import minimax
import state
import visual

tryAgain = True
playing = True
aiTurn = True
gameboard = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
validMoves = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
actualState = minimax.State(gameboard[:])
game = minimax.MiniMax()
remainingMoves = validMoves[:]
Vis = visual.Visualizer()
winString = ""
 
while tryAgain:

  while playing:
    Vis.visualize(actualState, 0, 100)
    if actualState.aiTurn:
      testState = minimax.State(actualState.grid[:])
      # print(testState)
      move = game.MiniMaxDecision(testState)
      actualState.TakeTurn(move)
      remainingMoves.remove(move)
    else:
      while move not in remainingMoves:
        move = Vis.getInput()
      actualState.TakeTurn(int(move))
Example #6
def main():
    monte = montecarlo.MonteCarlo()
    minim = minimax.MiniMax()
Example #7
import player
import gameBoard
import random
import minimax
import time
from numpy import inf

if __name__ == '__main__':
	# Setup Game
	player1 = player.Player(0)
	player2 = player.Player(1)
	players = [player1, player2]
	myGameBoard = gameBoard.GameBoard()
	myGameBoard.printGameBoardWorkers(-1, -1, players)
	myMiniMax = minimax.MiniMax()
	
	# Initial worker placement
	# for playerNum in range(2):
	# 	for workerNum in range(2):
	# 		placedWorker = False
	# 		while placedWorker == False:
	# 			row = random.randint(0, 4)
	# 			col = random.randint(0, 4)
	# 			placedWorker = myGameBoard.placeInitialWorker(players[playerNum], workerNum, row, col)
	# 	myGameBoard.printGameBoardWorkers(-1, -1, players)
	myGameBoard.placeInitialWorker(players[0], 0, 0, 0)
	myGameBoard.placeInitialWorker(players[0], 1, 0, 1)
	myGameBoard.placeInitialWorker(players[1], 0, 1, 0)
	myGameBoard.placeInitialWorker(players[1], 1, 1, 1)
	for turnNum in range(100):
		print('TURN: ', turnNum)
Example #8
import mancala
import minimax

m = mancala.Mancala("Human", "CPU")
ai = minimax.MiniMax(mancala.PLAYER2, depth=5)

while not m.is_game_over():
    try:
        print(m)
        if m.current_player() == mancala.PLAYER1:
            move = int(
                input("current player: %s" % m.get_current_player_name()))
        else:
            print("getting move from AI")
            move = ai.get_next_move(m)
            print("AI chose %i" % move)
        m.play_turn(move)

    except Exception as e:
        print(e)

print(m.get_winner() + " wins!")
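
For context, ai.get_next_move(m) in Example #8 is a depth-limited minimax search from the AI player's point of view. A minimal sketch of that kind of move chooser is below; the game interface it assumes (legal_moves, copy, play_turn, is_game_over, current_player, score_for) is hypothetical, not the actual mancala API.

def minimax_move(game, player, depth=5):
    # Hypothetical game interface: legal_moves(), copy(), play_turn(move),
    # is_game_over(), current_player(), score_for(player).
    def search(state, d):
        if d == 0 or state.is_game_over():
            return state.score_for(player)
        values = []
        for move in state.legal_moves():
            child = state.copy()
            child.play_turn(move)
            values.append(search(child, d - 1))
        if not values:
            return state.score_for(player)
        # Maximize on our own turn, minimize on the opponent's.
        return max(values) if state.current_player() == player else min(values)

    def value_of(move):
        child = game.copy()
        child.play_turn(move)
        return search(child, depth - 1)

    return max(game.legal_moves(), key=value_of)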