def minimax_simple(node, depth, maximizingPlayer):
    """Plain minimax search with no pruning.

    node             -- current board position.
    depth            -- remaining plies to search.
    maximizingPlayer -- True when it is our (myColor) turn.

    Returns a (move, value) pair; move is None at leaf nodes.
    """
    global myColor
    global opponentColor
    # Side to move: the maximizing player plays our color.
    color = myColor if maximizingPlayer else opponentColor

    # Leaf: out of depth, or the position is terminal for the mover.
    if depth == 0 or isTerminal(node, color):
        return (None, heuristic(node))

    bestMove = None
    if maximizingPlayer:
        bestValue = -1000000000000
        for candidate in getAllPossibleMoves(node, color):
            child = deepcopy(node)
            gamePlay.doMove(child, candidate)
            _, score = minimax_simple(child, depth - 1, False)
            # Strict comparison keeps the earliest of tied moves.
            if score > bestValue:
                bestValue = score
                bestMove = candidate
    else:
        bestValue = 1000000000000
        for candidate in getAllPossibleMoves(node, color):
            child = deepcopy(node)
            gamePlay.doMove(child, candidate)
            _, score = minimax_simple(child, depth - 1, True)
            if score < bestValue:
                bestValue = score
                bestMove = candidate

    return (bestMove, bestValue)
Ejemplo n.º 2
0
def minimax(board, color, time, alpha, beta, depth, maximizingPlayer, movesRemaining):
    """Alpha-beta minimax; returns (value, bestMove).

    Fixes: the original mixed tab- and space-indented lines (a TabError
    under Python 3) and compared `maximizingPlayer == True`.  Logic is
    otherwise unchanged.

    board/color      -- position and side to move at this node.
    alpha, beta      -- pruning window inherited from the parent.
    depth            -- remaining plies; 0 triggers static evaluation.
    maximizingPlayer -- True on MAX plies, False on MIN plies.
    """
    moves = getAllPossibleMoves(board, color)
    if depth == 0 or not moves:
        # Leaf: static evaluation, no move to report.
        return evaluation(board, color), []

    if maximizingPlayer:
        best = float("-inf")
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard, move)
            # NOTE(review): passes `best` down as the child's alpha,
            # as the original did.
            alpha, _ = minimax(newBoard, gamePlay.getOpponentColor(color),
                               time, best, beta, depth - 1, False,
                               movesRemaining)
            if best == float("-inf") or alpha > best:
                bestMove = move
                best = alpha
            if best >= beta:  # beta cutoff
                break
        return best, bestMove
    else:
        best = float("inf")
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard, move)
            beta, _ = minimax(newBoard, gamePlay.getOpponentColor(color),
                              time, alpha, best, depth - 1, True,
                              movesRemaining)
            if best == float("inf") or beta < best:
                bestMove = move
                best = beta
            if alpha >= best:  # alpha cutoff
                break
        return best, bestMove
Ejemplo n.º 3
0
def traverseTree(parnt, board, color, depth):
    """Recursively expand the game tree below `parnt`, backing minimax
    values up into the node structure with alpha-beta style cutoffs
    tracked through module globals.

    parnt -- a list-based tree node; parnt[1] holds its backed-up value
             (node layout comes from makeNode -- node[0] is presumably
             the parent link; confirm against makeNode).
    board -- board position at this node.
    color -- side to move at this node.
    depth -- current ply; even plies maximize, odd plies minimize
             (see the depth % 2 tests below).

    Returns None; results are written into the node structure and the
    globals alpha/beta/localalpha/flag.
    """
    #print "color",color
    global alpha
    global beta
    global localalpha
    global flag
    moves = getMoves(board, color)

    # Leaf: depth limit reached or no legal moves -- store the static value.
    if depth == globalDepth or len(moves) == 0:
        value = valueBoard(board)
        parnt[1] = value
        return

    #print "moves",moves
    #if len(moves) == 0:
    #	return

    #iCounter=0
    for move in moves:
        #print "iCounter :",iCounter
        flag = True
        node = makeNode(parnt, None, move[0], move[1])
        #print "a"
        newBoard = deepcopy(board)
        #print "b"
        gamePlay.doMove(newBoard, color, move)
        #print "c"
        #gamePlay.printBoard(newBoard)
        #print newBoard
        oppColor = gamePlay.opponent(color)
        traverseTree(node, newBoard, oppColor, depth + 1)
        #if traverseTreeReturn=="depth" or traverseTreeReturn=="pass":
        #gamePlay.printBoard(newBoard)
        #value=valueBoard(newBoard)
        #print "value",node[0][1]
        # Back the child's value (node[1]) up into its parent (node[0][1]).
        if node[0][1] == None:
            # First child evaluated: it defines the parent's value so far.
            node[0][1] = node[1]
            if depth % 2 == 0:
                localalpha = node[1]
            if depth % 2 == 1:
                beta = node[1]
        elif node[0][1] <= node[1] and depth % 2 == 0:
            # Maximizing ply: child raises the parent's value.
            node[0][1] = node[1]
            if localalpha != None and localalpha < node[1]:
                localalpha = node[1]
        elif node[0][1] >= node[1] and depth % 2 == 1:
            # Minimizing ply: child lowers the parent's value.
            node[0][1] = node[1]
            if beta != None and beta > node[1]:
                beta = node[1]

        # Cutoffs: stop expanding remaining siblings once the window closes.
        if alpha != None and beta != None:
            if alpha >= beta and flag == True:
                flag = False
                break
        if localalpha != None and beta != None:
            if localalpha >= beta and flag == True:
                flag = False
                break

    return
Ejemplo n.º 4
0
def nextMove(board, color, time, movesRemaining):
    """Entry point: choose a move for `color` via alpha-beta minimax.

    Search depth adapts to the clock: 8 normally, 5 when 60 seconds or
    less remain, and 4 for the first few moves of the game.

    Fixes: the original had tab-indented lines inside a space-indented
    body (a TabError under Python 3), and the single-move branch applied
    the move to a throwaway deep copy of the board, which had no effect.
    """
    depth = 8
    global myColor
    myColor = color

    global moveCount
    moveCount += 1

    # Low on time: shrink the search.
    if time <= 60:
        depth = 5

    # Opening moves: a shallow search is enough.
    if moveCount <= 4:
        depth = 4
    moves = getAllPossibleMoves(board, color)

    # Single legal move: return it without searching.
    if len(moves) == 1:
        return moves[0]

    val, bestMove = minimax(board, color, time, float("-inf"), float("inf"),
                            depth, True, movesRemaining)
    return bestMove
def max_stage(board, depth_limit, cur_depth, player):
    """Maximizing ply of minimax: return the best value reachable for
    `player` from `board`, or "pass" when there are no legal moves.

    Fixes: removed the unused local `best_move` (assigned but never
    returned -- only the value is) and the needless else-nesting.
    """
    # Base case: depth limit reached, evaluate statically.
    if cur_depth == depth_limit:
        return eval_func(board, player)

    # Find all possible moves.
    moves = generate_possible_moves(board, player)

    # No possible moves: game over on this branch.
    if len(moves) == 0:
        return "pass"

    highest_minimax_value = float('-inf')
    for move in moves:
        cur_board = deepcopy(board)
        gamePlay.doMove(cur_board, player, move)
        # Recurse into the minimizing ply for the opponent.
        cur_minimax_value = min_stage(cur_board, depth_limit,
                                      cur_depth + 1, flip_player(player))
        if cur_minimax_value > highest_minimax_value:
            highest_minimax_value = cur_minimax_value

    return highest_minimax_value
Ejemplo n.º 6
0
def minimax(board,depth,alpha,beta,maximizingPlayer):
    """Alpha-beta minimax returning a scalar evaluation of `board`.

    NOTE(review): the maximizing branch expands moves for
    opponentPlayerColor and the minimizing branch for currentPlayerColor,
    which looks inverted relative to textbook minimax -- presumably the
    caller has already applied the current player's move before calling
    with maximizingPlayer=True; confirm against the matching nextMove.
    """
    global currentPlayerColor 
   
    global opponentPlayerColor
    # Stop at depth 0, or as soon as either side has no legal move.
    if depth==0 or not isAnyMovePossible(board, currentPlayerColor) or not isAnyMovePossible(board,opponentPlayerColor):
        return evaluation(board) 
    if maximizingPlayer:
        v = -sys.maxint - 1  # smallest Python 2 int
        moves = getAllPossibleMoves(board, opponentPlayerColor)
        for move in moves:
            newBoard = deepcopy(board)
            
            gamePlay.doMove(newBoard,move)
            #print "inside max",color
            #color=getOpponentColor(color)
            v = max(v,minimax(newBoard, depth-1, alpha, beta, False))
            alpha = max(alpha,v)
            if beta <= alpha:
                # Beta cutoff: remaining siblings cannot change the result.
                return alpha
        return v
    else:
        v = sys.maxint
        moves = getAllPossibleMoves(board, currentPlayerColor)
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard,move)
            #print "inside min",color
            #color=getOpponentColor(color)
            v = min(v,minimax(newBoard, depth-1, alpha, beta, True))
            beta = min(beta,v)
            if beta <= alpha:
                # Alpha cutoff.
                return beta
        return v
        
Ejemplo n.º 7
0
def expandHelper(board, color, x, y, lst):
    """If (x, y) is a legal move for `color`, append the pair
    ((x, y), resulting board) to `lst`; otherwise do nothing."""
    if not myValidMove(board, color, (x, y)):
        return
    successor = np.array(board)
    doMove(successor, color, (x, y))
    lst.append(((x, y), successor))
Ejemplo n.º 8
0
def nextMove(board, color, time, movesRemaining):
    """Entry point: choose a move for `color` via alpha-beta minimax.

    Search depth adapts to the clock: 8 normally, 5 when 60 seconds or
    less remain, and 4 for the first few moves of the game.

    Fix: the single-move branch applied the move to a throwaway deep
    copy of the board, which had no effect; that dead work is removed.
    """
    depth = 8
    global myColor
    myColor = color

    global moveCount
    moveCount += 1

    # Low on time: shrink the search.
    if time <= 60:
        depth = 5

    # Opening moves: a shallow search is enough.
    if moveCount <= 4:
        depth = 4
    moves = getAllPossibleMoves(board, color)

    # Single legal move: return it without searching.
    if len(moves) == 1:
        return moves[0]

    val, bestMove = minimax(board, color, time, float("-inf"), float("inf"),
                            depth, True, movesRemaining)
    return bestMove
Ejemplo n.º 9
0
def alphabetaPruning(board, color, alpha, beta, depth, maxval, movesRemaining):
    """Alpha-beta search; returns the minimax value of `board`.

    Fix: the original mixed tab- and space-indented lines (L-shaped
    TabError under Python 3); indentation is normalized to spaces.
    Logic is unchanged.

    color  -- side to move at this node.
    maxval -- True on MAX plies, False on MIN plies.
    The static evaluation is always taken from maxColor's point of view
    (maxColor is a module global set by the caller).
    """
    # Leaf node: return the static evaluation.
    if depth == 0 or movesRemaining == 0:
        return evaluation(board, maxColor)
    moves = getAllPossibleMoves(board, color)
    opponentColor = gamePlay.getOpponentColor(color)
    test = float("inf")
    if maxval:
        value = -test
        for move in moves:
            evalBoard = deepcopy(board)
            gamePlay.doMove(evalBoard, move)
            value = max(value, alphabetaPruning(evalBoard, opponentColor,
                                                alpha, beta, depth - 1,
                                                False, movesRemaining))
            alpha = max(alpha, value)
            if beta <= alpha:  # beta cutoff
                break
        return value
    else:
        value = test
        for move in moves:
            evalBoard = deepcopy(board)
            gamePlay.doMove(evalBoard, move)
            value = min(value, alphabetaPruning(evalBoard, opponentColor,
                                                alpha, beta, depth - 1,
                                                True, movesRemaining))
            beta = min(beta, value)
            if beta <= alpha:  # alpha cutoff
                break
        return value
Ejemplo n.º 10
0
def minimax(board, color, time, alpha, beta, depth, maximizingPlayer,
            movesRemaining):
    """Alpha-beta minimax; returns (value, best move).

    At depth 0 or when the mover has no legal move, returns the static
    evaluation and an empty move list.
    """
    legal = getAllPossibleMoves(board, color)
    if depth == 0 or not legal:
        return evaluation(board, color), []

    if maximizingPlayer:
        best = float("-inf")
        for candidate in legal:
            child = deepcopy(board)
            gamePlay.doMove(child, candidate)
            # The child's alpha is our running best, as in the original.
            score, _ = minimax(child, gamePlay.getOpponentColor(color),
                               time, best, beta, depth - 1, False,
                               movesRemaining)
            if best == float("-inf") or score > best:
                bestMove = candidate
                best = score
            if best >= beta:  # beta cutoff
                break
    else:
        best = float("inf")
        for candidate in legal:
            child = deepcopy(board)
            gamePlay.doMove(child, candidate)
            score, _ = minimax(child, gamePlay.getOpponentColor(color),
                               time, alpha, best, depth - 1, True,
                               movesRemaining)
            if best == float("inf") or score < best:
                bestMove = candidate
                best = score
            if alpha >= best:  # alpha cutoff
                break
    return best, bestMove
Ejemplo n.º 11
0
def alphabeta(board, alpha, beta, depth, color, player):
    """Alpha-beta search over the 8x8 board; returns the heuristic value
    of `board` from `color`'s point of view.

    `player` is the side to move at this node; it maximizes when it
    equals `color` and minimizes otherwise.
    """
    # Collect every legal square for the side to move.
    can_moves = [(i, j) for i in range(8) for j in range(8)
                 if gamePlay.valid(board, player, (i, j))]

    if depth == 0 or not can_moves:
        return calculate_heuristic(board, color)

    maximizing = (player == color)
    v = -999999 if maximizing else 999999
    for can_move in can_moves:
        temp_board = copy.deepcopy(board)
        gamePlay.doMove(temp_board, player, can_move)
        child_value = alphabeta(temp_board, alpha, beta, depth - 1, color,
                                gamePlay.opponent(player))
        if maximizing:
            v = max(v, child_value)
            alpha = max(alpha, v)
        else:
            v = min(v, child_value)
            beta = min(beta, v)
        if alpha >= beta:  # window closed: prune remaining siblings
            break

    return v
Ejemplo n.º 12
0
def nextMove(board, color, time):
    """Pick the move with the best alpha-beta value for `color`, or
    "pass" when there is no legal move.

    Fixes: the original had `if best_val = max(...)` -- an assignment
    inside a condition, which is a SyntaxError in Python -- and never
    returned the chosen move.  Also removed the dead `moves == "pass"`
    check (`moves` is always a list here).
    """
    best_val = None
    best_move = None
    moves = []
    for row in range(8):
        for col in range(8):
            if gamePlay.valid(board, color, (row, col)):
                moves.append((row, col))
    if len(moves) == 0:
        return "pass"
    opp = gamePlay.opponent(color)
    # Evaluate max's position and choose the best value.
    if color == "B":
        for move in moves:
            # NOTE(review): board[:] is a shallow copy -- if rows are
            # lists, doMove mutates the original rows; confirm intent.
            newBoard = board[:]
            gamePlay.doMove(newBoard, color, move)
            val = alpha_beta(newBoard, opp, 3, -INFINITY, INFINITY)
            # Keep the move with the highest alpha-beta value.
            if best_val is None or val > best_val:
                best_val = val
                best_move = move
    return best_move
def minimax_strategy(board, depth_limit, cur_depth, player):
    """Root of the minimax search: return the best move for `player`,
    or "pass" when no legal move exists."""
    moves = generate_possible_moves(board, player)

    # No possible moves: the game is over for this player.
    if len(moves) == 0:
        return "pass"

    best_move = moves[0]
    best_minimax_value = float('-inf')
    for candidate in moves:
        child = deepcopy(board)
        gamePlay.doMove(child, player, candidate)
        # Descend into the minimizing ply for the opponent.
        value = min_stage(child, depth_limit, cur_depth + 1,
                          flip_player(player))
        if value > best_minimax_value:
            best_minimax_value = value
            best_move = candidate

    return best_move
Ejemplo n.º 14
0
def nextMove(board, color, time):
    """Build a one-ply move tree, score it with alpha-beta, and return
    the child move with the highest cost, or "pass" when no move exists."""
    root = Node()  # root of the game tree
    for i in range(8):
        for j in range(8):
            if valid(board, color, (i, j)):
                # Legal square: add it as a child holding the resulting board.
                child = Node()
                child.data = (i, j)
                child.board = deepcopy(board)
                doMove(child.board, color, child.data)
                root.children.append(child)
    if len(root.children) == 0:
        return "pass"

    makeTree(root, color, 1)                            # expand the tree
    alphabeta(root, limitDepth, -INF, INF, MAX, color)  # assign costs

    bestmove = 0
    maxx = -INF
    # Pick the child with the largest assigned cost.
    for child in root.children:
        if maxx < child.cost:
            maxx = child.cost
            bestmove = child.data

    return bestmove
Ejemplo n.º 15
0
def resultOfAction(kBoard, kColor, kNextMove):
    """Return a copy of kBoard after kColor plays kNextMove.

    The original board is left untouched.
    """
    successor = deepcopy(kBoard)
    doMove(successor, kColor, kNextMove)
    return successor
Ejemplo n.º 16
0
def expandHelper(board, color, x, y, lst):
    """If (x, y) is a legal move for `color`, append the resulting board
    to `lst`; otherwise do nothing.

    Fix: removed the dead `global visitedStates` declaration -- the name
    was never read or written in this function.
    """
    if myValidMove(board, color, (x, y)):
        newboard = np.array(board)
        doMove(newboard, color, (x, y))
        lst.append(newboard)
Ejemplo n.º 17
0
def nextMove(board, color, time, movesRemaining):
    """Root of an alpha-beta search; returns the chosen move.

    The search depth scales down as the remaining clock `time` (seconds)
    shrinks.  When several moves tie on the minimax value, a move whose
    landing square is diagonally adjacent to a friendly piece (a
    defensive shape) is preferred.

    NOTE(review): alpha starts as None and is later compared with
    numbers (`best > alpha`) -- that ordering is legal in Python 2 only
    and raises TypeError on Python 3.
    """
    moves = getAllPossibleMoves(board, color)
    # Single legal move: no search needed.
    if len(moves) == 1:
        return moves[0]
    opponentColor = gamePlay.getOpponentColor(color)
    equalMoves = []
    best = None
    alpha = None
    beta = float("inf")
    # If the time remaining < 3 seconds, then just apply simpleGreedy and increase depth according to time
    if time < 3:
        depth = 1
    elif time < 10:
        depth = 2
    elif time < 30:
        depth = 4
    else:
        if movesRemaining > 40:
            depth = 8
        else:
            depth = 6

    for move in moves: # this is the max turn(1st level of minimax), so next should be min's turn
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard,move)
        #Beta is always inf here as there is no parent MIN node. So no need to check if we can prune or not.
        moveVal = evaluation(newBoard, color, depth, 'min', opponentColor, alpha, beta)
        if best == None or moveVal > best:
            bestMove = move
            best = moveVal
            # New best value: restart the list of value-tied moves.
            equalMoves = []
            equalMoves.append(move)
        elif moveVal == best:
            equalMoves.append(move)
        if best > alpha:
            alpha = best
    #So the equalMoves consists of all the moves that have ended up with same value after Minimax evaluation
    if len(equalMoves) > 1:
        #The below logic tries to see if there is any next move that will form a defensive structure from the
        #equalMoves list and returns it.
        for move in equalMoves:
            # The landing square is the last element of the move sequence.
            l = len(move)
            xy = gamePlay.serialToGrid(move[l-1])
            x = xy[0]
            y = xy[1]
            # Check the four diagonal neighbours for a same-colored piece
            # (case-insensitive so kings count too).
            if (x+1) <= 7:
                if (y+1) <= 7 and board[x+1][y+1].lower() == color.lower():
                    return move
                if (y-1) >= 0 and board[x+1][y-1].lower() == color.lower():
                    return move
            if (x-1) >= 0:
                if (y+1) <= 7 and board[x-1][y+1].lower() == color.lower():
                    return move
                if (y-1) >= 0 and board[x-1][y-1].lower() == color.lower():
                    return move
    return bestMove
Ejemplo n.º 18
0
def nextMove(board, color, time, movesRemaining):
    """Root move chooser: splits the game clock into quarters (relative
    to the first `time` value seen, recorded in the init_time global)
    and picks a randomized search depth per quarter, then runs miniMax
    on each legal move and returns the best.

    NOTE(review): `depth` stays 0 when time >= init_time or when no
    interval branch matches (e.g. the second quarter with movesRemaining
    outside both tested ranges, and the whole gap between init_time/4
    and init_time/2) -- presumably miniMax at depth 0 just evaluates the
    board; confirm against miniMax.
    """
    global my_color, init_time, time_set, init_moves
    my_color = color
    print "My move turn\n"
    moves = getAllPossibleMoves(board, color)
    #Trying to find the move where I have best score
    bestMove=None

    if len(moves)==1:              ## return the move when only a single move is present 
        bestMove = moves[0]
        print "return the only move left\n"
    else:                                 ## more than one possible move is present.
        best = None
        depth =0
        heuristic = curr_evaluation
        alpha = -float('inf')
        beta = float('inf')
        if not time_set:            ## recording the time given so as to split into intervals
            time_set = True
            init_time = time
            init_moves=movesRemaining
        ##print init_time, "W##########"
        if init_time*3/4 <=time and time<init_time:   ##game is in the first quarter
            if movesRemaining>146:
                depth = random.randrange(2,4)
            elif movesRemaining>138 and movesRemaining <=146:
                depth =5
            elif movesRemaining > init_moves*2/3 and movesRemaining<=138:
                depth=6
            else:
                depth=5
        elif init_time/2 <=time and time< init_time*3/4:     ## game is in the second quarter
            if movesRemaining>init_moves/3 and movesRemaining< 2*init_moves/3:
                depth = random.randrange(3,5)
            elif movesRemaining>0 and movesRemaining <=init_moves/3:
                depth = random.randrange(4,6)
                #heuristic = 
        elif time>0 and time<= init_time/4:
            if movesRemaining>init_moves/3 and movesRemaining< 2*init_moves/3:
                depth = random.randrange(4,7)
            elif movesRemaining>0 and movesRemaining <=init_moves/3:
                depth = random.randrange(6,8)
            else:
                depth=5
                ##heuristic =
            
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard,move)        
            moveVal = miniMax(newBoard,depth,float('inf'), alpha, True, gamePlay.getOpponentColor(color), heuristic)  ### we have already evaluated Max's childs here so, its Min's turn to make a move on each of these childs, so min turn is true.
            if best == None or moveVal > best:
                bestMove = move
                best = moveVal
                alpha = moveVal
           
    return bestMove
Ejemplo n.º 19
0
def minimax(board,depth,alpha,beta,maximizingPlayer):
    """Alpha-beta minimax whose leaf evaluation blends several heuristics,
    weighted differently by game stage (piece count of the current player).

    NOTE(review): several weights are 0.5 where the neighbouring terms
    use 0.75/0.20-style fractions -- possibly 0.05 was intended; the
    sums exceed 1.0, so confirm the weights are deliberate.

    NOTE(review): as in the sibling minimax, the maximizing branch
    expands opponentPlayerColor's moves -- presumably the caller applied
    the current player's move first; confirm against nextMove.
    """
    global currentPlayerColor 
   
    global opponentPlayerColor
    # Stop at depth 0, or as soon as either side has no legal move.
    if depth==0 or not isAnyMovePossible(board, currentPlayerColor) or not isAnyMovePossible(board,opponentPlayerColor):
        if countPieces(board,currentPlayerColor)>7:
            '''
            initial stage and opening moves trying to focus on center and attack
            '''
            return (0.75 * evaluationMovingToCenter(board))+ (0.20 * evaluationAttackFunction(board))+(0.5*evaluationColorVsOpposite(board))
        
        elif countPieces(board,currentPlayerColor)>=6 :
            '''
            middle stage have to be defensive And would also need to be attacktive
            ''' 
            return (0.75 * evaluationMovingToDefense(board))+ (0.15 * evaluationAttackFunction(board))+(0.5*evaluationColorVsOpposite(board)+(0.5*evaluationMakingItKing(board)))
        elif countPieces(board,currentPlayerColor)>4 :
            '''
            middle stage have to be get in center amd attack
             
            '''  
            return (0.20 * evaluationMovingToCenter(board))+ (0.70 * evaluationAttackFunction(board))+(0.5*evaluationColorVsOpposite(board)+(0.5*evaluationMakingItKing(board))+evaluationCanBeAttacked(board))
        else:
            '''
            when 4 or less than four pieces are remaining
            '''   
            return (0.50 * evaluationAttackFunction(board))+(0.30*evaluationColorVsOpposite(board)+(0.20*evaluationMovingToDefense(board))+evaluationCanBeAttacked(board))      
    if maximizingPlayer:
        v = -sys.maxint - 1  # smallest Python 2 int
        moves = getAllPossibleMoves(board, opponentPlayerColor)
        for move in moves:
            newBoard = deepcopy(board)
            
            gamePlay.doMove(newBoard,move)
            #print "inside max",color
            #color=getOpponentColor(color)
            v = max(v,minimax(newBoard, depth-1, alpha, beta, False))
            alpha = max(alpha,v)
            if beta <= alpha:
                # Beta cutoff.
                return alpha
        return v
    else:
        v = sys.maxint
        moves = getAllPossibleMoves(board, currentPlayerColor)
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard,move)
            #print "inside min",color
            #color=getOpponentColor(color)
            v = min(v,minimax(newBoard, depth-1, alpha, beta, True))
            beta = min(beta,v)
            if beta <= alpha:
                # Alpha cutoff.
                return beta
        return v
        
Ejemplo n.º 20
0
def nextMove(board, col, time, movesRemaining):
    """Choose the current player's move, scaling search depth with the
    clock and the number of moves remaining.

    Fixes: the inner `if time < 5` branch was unreachable -- the outer
    `if time < 5 or movesRemaining < 4` already diverts those cases to
    the greedy evaluation -- so it has been removed.  `bestMove` is also
    initialized so an empty move list no longer raises NameError.
    """
    global currentPlayerColor
    currentPlayerColor = col
    global opponentPlayerColor
    opponentPlayerColor = getOpponentColor(col)
    moves = getAllPossibleMoves(board, currentPlayerColor)

    # Only one legal move: no need to evaluate, just return it.
    if len(moves) == 1:
        return moves[0]

    best = None
    bestMove = None
    for move in moves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, move)
        alpha = -sys.maxint - 1
        beta = sys.maxint
        if time < 5 or movesRemaining < 4:
            # Nearly out of time or moves: one-board greedy evaluation.
            moveVal = evaluation1(newBoard)
        elif (movesRemaining / 2) < 30:
            # Few moves remain: keep the search shallow.
            moveVal = minimax(newBoard, 3, alpha, beta, True)
        elif movesRemaining > 140:
            # Opening: the heuristics handle it, no need to go deep.
            moveVal = minimax(newBoard, 3, alpha, beta, True)
        else:
            # Mid/late game: scale depth with remaining time
            # (time >= 5 is guaranteed here).
            if time < 18:
                depth = 3
            elif time < 23:
                depth = 5
            elif time < 28:
                depth = 7
            else:
                depth = 5
            moveVal = minimax(newBoard, depth, alpha, beta, True)

        if best is None or moveVal > best:
            bestMove = move
            best = moveVal
    return bestMove
Ejemplo n.º 21
0
def nextMove(board, col, time, movesRemaining):
    """Root move chooser with a crude iterative-deepening scheme: the
    global `deep` grows by one per call (capped at 8), and the global
    `timeList` records the clock at every call.

    NOTE(review): `elapsed`, `originalTime` and `differenceTime` are
    computed but never used (only printed/implied by the commented-out
    condition below); `bestMove` is unbound if `moves` is empty --
    presumably the caller guarantees at least one legal move; confirm.
    """
    global deep
    global timeList
    #Assigned global current and  opponent color used to expand nodes and evaluate as per the player
    global currentPlayerColor 
    currentPlayerColor=col
    global opponentPlayerColor
    opponentPlayerColor = getOpponentColor(col)
    moves = getAllPossibleMoves(board, currentPlayerColor)#returns a list of possible moves[] for the current player
    #Trying to find the move where my game has best score
    if len(moves)==1:
        return moves[0]
    timeList.append(time)
    # Clock bookkeeping: elapsed time since the first recorded call.
    elapsed=timeList[0]-time
    originalTime = elapsed + time
    print "timeList" , timeList
    if(len(timeList)>=2):
        print "secondLast", timeList[-2]
        differenceTime = timeList[-2]-timeList[-1]
    else:
        differenceTime=0    
    print "last",timeList[-1]
    
    best = None
    
    #if time==0 or ((differenceTime*2) >= timeList[-1]) or countPieces(board, currentPlayerColor)<= differenceTime:
            #or time differnce between last and second last 
    # Panic mode: very little time left (or more moves than seconds) --
    # fall back to a random move instead of searching.
    if time<=3 or movesRemaining>=time:
        newBoard = deepcopy(board)
        print "calling random move"
        bestMove = randomMove(newBoard,currentPlayerColor)                
    else:        
        for move in moves:  
            newBoard = deepcopy(board)          
            gamePlay.doMove(newBoard,move)#get the possible states
            depth=deep
            
            alpha = -sys.maxint - 1
            beta = sys.maxint           
            print "calling next move" 
            
            moveVal = minimax(newBoard,deep,alpha,beta,True)
                    
                        
            if best == None or moveVal > best:
                bestMove = move
                best = moveVal
        # Iterative deepening across turns: search one ply deeper next call.
        if deep<=7:            
            deep=deep+1
            
    print "increased depth,new depth is ",deep   
    #timeList.append(time)
    print"time list after eval",timeList    
    return bestMove
def nextMove(board, color, time, movesRemaining):
    """One-ply root search: apply each legal move and keep the one whose
    successor scores best under getMoveVal (depth-4 minimax for the
    opponent)."""
    best = bestMove = None
    for move in getAllPossibleMoves(board, color):
        child = deepcopy(board)
        gamePlay.doMove(child, move)
        score = getMoveVal(child, getOpponentColor(color), time,
                           movesRemaining, 4, False, 0, sys.maxint)
        # Strict comparison keeps the earliest of tied moves.
        if best is None or score > best:
            bestMove = move
            best = score
    return bestMove
def nextMove(board, color, time, movesRemaining):
    """Greedy one-ply search: play the move whose resulting board
    evaluates best for `color`."""
    best = None
    for candidate in getAllPossibleMoves(board, color):
        child = deepcopy(board)
        gamePlay.doMove(child, candidate)
        score = evaluation(child, color)
        # Strictly-better keeps the earliest of tied moves.
        if best is None or score > best:
            bestMove = candidate
            best = score
    return bestMove
Ejemplo n.º 24
0
def nextMove(board, color, time, movesRemaining):
    """Pick the move maximizing the static evaluation of the successor
    board (greedy, no lookahead)."""
    moves = getAllPossibleMoves(board, color)
    best = None
    for move in moves:
        successor = deepcopy(board)
        gamePlay.doMove(successor, move)
        value = evaluation(successor, color)
        if best is None or value > best:
            bestMove = move
            best = value
    return bestMove
Ejemplo n.º 25
0
def nextMove(board, col, time, movesRemaining):
    """Choose the current player's move, scaling search depth with the
    clock and the number of moves remaining.

    Fixes: the inner `if time < 5` branch was unreachable -- the outer
    `if time < 5 or movesRemaining < 4` already diverts those cases to
    the greedy evaluation -- so it has been removed.  `bestMove` is also
    initialized so an empty move list no longer raises NameError.
    """
    global currentPlayerColor
    currentPlayerColor = col
    global opponentPlayerColor
    opponentPlayerColor = getOpponentColor(col)
    moves = getAllPossibleMoves(board, currentPlayerColor)

    # Only one legal move: no need to evaluate, just return it.
    if len(moves) == 1:
        return moves[0]

    best = None
    bestMove = None
    for move in moves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, move)
        alpha = -sys.maxint - 1
        beta = sys.maxint
        if time < 5 or movesRemaining < 4:
            # Nearly out of time or moves: one-board greedy evaluation.
            moveVal = evaluation1(newBoard)
        elif (movesRemaining / 2) < 30:
            # Few moves remain: keep the search shallow.
            moveVal = minimax(newBoard, 3, alpha, beta, True)
        elif movesRemaining > 140:
            # Opening: the heuristics handle it, no need to go deep.
            moveVal = minimax(newBoard, 3, alpha, beta, True)
        else:
            # Mid/late game: scale depth with remaining time
            # (time >= 5 is guaranteed here).
            if time < 18:
                depth = 3
            elif time < 23:
                depth = 5
            elif time < 28:
                depth = 7
            else:
                depth = 5
            moveVal = minimax(newBoard, depth, alpha, beta, True)

        if best is None or moveVal > best:
            bestMove = move
            best = moveVal
    return bestMove
Ejemplo n.º 26
0
def nextMove(board, color, timeDuration, reversed = False):
    '''Function called to play the next move from gamePlay.py.

    Picks the move with the highest minimax (alpha-beta) score.  The search
    depth is chosen from the number of pieces on the board and the remaining
    time budget.  Returns "pass" when no move is available.
    '''
    global initialTimeStamp                         # Changing the scope to Global for the variable
    global currentTimeStamp                         # Changing the scope to Global for the variable
    global timeBuffer                               # Changing the scope to Global for the variable

    if timeDuration > 1:                            # Resetting the Time Buffer based on the Time duration passed from Game Play
        timeBuffer = 1
    initialTimeStamp = time.time()                  # Capturing the time stamp at the moment nextMove is called

    childMoves = possibleChildMoves(board, color)   # Get the possible valid moves for the board
    if len(childMoves) == 0:                        # If there are no more valid moves, return Pass
        return "pass"
    if len(childMoves) == 1:                        # If there is only One valid move, return it
        return childMoves[0]
    totalPlayedTiles = totalPlayed(board)           # Find the total number of pieces on the board.

    # Based on the total number of pieces on the board (and the remaining
    # time), determine the depth limit for the minimax search.
    if totalPlayedTiles < 20:                       # Opening: shallow search
        depthLimit = 2
    elif totalPlayedTiles < 50:                     # Mid game: depth 4, or 2 when short on time
        depthLimit = 2 if timeDuration < 10 else 4
    else:                                           # End game: depth 6, or 4 when short on time
        depthLimit = 4 if timeDuration < 15 else 6

    # BUG FIX: bestScore used to start at 0, so when every move evaluated
    # to a score <= 0 the best move was never updated and the first move
    # was returned regardless of relative merit.  Start at None instead so
    # the first evaluated move always becomes the current best.
    bestScore = None
    bestMove = childMoves[0]                        # Fallback if the time budget expires immediately

    for move in childMoves:                         # For every valid move, perform the following
        currentTimeStamp = time.time()                      # Capture the current time stamp
        timeElapsed = currentTimeStamp - initialTimeStamp   # Time elapsed since the call from Game Play
        if timeElapsed > (timeDuration - timeBuffer):       # Budget exhausted: return the best found so far
            return bestMove

        newBoard = deepcopy(board)                  # Make a copy of the current board
        gamePlay.doMove(newBoard, color, move)      # Play the candidate move on the copy

        # The opponent moves next, so the recursion starts as the minimizing player.
        score = miniMaxAlphaBeta(newBoard, depthLimit - 1, -10000, 10000, opponent(color), False, timeDuration)
        if bestScore is None or score > bestScore:  # Keep the best-scoring move seen so far
            bestMove = move
            bestScore = score

    return bestMove                                 # Return the best move
Ejemplo n.º 27
0
def evaluation(board, color, depth, turn, opponentColor, alpha, beta):
    """Recursive minimax with alpha-beta pruning over a 32-square checkers board.

    ``turn`` is 'max' (our move) or 'min' (opponent's move).  For depth > 1
    the function recurses one ply deeper; at the last level it returns a
    material score (man = 2, king = 3, opponent pieces negated).

    NOTE(review): the None comparisons below (e.g. ``beta > opti`` while
    ``opti`` is still None) rely on Python 2 ordering, where None sorts
    below every number; this code is not Python 3 safe.
    """
    if depth > 1: #Comes here depth-1 times and goes to else for leaf nodes.
        depth -= 1
        opti = None
        if turn == 'max':
            moves = getAllPossibleMoves(board, color) #Gets all possible moves for player
            for move in moves:
                nextBoard = deepcopy(board)
                gamePlay.doMove(nextBoard,move)
                # Beta cutoff: expand only while the MIN parent could still accept a value.
                if beta > opti:
                    value = evaluation(nextBoard, color, depth, 'min', opponentColor, alpha, beta)
                    if value > opti: #None is less than everything and anything so we don't need opti == None check
                        opti = value
                    if opti > alpha:
                        alpha = opti

        elif turn == 'min':
            moves = getAllPossibleMoves(board, opponentColor) #Gets all possible moves for the opponent
            for move in moves:
                nextBoard = deepcopy(board)
                gamePlay.doMove(nextBoard,move)
                # Alpha cutoff: expand only while the MAX parent could still accept a value.
                if alpha == None or opti == None or alpha < opti: #None conditions are to check for the first times
                    value = evaluation(nextBoard, color, depth, 'max', opponentColor, alpha, beta)
                    if opti == None or value < opti: #opti = None for the first time
                        opti = value
                    if opti < beta:
                        beta = opti

        return opti # opti will contain the best value for player in MAX turn and worst value for player in MIN turn

    else: #Comes here for the last level i.e leaf nodes
        value = 0
        # Material count over the 32 playable squares (serial numbers 1..32).
        for piece in range(1, 33):
            xy = gamePlay.serialToGrid(piece)
            x = xy[0]
            y = xy[1]
            #Below, we count the number of kings and men for each color.
            #A player king is 1.5 times more valuable than a player man.
            #An opponent king is 1.5 times worse for the player than an opponent man.
            #By assigning more weight on kings, the AI will prefer killing opponent kings to killing opponent men.
            #It will also prefer saving player kings to saving player men when the situation demands.
            #If a player king is double the value of a man, then AI may choose to sacrifice a man to make a king.
            #To avoid this, a factor of 1.5 has been chosen.
            if board[x][y] == color.lower():
                value += 2
            elif board[x][y] == opponentColor.lower():
                value -= 2
            elif board[x][y] == color.upper():
                value += 3
            elif board[x][y] == opponentColor.upper():
                value -= 3
        return value
Ejemplo n.º 28
0
def evaluation(board, color, depth, turn, opponentColor, alpha, beta):
    """Minimax with alpha-beta pruning (duplicate of the variant above).

    ``turn`` selects whose moves are expanded: 'max' expands ``color``'s
    moves, 'min' expands ``opponentColor``'s.  At the depth limit a material
    score is returned: man = 2, king = 3, opponent pieces subtracted.

    NOTE(review): comparisons against a still-None ``opti``/``alpha`` depend
    on Python 2's ordering (None < any number); not Python 3 safe.
    """
    if depth > 1: #Comes here depth-1 times and goes to else for leaf nodes.
        depth -= 1
        opti = None
        if turn == 'max':
            moves = getAllPossibleMoves(board, color) #Gets all possible moves for player
            for move in moves:
                nextBoard = deepcopy(board)
                gamePlay.doMove(nextBoard,move)
                # Beta cutoff: expand only while the MIN parent could still accept a value.
                if beta > opti:
                    value = evaluation(nextBoard, color, depth, 'min', opponentColor, alpha, beta)
                    if value > opti: #None is less than everything and anything so we don't need opti == None check
                        opti = value
                    if opti > alpha:
                        alpha = opti

        elif turn == 'min':
            moves = getAllPossibleMoves(board, opponentColor) #Gets all possible moves for the opponent
            for move in moves:
                nextBoard = deepcopy(board)
                gamePlay.doMove(nextBoard,move)
                # Alpha cutoff: expand only while the MAX parent could still accept a value.
                if alpha == None or opti == None or alpha < opti: #None conditions are to check for the first times
                    value = evaluation(nextBoard, color, depth, 'max', opponentColor, alpha, beta)
                    if opti == None or value < opti: #opti = None for the first time
                        opti = value
                    if opti < beta:
                        beta = opti

        return opti # opti will contain the best value for player in MAX turn and worst value for player in MIN turn

    else: #Comes here for the last level i.e leaf nodes
        value = 0
        # Material count over the 32 playable squares (serial numbers 1..32).
        for piece in range(1, 33):
            xy = gamePlay.serialToGrid(piece)
            x = xy[0]
            y = xy[1]
            #Below, we count the number of kings and men for each color.
            #A player king is 1.5 times more valuable than a player man.
            #An opponent king is 1.5 times worse for the player than an opponent man.
            #By assigning more weight on kings, the AI will prefer killing opponent kings to killing opponent men.
            #It will also prefer saving player kings to saving player men when the situation demands.
            #If a player king is double the value of a man, then AI may choose to sacrifice a man to make a king.
            #To avoid this, a factor of 1.5 has been chosen.
            if board[x][y] == color.lower():
                value += 2
            elif board[x][y] == opponentColor.lower():
                value -= 2
            elif board[x][y] == color.upper():
                value += 3
            elif board[x][y] == opponentColor.upper():
                value -= 3
        return value
def handleError(board, color, time, movesRemaining):
    '''Fallback strategy: one-ply greedy search used when the main search errors out.

    Evaluates every legal move with the static evaluation function and
    returns the highest-scoring one (None when there is no legal move).
    '''
    moves = getAllPossibleMoves(board, color)
    #Trying to find the move where I have best score
    best = None
    # BUG FIX: bestMove was previously unbound, so an empty move list made
    # the final return raise NameError inside the error handler itself.
    bestMove = None
    for move in moves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, move)
        moveVal = evaluation(newBoard, color)
        if best is None or moveVal > best:
            bestMove = move
            best = moveVal
    return bestMove
Ejemplo n.º 30
0
def nextMove(board, color, time, reversed=False):
    '''Entry point called from gamePlay: pick the best move for `color`.

    Builds one search-tree node per legal move, lets traverseTree() score it
    into parnt[1], and returns the highest-scoring move ("pass" when there
    is no legal move).  The module globals alpha/beta/localalpha are shared
    with the tree-traversal helpers and are reset between root children.
    '''
    global myColor
    global opponentColor
    global alpha
    global beta
    global localalpha
    opponentColor = gamePlay.opponent(color)
    myColor = color
    moves = getMoves(board, color)
    #print "moves  :",moves
    if len(moves) == 0:
        return "pass"
    childNodes = []
    changeTime(time)    # adjust search effort for the remaining time
    changeWeight(board) # adjust evaluation weights for the current position
    bestMove = None
    bestValue = None
    #print "moves :",len(moves)

    for move in moves:

        parnt = makeNode(None, None, move[0], move[1])
        childNodes.append(parnt)
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, color, move)
        #gamePlay.printBoard(newBoard)
        #gamePlay.printBoard(newBoard)
        oppColor = gamePlay.opponent(color)
        # traverseTree scores the resulting position into parnt[1].
        traverseTree(parnt, newBoard, oppColor, 1)
        #print "aaaaaaa ",aaaa
        if bestMove == None:
            bestMove = move
            bestValue = parnt[1]
        elif bestValue < parnt[1]:
            bestMove = move
            bestValue = parnt[1]
        # Promote the completed child's beta into the shared alpha, then
        # reset the per-child globals before searching the next root move.
        if alpha == None:
            alpha = beta
        if alpha < beta:
            alpha = beta
        beta = None
        localalpha = None

        #print parnt[1]
    #for n in childNodes:
    #	print "final value :",n[1]

    #print "bestValue ",bestValue
    #print "bestMove ",bestMove
    #iop=raw_input()
    return bestMove
Ejemplo n.º 31
0
def nextMove(board, color, time, reversed = False):
	'''Entry point called from gamePlay: pick the best move for `color`.

	Builds one search-tree node per legal move, lets traverseTree() score
	it into parnt[1], and returns the highest-scoring move ("pass" when
	there is no legal move).  The module globals alpha/beta/localalpha are
	shared with the tree-traversal helpers and reset between root children.

	FIX: the original body mixed tabs and spaces (e.g. two spaces followed
	by a tab before changeTime), which is a TabError under Python 3 and
	``python -tt``; indentation is normalized to tabs, logic unchanged.
	'''
	global myColor
	global opponentColor
	global alpha
	global beta
	global localalpha
	opponentColor = gamePlay.opponent(color)
	myColor = color
	moves = getMoves(board, color)
	if len(moves) == 0:
		return "pass"
	childNodes = []
	changeTime(time)	# adjust search effort for the remaining time
	changeWeight(board)	# adjust evaluation weights for the position
	bestMove = None
	bestValue = None

	for move in moves:
		parnt = makeNode(None, None, move[0], move[1])
		childNodes.append(parnt)
		newBoard = deepcopy(board)
		gamePlay.doMove(newBoard, color, move)
		oppColor = gamePlay.opponent(color)
		# traverseTree scores the resulting position into parnt[1].
		traverseTree(parnt, newBoard, oppColor, 1)
		if bestMove == None:
			bestMove = move
			bestValue = parnt[1]
		elif bestValue < parnt[1]:
			bestMove = move
			bestValue = parnt[1]
		# Promote the completed child's beta into the shared alpha, then
		# reset the per-child globals before the next root move.
		if alpha == None:
			alpha = beta
		if alpha < beta:
			alpha = beta
		beta = None
		localalpha = None

	return bestMove
Ejemplo n.º 32
0
def simple_greedy(board, color, reversed = False):
	'''Simple greedy strategy: score every legal move one ply deep and
	return the best one ("pass" when there is no legal move).

	BUG FIX: the original ``return best_move`` was indented inside the
	loop body, so only the first move was ever considered (and later
	tying/better moves were never reached).  The return now follows the
	loop.
	'''
	moves = get_successors(board, color)
	if len(moves) == 0:
		return "pass"
	best = None
	best_move = None
	for move in moves:
		new_board = deepcopy(board)
		gamePlay.doMove(new_board, color, move)
		move_value = value(new_board, color)
		# better_than encapsulates the color/reversed comparison rules.
		if best == None or better_than(move_value, best, color, reversed):
			best_move = move
			best = move_value
	return best_move
Ejemplo n.º 33
0
def nextMove(board, color, reversed = False):
    '''Greedy Othello move: score every valid placement one ply deep and
    return the best one ("pass" when no placement is valid).

    FIX: the original body had a syntactically invalid "staircase" of
    increasing indentation and was truncated (it never updated the best
    score nor returned); reconstructed to match the identical greedy
    variant elsewhere in this file.
    '''
    moves = []
    for i in range(8):
        for j in range(8):
            if gamePlay.valid(board, color, (i, j)):
                moves.append((i, j))
    if len(moves) == 0:
        return "pass"
    best = None
    bestMove = None
    for move in moves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, color, move)
        moveVal = value(newBoard)
        # betterThan encapsulates the color/reversed comparison rules.
        if best == None or betterThan(moveVal, best, color, reversed):
            bestMove = move
            best = moveVal
    return bestMove
Ejemplo n.º 34
0
def nextMove(board,color,time):
	'''Pick the next Othello move via alpha-beta search, preferring corners.

	BUG FIX: the ``time <= 0`` check used to be the last elif after
	``time < 10``, so it could never fire; it is now tested first.
	Also initializes bestMove so an all-bad move list returns None
	instead of raising NameError.
	'''
	if time <= 0:
		return "pass"
	decrease = 0  # how much to shave off the alpha-beta search depth
	if time >= 63.98:
		# Opening: whichever piece we take matters little, so save time.
		decrease = 2
	elif time < 10:
		# Nearly out of time: search as shallow as possible.
		decrease = 2
	elif time < 17:
		# Low on time (10 <= time < 17): search one ply shallower.
		decrease = 1
	moves = []
	bonus = []

	for i in range(8):
		for j in range(8):
			if gamePlay.valid(board,color,(i,j)):
				moves.append((i,j))
	if len(moves) == 0:
		return "pass"
	best = -100
	alpha = -100
	beta = 100
	# Corner squares dominate strategically; when any corner is available,
	# restrict the root search to the corners (with a shallow depth).
	for corner in ((0,0), (0,7), (7,0), (7,7)):
		if corner in moves:
			bonus.append(corner)
	bestMove = None
	if len(bonus) > 0:
		candidates = bonus
		depth = 2 - decrease
	else:
		candidates = moves
		depth = 5 - decrease
	for move in candidates:
		newBoard = deepcopy(board)
		gamePlay.doMove(newBoard,color,move)
		tmp = alphabeta(depth, newBoard, alpha, beta, "MIN", color)
		if best < tmp:
			best = tmp
			bestMove = move

	return bestMove
Ejemplo n.º 35
0
def alphaBetaPruning(node, depth, time):
    """Root (MAX) level of an alpha-beta search.

    ``node`` is a [board, ply, color] triple.  Every legal move for
    node's color is applied to a board copy, scored by the MIN stage
    with a fresh (-inf, +inf) window, and the best-scoring move is
    returned.
    """
    moves = getAllPossibleMoves(node[0], node[2])
    bestMove = moves[0]
    bestScore = -float("inf")   # lower bound at the root MAX node
    opponent = gamePlay.getOpponentColor(node[2])
    for candidate in moves:
        boardCopy = deepcopy(node[0])
        gamePlay.doMove(boardCopy, candidate)
        # Build the child node one ply deeper with the opponent to move.
        childNode = returnList(boardCopy, node[1] + 1, opponent)
        # MIN moves next at the child.
        candidateScore = minimum(childNode, depth, -float("inf"), float("inf"), time)
        if candidateScore > bestScore:
            bestScore = candidateScore
            bestMove = candidate
    return bestMove
Ejemplo n.º 36
0
def nextMove(board, color, time):
    """Pick a move via depth-3 alpha-beta search; 'pass' when no move exists."""
    candidates = validMove(board, color)
    if not candidates:
        return "pass"
    bestScore = None
    bestMove = None
    for candidate in candidates:
        scratch = deepcopy(board)
        gamePlay.doMove(scratch, color, candidate)
        # Opponent (minimizing side) moves next, hence maximizing=False.
        score = alphabeta(scratch, 3, -1000, 1000, False, color,
                          reverseColor(color))
        # '>=' preserves the original behaviour of preferring the
        # latest move among ties.
        if bestScore is None or score >= bestScore:
            bestMove = candidate
            bestScore = score
    return bestMove
Ejemplo n.º 37
0
def maxChance(newBoard,depth,color,opponentColor,alpha,beta):
    """MAX half of alpha-beta: best reply for ``color`` on ``newBoard``.

    NOTE(review): ``beta > maxScore`` below relies on Python 2 ordering
    (None < any number) while maxScore is still None; not Python 3 safe.
    Also returns None when the side to move has no moves — callers must
    tolerate that.
    """
    if depth == 0: # If the depth reaches the value 0, it means we have reached the leave node and now we need to estimate the value at that node using evaluation functions. 
        return evaluation(newBoard,color,opponentColor)
    else:
        maxScore = None
        moves = getAllPossibleMoves(newBoard,color)
        for move in moves:
            nextBoard = deepcopy(newBoard)
            gamePlay.doMove(nextBoard,move)
            # Beta cutoff: expand only while the MIN parent could still accept this value.
            if beta > maxScore:
                    score = minChance(nextBoard,depth-1,color, opponentColor, alpha, beta) # This is the max turn, so next is min's turn. This process continues recursively.
                    if score > maxScore: 
                        maxScore = score
                    if maxScore > alpha:
                        alpha = maxScore
    return maxScore
Ejemplo n.º 38
0
def alpha_beta(board, color):
	'''Root of the alpha-beta search: return the best move for `color`,
	or "pass" when there is no legal move.'''
	next_positions = get_successors(board, color)
	if not next_positions:
		return "pass"
	opponent_color = gamePlay.opponent(color)
	best_move = (-1, -1)
	best_value = -100000
	# This loop plays the role of max_value(); min_value() starts the
	# recursion for the opponent.
	for candidate in next_positions:
		scratch = deepcopy(board)
		gamePlay.doMove(scratch, color, candidate)
		# Depth 6 keeps the response time tolerable; deeper is too slow.
		score = min_value(scratch, opponent_color, 6)
		# '>=' keeps the original preference for the latest tying move.
		if score >= best_value:
			best_value = score
			best_move = candidate
	return best_move
Ejemplo n.º 39
0
def nextMove(board, color, time, reversed = False):
    '''Greedy Othello move: score every valid placement one ply deep and
    return the best one ("pass" when nothing is valid).'''
    candidates = [(i, j)
                  for i in range(8)
                  for j in range(8)
                  if gamePlay.valid(board, color, (i, j))]
    if not candidates:
        return "pass"
    best = None
    for candidate in candidates:
        scratch = deepcopy(board)
        gamePlay.doMove(scratch, color, candidate)
        score = value(scratch)
        # betterThan encapsulates the color/reversed comparison rules.
        if best is None or betterThan(score, best, color, reversed):
            bestMove = candidate
            best = score
    return bestMove
Ejemplo n.º 40
0
def minimax(board, color, depth, alpha, beta, maximizingPlayer):
    """Alpha-beta minimax returning a (score, move) pair.

    At MAX nodes the returned move is the best child; MIN nodes return
    "pass" as the move since only the root's MAX choice is used by the
    caller.  Depth exhaustion or a move-less position is scored by the
    static value() function.
    """
    bestMove = "pass"   # default when no move improves alpha
    moves = successor(board, color)
    # Depth exhausted or no legal continuation: static evaluation.
    if depth == 0 or moves == "pass":
        return value(board, color, maximizingPlayer), "pass"

    nextColor = gamePlay.opponent(color)
    if maximizingPlayer:
        for move in moves:
            child = deepcopy(board)
            gamePlay.doMove(child, color, move)
            score = minimax(child, nextColor, depth - 1, alpha, beta, False)[0]
            if score > alpha:        # alpha tracks the best score so far
                alpha = score
                bestMove = move
            if beta <= alpha:        # prune: the MIN above never allows this
                break
        # MAX always reports which move achieved alpha.
        return alpha, bestMove

    for move in moves:
        child = deepcopy(board)
        gamePlay.doMove(child, color, move)
        score = minimax(child, nextColor, depth - 1, alpha, beta, True)[0]
        if score < beta:             # beta tracks the worst (for MAX) so far
            beta = score
        if beta <= alpha:            # prune
            break
    # The move is irrelevant at MIN nodes; only the root uses the move.
    return beta, "pass"
	def minimize(board, alpha, beta, depth):
		"""MIN node of alpha-beta: the opponent picks the move worst for us.

		Returns the minimum over the opponent's replies of maximize(),
		pruning when the running minimum drops to alpha or below.

		BUG FIXES: the original recursed on the unmodified ``board``
		instead of the board with the move applied, and generated moves
		for ``myColor`` even though this is the opponent's turn (the
		terminal test above already checks ``opponentColor``).
		"""
		global opponentColor
		global myColor
		# Depth limit reached, or the opponent has no reply: static evaluation.
		if depth <= 0 or not gamePlay.isAnyMovePossible(board, opponentColor):
			return evaluate(board, myColor)
		score = sys.maxint
		for move in getAllPossibleMoves(board, opponentColor):
			newBoard = deepcopy(board)
			gamePlay.doMove(newBoard, move)
			# Recurse on the child position (the move applied).
			score = min(score, maximize(newBoard, alpha, beta, depth - 1))
			# Alpha cut-off: MAX above already has a move at least this good.
			if score <= alpha:
				return score
			beta = min(beta, score)
		return score
Ejemplo n.º 42
0
def iterativeDeepeningAlphaBetaPruning(board, time, maxRemainingMoves):
    """Root search: choose a depth from the time budget and piece ratio,
    then score every legal move with alphaBetaPruning and return the best.
    """
    global myColor
    global opponentColor

    # Book move: on the very first ply skip the search entirely and play a
    # standard opening for the side to move.
    if maxRemainingMoves == 150:
        if myColor == 'r':
            return [11, 15]
        else:
            return [22, 18]

    moves = getAllPossibleMoves(board, myColor)
    # Only one legal move: no point searching.
    if len(moves) == 1:
        return moves[0]

    depth = 4
    myPieces = gamePlay.countPieces(board, myColor)
    opponentPieces = gamePlay.countPieces(board, opponentColor)

    # BUG FIX: this was Python 2 integer division, so e.g. 7/6 == 1 and the
    # 'pieceRatio > 1' branch below could only fire with at least twice as
    # many pieces; use true division.
    pieceRatio = float(myPieces) / opponentPieces
    if pieceRatio < 1:          # losing on material: search deeper
        depth = 6

    # Time-pressure overrides.
    if time < 30 and pieceRatio < 1: depth = 3
    elif time < 20 and pieceRatio > 1: depth = 2
    elif time < 10: depth = 1

    bestMove = None
    # BUG FIX: best was preset to -sys.maxint-1, which made the original
    # 'best == None' test dead code; start from None instead.
    best = None
    for move in moves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, move)
        # Score this root child with alpha-beta pruning.
        moveVal = alphaBetaPruning(newBoard, depth, time)
        if best is None or moveVal > best:
            bestMove = move
            best = moveVal
    return bestMove
Ejemplo n.º 43
0
def nextMove(board, color, time, movesRemaining):
    """Root of the search: score each legal move with a depth-5 minimax
    (child nodes start as MIN) and return the highest-scoring one."""
    moves = getAllPossibleMoves(board, color)
    opponentColor = gamePlay.getOpponentColor(color)
    searchDepth = 5
    best = None
    alpha = None          # Python 2: None compares below every number
    beta = float("inf")   # no MIN parent above the root, so beta never prunes here
    for move in moves:
        child = deepcopy(board)
        gamePlay.doMove(child, move)
        # The root is MAX, so every child begins as a 'min' node.
        score = evaluation(child, color, searchDepth, 'min', opponentColor, alpha, beta)
        if best is None or score > best:
            bestMove = move
            best = score
        # Tighten the root alpha for later siblings.
        if best > alpha:
            alpha = best
    return bestMove
Ejemplo n.º 44
0
def minChance(newBoard,depth,color,opponentColor,alpha,beta):
    """MIN half of alpha-beta: the opponent's best (for them) reply.

    NOTE(review): the None checks below rely on Python 2 ordering
    (None < any number); not Python 3 safe.  Returns None when the
    opponent has no moves — callers must tolerate that.
    """
    if depth == 0: #If the depth reaches the value 0, it means we have reached the leave node and now we need to estimate the value at that node using evaluation functions.
        return evaluation(newBoard,color,opponentColor)
    else:
        minimumScore = None
        moves = getAllPossibleMoves(newBoard, opponentColor)
        for move in moves:
            nextBoard = deepcopy(newBoard)
            gamePlay.doMove(nextBoard,move)
            #If the alpha score is less than the minimum score then rest of the nodes can be pruned. 
            if alpha == None or minimumScore == None or alpha < minimumScore: #None is less than everything and anything
                score = maxChance(nextBoard,depth-1, color, opponentColor, alpha, beta) # This is the min turn, so next should be max's turn. This process continues recursively. 
                if minimumScore == None or score < minimumScore:
                    minimumScore = score
                if minimumScore < beta:
                    beta = minimumScore

    return minimumScore
Ejemplo n.º 45
0
def alphaBeta(board, move, depth, alpha, beta, maximizingPlayer, color, opColor):
    """Alpha-beta search where each recursive call first applies ``move``
    to a copy of ``board`` and then expands the resulting position.

    At depth 0 the board is scored by a weighted sum of seven evaluation
    components (material, kings, captures, position, promotion, base row,
    flocking), some gated by gamePhase().

    NOTE(review): at depth 0 the incoming ``move`` is never applied — the
    leaf scores the parent board, so the last ply's move is effectively
    ignored; confirm this depth convention is intended.
    """
    if depth == 0:
        # Different evaluation values multiplied by weightage
        moveVal = evaluation1(board, color) * 10
        if gamePhase(board, color) == 3:
            kingVal = evaluation2(board, color) * 5
        else:
            kingVal = 0
        capVal = evaluation3(board, color) * 10
        posVal = evaluation4(board, color) * 1 
        promoVal = evaluation5(board, color) * 1 
        if gamePhase(board, color) != 0:
            baseVal = evaluation6(board, color) * 1 
        else:
            baseVal = 0
        flockVal = evaluation7(board, color) * 2 
        # print moveVal, kingVal, capVal, posVal, promoVal, baseVal, flockVal
        bestVal = moveVal + kingVal + capVal + posVal + promoVal + baseVal + flockVal
        return bestVal
    
    if (maximizingPlayer):
        bestVal = -float('inf')
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard,move)
        moves = getAllPossibleMoves(newBoard, color)
        for move in moves:
            # Child is a MIN node; tighten alpha as better scores appear.
            bestVal = max(bestVal, alphaBeta(newBoard, move, depth-1, alpha, beta, False, color, opColor))
            alpha = max(alpha, bestVal)
            if beta <= alpha:
                break
        return bestVal

    else:
        bestVal = float('inf')
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard,move)
        moves = getAllPossibleMoves(newBoard, opColor)
        for move in moves:
            # Child is a MAX node; tighten beta as worse scores appear.
            bestVal = min(bestVal, alphaBeta(newBoard, move, depth-1, alpha, beta, True, color, opColor))
            beta = min(beta, bestVal)
            if  beta <= alpha:
                break
        return bestVal
Ejemplo n.º 46
0
def minimax(board, color, depth):
    """Plain (no pruning) minimax for an 8x8 Othello board.

    B maximizes and W minimizes.  Returns the minimax value of ``board``
    (or "pass" when the side to move has no legal move).

    BUG FIXES: the original body had syntactically invalid mixed
    tab/space indentation, referenced the undefined name ``move`` before
    the loops, and misspelt ``newBoard`` as ``newboard`` in both doMove
    calls.
    """
    #if depth = 0, we calculate the score
    if depth == 0:
        return eval_fn(board, color)
    #if game is over, we calculate the score
    if gamePlay.gameOver(board):
        return gamePlay.score(board)

    best_val = None
    best_move = None
    opp = gamePlay.opponent(color)

    # Collect every legal placement for `color`.
    moves = []
    for row in range(8):
        for col in range(8):
            if gamePlay.valid(board, color, (row, col)):
                moves.append((row, col))
    if len(moves) == 0:
        return "pass"

    # NOTE(review): board[:] is a shallow copy; if doMove mutates the row
    # lists this also mutates `board` -- kept from the original, verify.
    if color == "B":    # maximizing player
        for move in moves:
            newBoard = board[:]
            gamePlay.doMove(newBoard, color, move)
            val = minimax(newBoard, opp, depth - 1)
            if best_val is None or val > best_val:
                best_val, best_move = val, move
    if color == "W":    # minimizing player
        for move in moves:
            newBoard = board[:]
            gamePlay.doMove(newBoard, color, move)
            val = minimax(newBoard, opp, depth - 1)
            if best_val is None or val < best_val:
                best_val, best_move = val, move
    return best_val
Ejemplo n.º 47
0
def alpha_beta(board, color, depth, alpha, beta):
	"""Alpha-beta search over an 8x8 Othello board; B maximizes, W minimizes.

	Returns the node's value ("pass" when the side to move has no legal
	move).

	BUG FIX: on a cutoff the original executed a bare ``return``, handing
	None back to the caller; in Python 2 ``min(beta, None)`` is None
	(None sorts below every number), which corrupted the parent's search
	window.  Cutoffs now return alpha / beta respectively.
	"""
	if depth == 0:
		return eval_fn(board, color)
	if gamePlay.gameOver(board):
		return gamePlay.score(board)

	# Collect every legal placement for `color`.
	moves = []
	for row in range(8):
		for col in range(8):
			if gamePlay.valid(board, color, (row, col)):
				moves.append((row, col))
	if len(moves) == 0:
		return "pass"

	opp = gamePlay.opponent(color)
	# NOTE(review): board[:] is a shallow copy; if doMove mutates the row
	# objects this also mutates `board` -- kept from the original, verify.
	if color == "B":	# maximizing player
		for move in moves:
			newBoard = board[:]
			gamePlay.doMove(newBoard, color, move)
			alpha = max(alpha, alpha_beta(newBoard, opp, depth-1, alpha, beta))
			if beta <= alpha:	# beta cutoff
				return alpha
		return alpha
	if color == "W":	# minimizing player
		for move in moves:
			newBoard = board[:]
			gamePlay.doMove(newBoard, color, move)
			beta = min(beta, alpha_beta(newBoard, opp, depth-1, alpha, beta))
			if beta <= alpha:	# alpha cutoff
				return beta
		return beta
def max_stage_with_pruning(board, depth_limit, cur_depth, player, alpha, beta,
                           time_lmt, start_time):
    """MAX stage of depth-limited alpha-beta with a wall-clock budget.

    Expands every move for ``player``, recursing into the MIN stage, and
    returns the highest child value.  Returns early on a beta cutoff or
    when less than one second of the time budget remains; returns "pass"
    when the player has no legal move (game over for this branch).
    """
    # Stop on the depth limit or when the budget is nearly exhausted.
    if cur_depth == depth_limit or (time.time() - start_time) > (time_lmt - 1):
        return eval_func(board, player)

    moves = generate_possible_moves(board, player)
    if not moves:
        # No possible moves: game over; caller determines the winner.
        return "pass"

    best_value = float('-inf')
    for candidate in moves:
        scratch = deepcopy(board)
        gamePlay.doMove(scratch, player, candidate)
        # Descend into the opponent's (MIN) stage one ply deeper.
        child_value = min_stage_with_pruning(scratch, depth_limit,
                                             cur_depth + 1,
                                             flip_player(player),
                                             alpha, beta, time_lmt,
                                             start_time)
        if child_value > best_value:
            best_value = child_value
        # Beta cutoff, or the time budget ran out mid-loop.
        if best_value >= beta or (time.time() - start_time) > (time_lmt - 1):
            return best_value
        alpha = max(best_value, alpha)

    return best_value
Ejemplo n.º 49
0
def miniMaxAlphaBeta(board , depth ,alpha,beta, color, maximizingPlayer,timeDuration):
    '''Depth-limited minimax with alpha-beta pruning and a wall-clock budget.

    Cuts off (returning the heuristic value of the current board) when the
    depth is exhausted, the time budget minus the safety buffer is spent,
    or the side to move has no legal move.  The global initialTimeStamp /
    timeBuffer are set by nextMove().
    '''
    # Time spent since gamePlay handed control to nextMove (global stamp).
    timeElapsed = time.time() - initialTimeStamp

    # Depth or time budget exhausted: score the board as-is.
    if depth == 0 or timeElapsed > (timeDuration - timeBuffer):
        return heurValue(board, color)

    childMoves = possibleChildMoves(board, color)
    if len(childMoves) == 0:
        # No legal continuation: score the board as-is.
        return heurValue(board, color)

    if maximizingPlayer == True:
        for candidate in childMoves:
            scratch = deepcopy(board)
            gamePlay.doMove(scratch, color, candidate)
            # The opponent minimizes at the child node.
            childValue = miniMaxAlphaBeta(scratch, depth - 1, alpha, beta,
                                          opponent(color), False, timeDuration)
            alpha = max(alpha, childValue)
            if alpha >= beta:       # beta cutoff
                break
        return alpha

    if maximizingPlayer == False:
        for candidate in childMoves:
            scratch = deepcopy(board)
            gamePlay.doMove(scratch, color, candidate)
            # We maximize at the child node.
            childValue = miniMaxAlphaBeta(scratch, depth - 1, alpha, beta,
                                          opponent(color), True, timeDuration)
            beta = min(beta, childValue)
            if beta <= alpha:       # alpha cutoff
                break
        return beta
Ejemplo n.º 50
0
def nextMove(board, col, time, movesRemaining):
    """Pick the move for player `col` with the best minimax value.

    Runs a fixed depth-5 alpha-beta search from every legal move and keeps
    the move with the highest score.  Returns None when there are no legal
    moves (the original raised UnboundLocalError in that case).
    """
    global currentPlayerColor
    currentPlayerColor = col
    global opponentPlayerColor
    opponentPlayerColor = getOpponentColor(col)

    moves = getAllPossibleMoves(board, currentPlayerColor)  # list of legal moves

    # Search parameters are loop-invariant; hoisted out of the loop.
    depth = 5
    alpha = -sys.maxint - 1   # smallest int (Python 2)
    beta = sys.maxint

    best = None
    bestMove = None           # stays None when there are no legal moves
    for move in moves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, move)   # simulate the move on a copy
        moveVal = minimax(newBoard, depth, alpha, beta, True)
        if best is None or moveVal > best:
            bestMove = move
            best = moveVal
    return bestMove
# Ejemplo n.º 51
# 0
def minimax(board, maxP, color, alpha, beta, depth):
    """Alpha-beta minimax: maximizes when maxP is True, minimizes otherwise.

    Returns a (move, score) pair.  At a leaf (no legal moves, or the depth
    budget is spent) the move slot is an empty list and the score is the
    static evaluation of the board.
    """
    moves = getAllPossibleMoves(board, color)
    rivalColor = gamePlay.getOpponentColor(color)

    # Leaf node — same check the original duplicated in both branches.
    if moves == [] or depth == 0:
        return [], evaluation(board)

    chosen = moves[0]
    if maxP:
        bound = -float('inf')
        for candidate in moves:
            scratch = deepcopy(board)
            gamePlay.doMove(scratch, candidate)
            _, val = minimax(scratch, False, rivalColor, alpha, beta, depth - 1)
            if val > bound:
                chosen, bound = candidate, val
            alpha = max(alpha, val)
            if alpha >= beta:   # beta cut-off
                break
    else:
        bound = float('inf')
        for candidate in moves:
            scratch = deepcopy(board)
            gamePlay.doMove(scratch, candidate)
            _, val = minimax(scratch, True, rivalColor, alpha, beta, depth - 1)
            if val < bound:
                chosen, bound = candidate, val
            beta = min(beta, val)
            if alpha >= beta:   # alpha cut-off
                break

    return chosen, bound
# Ejemplo n.º 52
# 0
def possibleChildMoves(board, color):
    '''Return the valid moves for the board, ordered best-first by heuristic value.'''
    scored = []   # [heuristic value, move] pairs for every legal square
    for row in range(8):
        for col in range(8):
            square = (row, col)
            if not gamePlay.valid(board, color, square):   # skip illegal squares
                continue
            probe = deepcopy(board)                        # never touch the real board
            gamePlay.doMove(probe, color, square)          # play the move on the copy
            scored.append([heurValue(probe, color), square])

    # Stable descending sort on the heuristic score; ties keep board order.
    scored.sort(key=lambda pair: pair[0], reverse=True)

    # Strip the scores, keeping only the ordered moves.
    return [pair[1] for pair in scored]
# Ejemplo n.º 53
# 0
def alphabeta(board, depth, alpha, beta, maximizingPlayer, playerColor,
              currentColor):
    """Alpha-beta search; the board is always evaluated from playerColor's
    point of view while currentColor is the side to move at this node.

    Returns the alpha (maximizing) or beta (minimizing) bound reached, or
    the static value of the board at a leaf.
    """
    validMoves = validMove(board, currentColor)
    if depth == 0 or len(validMoves) == 0:
        return value(board, playerColor)

    # Side to move at the child nodes.  Computed ONCE: the original
    # reassigned currentColor inside the loop, so from the second
    # iteration on, moves generated for currentColor were applied with
    # the flipped color (and re-flipped on the next pass).
    nextColor = reverseColor(currentColor)

    # Maximizing player: raise alpha.
    if maximizingPlayer:
        for move in validMoves:
            tempBoard = deepcopy(board)
            # The side to move at THIS node makes the move.
            gamePlay.doMove(tempBoard, currentColor, move)
            alpha = max([
                alpha,
                alphabeta(tempBoard, depth - 1, alpha, beta, False,
                          playerColor, nextColor)
            ])
            if alpha >= beta:   # prune
                break
        return alpha
    # Minimizing player (opponent): lower beta.
    else:
        for move in validMoves:
            tempBoard = deepcopy(board)
            # The side to move at THIS node makes the move.
            gamePlay.doMove(tempBoard, currentColor, move)
            beta = min([
                beta,
                alphabeta(tempBoard, depth - 1, alpha, beta, True, playerColor,
                          nextColor)
            ])
            if alpha >= beta:   # prune
                break
        return beta
def alpha_beta_pruning_strategy(board, depth_limit, cur_depth, player, itime):
    """Root of an alpha-beta search.

    Tries every legal move for `player`, scoring each with
    min_stage_with_pruning, and returns the best one.  Returns the string
    "pass" when there are no legal moves (game over / forfeited turn).
    Aborts early when the time budget (minus a one-second margin) is
    nearly exhausted.
    """
    start_time = time.time()
    alpha = float('-inf')
    beta = float('inf')

    moves = generate_possible_moves(board, player)
    if len(moves) == 0:
        # Nothing to play — let the caller determine the winner.
        return "pass"

    best_move = moves[0]
    best_minimax_value = float('-inf')
    for move in moves:
        cur_board = deepcopy(board)
        gamePlay.doMove(cur_board, player, move)
        # Descend into the opponent's (min) stage of the search.
        cur_minimax_value = min_stage_with_pruning(cur_board, depth_limit,
                                                   cur_depth + 1,
                                                   flip_player(player),
                                                   alpha, beta, itime,
                                                   start_time)
        if cur_minimax_value > best_minimax_value:
            best_minimax_value = cur_minimax_value
            best_move = move
        # Beta cut-off at the root, or time almost up: return what we have.
        cur_time = time.time()
        if best_minimax_value >= beta or (cur_time - start_time) > (itime - 1):
            return best_move
        alpha = max(best_minimax_value, alpha)
    return best_move
# Ejemplo n.º 55
# 0
def nextMove(board, color, time, movesRemaining):
    """Return the best move for `color` found by a depth-5 alpha-beta search.

    The root is the max level, so each child is scored with minChance
    (the min level).  Alpha is tightened at the root as better scores
    are found.
    """
    possibleMoves = getAllPossibleMoves(board, color)
    opponentColor = gamePlay.getOpponentColor(color)

    depth = 5
    # Alpha is the lower bound and beta the upper bound of the true
    # minimax value.
    alpha = float("-inf")
    beta = float("inf")

    bestValue = None
    # Real fallback: the original assigned `bestmove` (lower case) here
    # and returned `bestMove`, so the fallback was dead and the return
    # name could be unbound.
    bestMove = possibleMoves[0]
    for move in possibleMoves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, move)
        # Root is max's turn, so the next level is min's turn.
        score = minChance(newBoard, depth - 1, color, opponentColor, alpha, beta)
        if bestValue is None or score > bestValue:
            # A better score was found; take this move as the best so far.
            bestMove = move
            bestValue = score
        if bestValue > alpha:
            alpha = bestValue
    return bestMove
# Ejemplo n.º 56
# 0
def changeB(cboard, Color, NextMove):
    """Return a copy of `cboard` with `NextMove` played by `Color`,
    including every flip the move causes; the original board is untouched."""
    # Renamed from `changeB` — the original shadowed the function's own name.
    nextBoard = deepcopy(cboard)
    doMove(nextBoard, Color, NextMove)
    return nextBoard