コード例 #1
0
def minimax(board,depth,alpha,beta,maximizingPlayer):
    """Alpha-beta minimax; returns the evaluation() score of the best
    position reachable within `depth` plies.

    Reads the module globals currentPlayerColor / opponentPlayerColor
    (set by nextMove).  Recursion stops at depth 0 or when either side
    has no legal move.

    NOTE(review): the maximizing branch expands opponentPlayerColor's
    moves and the minimizing branch expands currentPlayerColor's moves;
    this matches a caller that applies the current player's move before
    calling minimax(..., True) -- confirm the colors are intended this
    way round.
    """
    global currentPlayerColor 
   
    global opponentPlayerColor
    # Leaf: depth exhausted or either player has no move left.
    if depth==0 or not isAnyMovePossible(board, currentPlayerColor) or not isAnyMovePossible(board,opponentPlayerColor):
        return evaluation(board) 
    if maximizingPlayer:
        v = -sys.maxint - 1  # smallest Python 2 int (sys.maxint is Py2-only)
        moves = getAllPossibleMoves(board, opponentPlayerColor)
        for move in moves:
            newBoard = deepcopy(board)
            
            gamePlay.doMove(newBoard,move)
            v = max(v,minimax(newBoard, depth-1, alpha, beta, False))
            alpha = max(alpha,v)
            if beta <= alpha:  # beta cutoff
                return alpha
        return v
    else:
        v = sys.maxint
        moves = getAllPossibleMoves(board, currentPlayerColor)
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard,move)
            v = min(v,minimax(newBoard, depth-1, alpha, beta, True))
            beta = min(beta,v)
            if beta <= alpha:  # alpha cutoff
                return beta
        return v
        
        
コード例 #2
0
def minimax(board,depth,alpha,beta,maximizingPlayer):
    """Alpha-beta minimax with a game-phase-dependent leaf evaluation.

    The leaf score blends several heuristics whose weights change with
    the current player's piece count (opening / middle / late game).
    Reads the module globals currentPlayerColor / opponentPlayerColor.

    NOTE(review): the heuristic weights do not sum to 1 (e.g.
    0.75+0.20+0.5) and the piece-count brackets (>7, >=6, >4) read
    oddly -- presumably tuned by experiment; confirm before rebalancing.
    """
    global currentPlayerColor 
   
    global opponentPlayerColor
    # Leaf: depth exhausted or either player has no move left.
    if depth==0 or not isAnyMovePossible(board, currentPlayerColor) or not isAnyMovePossible(board,opponentPlayerColor):
        # More than 7 pieces: opening -- center control plus some attack.
        if countPieces(board,currentPlayerColor)>7:
            '''
            initial stage and opening moves trying to focus on center and attack
            '''
            return (0.75 * evaluationMovingToCenter(board))+ (0.20 * evaluationAttackFunction(board))+(0.5*evaluationColorVsOpposite(board))
        
        # 6 or 7 pieces: middle game -- defensive, with a kinging incentive.
        elif countPieces(board,currentPlayerColor)>=6 :
            '''
            middle stage have to be defensive And would also need to be attacktive
            ''' 
            return (0.75 * evaluationMovingToDefense(board))+ (0.15 * evaluationAttackFunction(board))+(0.5*evaluationColorVsOpposite(board)+(0.5*evaluationMakingItKing(board)))
        # Exactly 5 pieces: push toward the center and attack.
        elif countPieces(board,currentPlayerColor)>4 :
            '''
            middle stage have to be get in center amd attack
             
            '''  
            return (0.20 * evaluationMovingToCenter(board))+ (0.70 * evaluationAttackFunction(board))+(0.5*evaluationColorVsOpposite(board)+(0.5*evaluationMakingItKing(board))+evaluationCanBeAttacked(board))
        else:
            '''
            when 4 or less than four pieces are remaining
            '''   
            return (0.50 * evaluationAttackFunction(board))+(0.30*evaluationColorVsOpposite(board)+(0.20*evaluationMovingToDefense(board))+evaluationCanBeAttacked(board))      
    if maximizingPlayer:
        v = -sys.maxint - 1  # smallest Python 2 int
        moves = getAllPossibleMoves(board, opponentPlayerColor)
        for move in moves:
            newBoard = deepcopy(board)
            
            gamePlay.doMove(newBoard,move)
            v = max(v,minimax(newBoard, depth-1, alpha, beta, False))
            alpha = max(alpha,v)
            if beta <= alpha:  # beta cutoff
                return alpha
        return v
    else:
        v = sys.maxint
        moves = getAllPossibleMoves(board, currentPlayerColor)
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard,move)
            v = min(v,minimax(newBoard, depth-1, alpha, beta, True))
            beta = min(beta,v)
            if beta <= alpha:  # alpha cutoff
                return beta
        return v
        
        
コード例 #3
0
def evaluation(board, color, depth, turn, opponentColor, alpha, beta):
    """Alpha-beta minimax ('max'/'min' selected by `turn`) with a material
    leaf score.

    Returns the best value for `color` on a 'max' level, the worst on a
    'min' level, or None when every child of this node was pruned.

    NOTE(review): this relies on Python 2 ordering where None compares
    less than every number (e.g. `beta > opti` with opti None, and alpha
    may arrive as None from the caller) -- it raises TypeError on
    Python 3.
    """
    if depth > 1: #Comes here depth-1 times and goes to else for leaf nodes.
        depth -= 1
        opti = None
        if turn == 'max':
            moves = getAllPossibleMoves(board, color) #Gets all possible moves for player
            for move in moves:
                nextBoard = deepcopy(board)
                gamePlay.doMove(nextBoard,move)
                if beta > opti:  # prune: stop exploring once opti reaches beta
                    value = evaluation(nextBoard, color, depth, 'min', opponentColor, alpha, beta)
                    if value > opti: #None is less than everything and anything so we don't need opti == None check
                        opti = value
                    if opti > alpha:
                        alpha = opti

        elif turn == 'min':
            moves = getAllPossibleMoves(board, opponentColor) #Gets all possible moves for the opponent
            for move in moves:
                nextBoard = deepcopy(board)
                gamePlay.doMove(nextBoard,move)
                if alpha == None or opti == None or alpha < opti: #None conditions are to check for the first times
                    value = evaluation(nextBoard, color, depth, 'max', opponentColor, alpha, beta)
                    if opti == None or value < opti: #opti = None for the first time
                        opti = value
                    if opti < beta:
                        beta = opti

        return opti # opti will contain the best value for player in MAX turn and worst value for player in MIN turn

    else: #Comes here for the last level i.e leaf nodes
        # Material count over the 32 playable squares: man = 2, king = 3
        # (the 1.5x ratio explained in the comment block below).
        value = 0
        for piece in range(1, 33):
            xy = gamePlay.serialToGrid(piece)
            x = xy[0]
            y = xy[1]
            #Below, we count the number of kings and men for each color.
            #A player king is 1.5 times more valuable than a player man.
            #An opponent king is 1.5 times worse for the player than an opponent man.
            #By assigning more weight on kings, the AI will prefer killing opponent kings to killing opponent men.
            #It will also prefer saving player kings to saving player men when the situation demands.
            #If a player king is double the value of a man, then AI may choose to sacrifice a man to make a king.
            #To avoid this, a factor of 1.5 has been chosen.
            if board[x][y] == color.lower():
                value += 2
            elif board[x][y] == opponentColor.lower():
                value -= 2
            elif board[x][y] == color.upper():
                value += 3
            elif board[x][y] == opponentColor.upper():
                value -= 3
        return value
コード例 #4
0
ファイル: vpalakur.py プロジェクト: doganaltinbas/Checkers-AI
def evaluation(board, color, depth, turn, opponentColor, alpha, beta):
    """Depth-limited minimax with alpha-beta pruning; leaves are scored by
    material (man = 2, king = 3, the 1.5x ratio described below).

    `turn` is 'max' when `color` is to move, 'min' when the opponent is.
    Returns None when every child of this node was pruned away.

    NOTE(review): depends on Python 2's total ordering where None is less
    than any number (`beta > opti` with opti None; alpha may be None at
    the root) -- this would raise TypeError on Python 3.
    """
    if depth > 1: #Comes here depth-1 times and goes to else for leaf nodes.
        depth -= 1
        opti = None
        if turn == 'max':
            moves = getAllPossibleMoves(board, color) #Gets all possible moves for player
            for move in moves:
                nextBoard = deepcopy(board)
                gamePlay.doMove(nextBoard,move)
                if beta > opti:  # prune once the running maximum reaches beta
                    value = evaluation(nextBoard, color, depth, 'min', opponentColor, alpha, beta)
                    if value > opti: #None is less than everything and anything so we don't need opti == None check
                        opti = value
                    if opti > alpha:
                        alpha = opti

        elif turn == 'min':
            moves = getAllPossibleMoves(board, opponentColor) #Gets all possible moves for the opponent
            for move in moves:
                nextBoard = deepcopy(board)
                gamePlay.doMove(nextBoard,move)
                if alpha == None or opti == None or alpha < opti: #None conditions are to check for the first times
                    value = evaluation(nextBoard, color, depth, 'max', opponentColor, alpha, beta)
                    if opti == None or value < opti: #opti = None for the first time
                        opti = value
                    if opti < beta:
                        beta = opti

        return opti # opti will contain the best value for player in MAX turn and worst value for player in MIN turn

    else: #Comes here for the last level i.e leaf nodes
        value = 0
        for piece in range(1, 33):
            xy = gamePlay.serialToGrid(piece)
            x = xy[0]
            y = xy[1]
            #Below, we count the number of kings and men for each color.
            #A player king is 1.5 times more valuable than a player man.
            #An opponent king is 1.5 times worse for the player than an opponent man.
            #By assigning more weight on kings, the AI will prefer killing opponent kings to killing opponent men.
            #It will also prefer saving player kings to saving player men when the situation demands.
            #If a player king is double the value of a man, then AI may choose to sacrifice a man to make a king.
            #To avoid this, a factor of 1.5 has been chosen.
            if board[x][y] == color.lower():
                value += 2
            elif board[x][y] == opponentColor.lower():
                value -= 2
            elif board[x][y] == color.upper():
                value += 3
            elif board[x][y] == opponentColor.upper():
                value -= 3
        return value
コード例 #5
0
def minimax_simple(node, depth, maximizingPlayer):
    """Plain minimax (no pruning); returns a (bestMove, bestValue) pair.

    The side to move is taken from the module globals myColor /
    opponentColor depending on whose turn it is.  At depth 0 or on a
    terminal position the heuristic score is returned with no move.
    """
    global myColor
    global opponentColor
    color = myColor if maximizingPlayer else opponentColor

    if depth == 0 or isTerminal(node, color):
        return (None, heuristic(node))

    chosen = None
    candidates = getAllPossibleMoves(node, color)
    if maximizingPlayer:
        score = -1000000000000
        for candidate in candidates:
            child = deepcopy(node)
            gamePlay.doMove(child, candidate)
            childScore = minimax_simple(child, depth - 1, False)[1]
            # Strict comparison: ties keep the earliest candidate.
            if childScore > score:
                score = childScore
                chosen = candidate
    else:
        score = 1000000000000
        for candidate in candidates:
            child = deepcopy(node)
            gamePlay.doMove(child, candidate)
            childScore = minimax_simple(child, depth - 1, True)[1]
            if childScore < score:
                score = childScore
                chosen = candidate

    return (chosen, score)
コード例 #6
0
def nextMove(board, color, time, movesRemaining):
    """Choose a move for `color`, adapting search depth to the clock.

    Depth is 8 by default, 5 when under 60 seconds remain, and 4 for the
    first four moves of the game.  Updates the module-global moveCount
    and publishes `color` through the module-global myColor.
    """
    depth = 8
    global myColor
    myColor = color

    global moveCount
    moveCount += 1

    # If time left <= 60s, reduce depth to 5.
    if time <= 60:
        depth = 5

    # During the initial moves keep depth at 4.
    if moveCount <= 4:
        depth = 4
    moves = getAllPossibleMoves(board, color)

    # Single legal move: return it directly without searching.
    # (The original also deep-copied the board and applied the move here;
    # that copy was never used, so the dead work has been removed.  The
    # tab-indented lines have also been normalized to spaces.)
    if len(moves) == 1:
        return moves[0]

    val, bestMove = minimax(board, color, time, float("-inf"), float("inf"), depth, True, movesRemaining)
    return bestMove
コード例 #7
0
def minimax(board, color, time, alpha, beta, depth, maximizingPlayer, movesRemaining):
    """Alpha-beta minimax; returns (value, bestMove) for `color` to move.

    At depth 0, or when there is no legal move, returns the static
    evaluation and an empty list in place of a move.

    NOTE(review): the recursion passes `best` (the value found so far)
    where the parent's alpha/beta bound would normally go, so pruning is
    weaker than textbook alpha-beta; the returned value is unaffected.
    The original mixed tabs and spaces for indentation, which has been
    normalized here; the logic is unchanged.
    """
    moves = getAllPossibleMoves(board, color)
    if depth == 0 or not moves:
        return evaluation(board, color), []

    if maximizingPlayer:
        best = float("-inf")
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard, move)
            # The child's own best move (second element) is unused here.
            alpha, _ = minimax(newBoard, gamePlay.getOpponentColor(color), time,
                               best, beta, depth - 1, False, movesRemaining)
            if best == float("-inf") or alpha > best:
                bestMove = move
                best = alpha
            if best >= beta:  # fail-high cutoff
                break
        return best, bestMove
    else:
        best = float("inf")
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard, move)
            beta, _ = minimax(newBoard, gamePlay.getOpponentColor(color), time,
                              alpha, best, depth - 1, True, movesRemaining)
            if best == float("inf") or beta < best:
                bestMove = move
                best = beta
            if alpha >= best:  # fail-low cutoff
                break
        return best, bestMove
コード例 #8
0
def nextMove(board, color, time, movesRemaining): 
  """Choose a move via alphaBetaPruning, varying depth with the clock.

  Returns "pass" when there is no legal move.  Uses the module-global
  `count` to track how many turns this player has taken.
  """
  #generates all the possible moves from the current state of board   
  global count
  count = count + 1

  moves = getAllPossibleMoves(board, color)    
  if len(moves) == 0:
    return "pass"
  elif len(moves) == 1:
    return moves[0]
  #Now the current state received from gamePlay is taken as maxNode  
  maxNode = returnList(board, 0, color)  
  
  # Don't spend much time on the first two moves
  if count < 3:
    bestmove = alphaBetaPruning(maxNode,4, time)
  # when time remaining is less than 10 seconds, only go to depth 2 
  elif (time < 10):
    bestmove = alphaBetaPruning(maxNode,2, time)
  # when time remaining is less than 20 seconds, only go to depth 4
  elif (time < 20):
    bestmove = alphaBetaPruning(maxNode,4, time)
  #start the alpha beta pruning to obtain the best move with depth currently set to 6
  else:
    bestmove = alphaBetaPruning(maxNode,6, time) 
  return bestmove
コード例 #9
0
def nextMove(board, color, time, movesRemaining):
    """Choose a move for `color`, adapting search depth to time and turn.

    Depth defaults to 8; drops to 5 with under 60 seconds on the clock
    and to 4 for the first four moves.  Updates the module-global
    moveCount and publishes `color` via the module-global myColor.
    """
    depth = 8
    global myColor
    myColor = color

    global moveCount
    moveCount += 1

    # if time left <=60 reduce depth to 5
    if time <= 60:
        depth = 5

    # during initial moves keep depth to 4
    if moveCount <= 4:
        depth = 4
    moves = getAllPossibleMoves(board, color)

    # Only one legal move: return it immediately without searching.
    # (Removed the original's deepcopy + doMove here -- the copied board
    # was never used, so it was dead work.)
    if len(moves) == 1:
        return moves[0]

    val, bestMove = minimax(board, color, time, float("-inf"), float("inf"),
                            depth, True, movesRemaining)
    return bestMove
コード例 #10
0
def minimax(board, color, time, alpha, beta, depth, maximizingPlayer,
            movesRemaining):
    """Alpha-beta minimax; returns (value, bestMove) for `color` to move.

    At depth 0, or when there is no legal move, returns the static
    evaluation with an empty list in place of a move.

    NOTE(review): each recursive call passes `best` (the value found so
    far) where the parent's alpha/beta bound would normally go, so the
    pruning is weaker than textbook alpha-beta, though the returned value
    is unaffected.
    """
    moves = getAllPossibleMoves(board, color)
    if depth == 0 or not moves:
        return evaluation(board, color), []

    if maximizingPlayer == True:
        best = float("-inf")
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard, move)
            # x (the child's best move) is unused at this level.
            alpha, x = minimax(newBoard, gamePlay.getOpponentColor(color),
                               time, best, beta, depth - 1, False,
                               movesRemaining)
            if best == float("-inf") or alpha > best:
                bestMove = move
                best = alpha
            if best >= beta:  # fail-high cutoff
                break
        return best, bestMove
    else:
        best = float("inf")
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard, move)
            beta, x = minimax(newBoard, gamePlay.getOpponentColor(color), time,
                              alpha, best, depth - 1, True, movesRemaining)
            if best == float("inf") or beta < best:
                bestMove = move
                best = beta
            if alpha >= best:  # fail-low cutoff
                break
        return best, bestMove
コード例 #11
0
def alphabetaPruning(board,color,alpha,beta, depth, maxval,movesRemaining):
    """Alpha-beta search; returns the minimax value of `board`.

    `maxval` selects the maximizing (True) or minimizing (False) player;
    at depth 0 (or when movesRemaining is 0) the board is scored with
    evaluation(board, maxColor).

    NOTE(review): `maxColor` is a module global that must be set by the
    caller before the first call.  `movesRemaining` is never decremented
    inside the recursion, so its check can only fire at the root.  The
    original mixed tabs and spaces (line with opponentColor used spaces
    among tab-indented lines), which has been normalized; logic is
    unchanged.
    """
    if depth == 0 or movesRemaining == 0:
        return evaluation(board, maxColor)
    moves = getAllPossibleMoves(board, color)
    opponentColor = gamePlay.getOpponentColor(color)
    test = float("inf")
    if maxval:
        value = -test
        for move in moves:
            evalBoard = deepcopy(board)
            gamePlay.doMove(evalBoard, move)
            value = max(value, alphabetaPruning(evalBoard, opponentColor, alpha, beta, depth-1, False, movesRemaining))
            alpha = max(alpha, value)
            if beta <= alpha:  # beta cutoff
                break
        return value
    else:
        value = test
        for move in moves:
            evalBoard = deepcopy(board)
            gamePlay.doMove(evalBoard, move)
            value = min(value, alphabetaPruning(evalBoard, opponentColor, alpha, beta, depth-1, True, movesRemaining))
            beta = min(beta, value)
            if beta <= alpha:  # alpha cutoff
                break
        return value
コード例 #12
0
def nextMove(board, color, time, movesRemaining):
    """Root chooser: depth-limited miniMax per move, with the depth picked
    from which quarter of the original time budget remains (plus some
    randomness in the move-count brackets).

    Records the first observed time / movesRemaining in module globals so
    later turns can tell how much budget has been used.

    NOTE(review): several time/move ranges fall through every branch and
    leave depth == 0 (e.g. time between init_time/4 and init_time/2), so
    miniMax is then called with depth 0 -- confirm that is intended.
    """
    global my_color, init_time, time_set, init_moves
    my_color = color
    print "My move turn\n"
    moves = getAllPossibleMoves(board, color)
    #Trying to find the move where I have best score
    bestMove=None

    if len(moves)==1:              ## return the move when only a single move is present 
        bestMove = moves[0]
        print "return the only move left\n"
    else:                                 ## more than one possible move is present.
        best = None
        depth =0
        heuristic = curr_evaluation
        alpha = -float('inf')
        beta = float('inf')
        if not time_set:            ## recording the time given so as to split into intervals
            time_set = True
            init_time = time
            init_moves=movesRemaining
        if init_time*3/4 <=time and time<init_time:   ##game is in the first quarter
            if movesRemaining>146:
                depth = random.randrange(2,4)
            elif movesRemaining>138 and movesRemaining <=146:
                depth =5
            elif movesRemaining > init_moves*2/3 and movesRemaining<=138:
                depth=6
            else:
                depth=5
        elif init_time/2 <=time and time< init_time*3/4:     ## game is in the second quarter
            if movesRemaining>init_moves/3 and movesRemaining< 2*init_moves/3:
                depth = random.randrange(3,5)
            elif movesRemaining>0 and movesRemaining <=init_moves/3:
                depth = random.randrange(4,6)
                # NOTE(review): no else here -- depth stays 0 otherwise.
        elif time>0 and time<= init_time/4:
            if movesRemaining>init_moves/3 and movesRemaining< 2*init_moves/3:
                depth = random.randrange(4,7)
            elif movesRemaining>0 and movesRemaining <=init_moves/3:
                depth = random.randrange(6,8)
            else:
                depth=5
            
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard,move)        
            moveVal = miniMax(newBoard,depth,float('inf'), alpha, True, gamePlay.getOpponentColor(color), heuristic)  ### we have already evaluated Max's childs here so, its Min's turn to make a move on each of these childs, so min turn is true.
            if best == None or moveVal > best:
                bestMove = move
                best = moveVal
                alpha = moveVal
           
    return bestMove
コード例 #13
0
def nextMove(board, color, time, movesRemaining):
    """Root chooser: alpha-beta `evaluation` search per move, with a
    defensive tie-break.

    Search depth scales with the remaining clock (1, 2, 4, then 6 or 8).
    Moves that score equally are collected in equalMoves; among those,
    prefer a move whose landing square is diagonally adjacent to a
    friendly piece (a defensive formation).

    NOTE(review): alpha starts as None and is compared against numbers
    below; that relies on Python 2's None-ordering and breaks on
    Python 3.
    """
    moves = getAllPossibleMoves(board, color)
    if len(moves) == 1:
        return moves[0]
    opponentColor = gamePlay.getOpponentColor(color)
    equalMoves = []
    best = None
    alpha = None
    beta = float("inf")
    # If the time remaining < 3 seconds, then just apply simpleGreedy and increase depth according to time
    if time < 3:
        depth = 1
    elif time < 10:
        depth = 2
    elif time < 30:
        depth = 4
    else:
        if movesRemaining > 40:
            depth = 8
        else:
            depth = 6

    for move in moves: # this is the max turn(1st level of minimax), so next should be min's turn
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard,move)
        #Beta is always inf here as there is no parent MIN node. So no need to check if we can prune or not.
        moveVal = evaluation(newBoard, color, depth, 'min', opponentColor, alpha, beta)
        if best == None or moveVal > best:
            bestMove = move
            best = moveVal
            equalMoves = []
            equalMoves.append(move)
        elif moveVal == best:
            equalMoves.append(move)
        if best > alpha:
            alpha = best
    #So the equalMoves consists of all the moves that have ended up with same value after Minimax evaluation
    if len(equalMoves) > 1:
        #The below logic tries to see if there is any next move that will form a defensive structure from the
        #equalMoves list and returns it.
        for move in equalMoves:
            l = len(move)
            # Landing square of the (possibly multi-jump) move.
            xy = gamePlay.serialToGrid(move[l-1])
            x = xy[0]
            y = xy[1]
            # A friendly piece on any diagonal neighbour backs this move up.
            if (x+1) <= 7:
                if (y+1) <= 7 and board[x+1][y+1].lower() == color.lower():
                    return move
                if (y-1) >= 0 and board[x+1][y-1].lower() == color.lower():
                    return move
            if (x-1) >= 0:
                if (y+1) <= 7 and board[x-1][y+1].lower() == color.lower():
                    return move
                if (y-1) >= 0 and board[x-1][y-1].lower() == color.lower():
                    return move
    return bestMove
コード例 #14
0
def alphaBeta(board, move, depth, alpha, beta, maximizingPlayer, color, opColor):
    """Alpha-beta search over checkers positions.

    `move` is applied to a copy of `board` before the children are
    expanded.  At depth 0 the position is scored as a weighted blend of
    the evaluation1..evaluation7 heuristics, two of which are gated on
    the current gamePhase.
    """
    if depth == 0:
        # Weighted sum of the individual heuristics.
        score = evaluation1(board, color) * 10
        if gamePhase(board, color) == 3:
            score += evaluation2(board, color) * 5  # king heuristic, late phase only
        score += evaluation3(board, color) * 10
        score += evaluation4(board, color) * 1
        score += evaluation5(board, color) * 1
        if gamePhase(board, color) != 0:
            score += evaluation6(board, color) * 1  # base-row heuristic, skipped in phase 0
        score += evaluation7(board, color) * 2
        return score

    childBoard = deepcopy(board)
    gamePlay.doMove(childBoard, move)

    if maximizingPlayer:
        value = -float('inf')
        for reply in getAllPossibleMoves(childBoard, color):
            value = max(value, alphaBeta(childBoard, reply, depth - 1, alpha, beta, False, color, opColor))
            alpha = max(alpha, value)
            if beta <= alpha:  # beta cutoff
                break
        return value
    else:
        value = float('inf')
        for reply in getAllPossibleMoves(childBoard, opColor):
            value = min(value, alphaBeta(childBoard, reply, depth - 1, alpha, beta, True, color, opColor))
            beta = min(beta, value)
            if beta <= alpha:  # alpha cutoff
                break
        return value
コード例 #15
0
def nextMove(board, col, time, movesRemaining):
    """Root move chooser: scores each legal move and returns the best.

    Sets the module globals currentPlayerColor / opponentPlayerColor used
    by minimax() and evaluation1().  Search depth is chosen from the time
    and moves remaining.
    """
    global currentPlayerColor
    currentPlayerColor = col
    global opponentPlayerColor
    opponentPlayerColor = getOpponentColor(col)
    moves = getAllPossibleMoves(board, currentPlayerColor)

    # Only one legal move: no need to search.
    if len(moves) == 1:
        return moves[0]

    best = None
    bestMove = None
    for move in moves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, move)
        alpha = -sys.maxint - 1
        beta = sys.maxint
        if time < 5 or movesRemaining < 4:
            # Nearly out of time or moves: one-ply greedy evaluation.
            moveVal = evaluation1(newBoard)
        elif (movesRemaining / 2) < 30:
            # Few moves remaining: stay shallow.
            moveVal = minimax(newBoard, 3, alpha, beta, True)
        elif movesRemaining > 140:
            # Opening: the heuristics handle it, no need to go deep.
            moveVal = minimax(newBoard, 3, alpha, beta, True)
        else:
            # Midgame/endgame: pick the depth from the clock.  (The
            # original also had a `time < 5` depth-1 branch here, which
            # was unreachable because time >= 5 is guaranteed at this
            # point; it has been removed.)
            if time < 18:
                moveVal = minimax(newBoard, 3, alpha, beta, True)
            elif time < 23:
                moveVal = minimax(newBoard, 5, alpha, beta, True)
            elif time < 28:
                moveVal = minimax(newBoard, 7, alpha, beta, True)
            else:
                moveVal = minimax(newBoard, 5, alpha, beta, True)

        if best == None or moveVal > best:
            bestMove = move
            best = moveVal
    return bestMove
コード例 #16
0
ファイル: minMax.py プロジェクト: namratajagasia/CheckersGame
def nextMove(board, col, time, movesRemaining):
    """Iterative-deepening root: search each move with minimax at global
    depth `deep`, falling back to a random move when time is short.

    Side effects: appends `time` to the global timeList, increments the
    global `deep` (capped at 8) after a full search, and prints debug
    traces.

    NOTE(review): if `moves` is empty the loop never runs and the final
    return raises NameError on the unbound bestMove.  elapsed /
    originalTime / differenceTime feed only the debug prints and the
    commented-out budget condition below.
    """
    global deep
    global timeList
    #Assigned global current and  opponent color used to expand nodes and evaluate as per the player
    global currentPlayerColor 
    currentPlayerColor=col
    global opponentPlayerColor
    opponentPlayerColor = getOpponentColor(col)
    moves = getAllPossibleMoves(board, currentPlayerColor)#returns a list of possible moves[] for the current player
    #Trying to find the move where my game has best score
    if len(moves)==1:
        return moves[0]
    timeList.append(time)
    elapsed=timeList[0]-time
    originalTime = elapsed + time
    print "timeList" , timeList
    if(len(timeList)>=2):
        print "secondLast", timeList[-2]
        differenceTime = timeList[-2]-timeList[-1]
    else:
        differenceTime=0    
    print "last",timeList[-1]
    
    best = None
    
    #if time==0 or ((differenceTime*2) >= timeList[-1]) or countPieces(board, currentPlayerColor)<= differenceTime:
            #or time differnce between last and second last 
    # Fallback: nearly out of time (<=3s) or more moves than seconds left --
    # play a random move instead of searching.
    if time<=3 or movesRemaining>=time:
        newBoard = deepcopy(board)
        print "calling random move"
        bestMove = randomMove(newBoard,currentPlayerColor)                
    else:        
        for move in moves:  
            newBoard = deepcopy(board)          
            gamePlay.doMove(newBoard,move)#get the possible states
            depth=deep  # NOTE(review): assigned but unused; minimax receives `deep`
            
            alpha = -sys.maxint - 1
            beta = sys.maxint           
            print "calling next move" 
            
            moveVal = minimax(newBoard,deep,alpha,beta,True)
                    
                        
            if best == None or moveVal > best:
                bestMove = move
                best = moveVal
        if deep<=7:            
            deep=deep+1
            
    print "increased depth,new depth is ",deep   
    #timeList.append(time)
    print"time list after eval",timeList    
    return bestMove
コード例 #17
0
def nextMove(board, color, time, movesRemaining):
    """One-ply greedy chooser: apply each legal move to a scratch board
    and keep the one whose resulting position evaluates highest."""
    best = None
    for candidate in getAllPossibleMoves(board, color):
        trial = deepcopy(board)
        gamePlay.doMove(trial, candidate)
        score = evaluation(trial, color)
        # Strict comparison: ties keep the earliest candidate.
        if best == None or score > best:
            bestMove = candidate
            best = score
    return bestMove
コード例 #18
0
def nextMove(board, color, time, movesRemaining):
    """Root chooser: score each legal move with a depth-4 getMoveVal
    search (opponent to move next) and return the best-scoring move."""
    best = bestMove = None
    for candidate in getAllPossibleMoves(board, color):
        child = deepcopy(board)
        gamePlay.doMove(child, candidate)
        score = getMoveVal(child, getOpponentColor(color), time, movesRemaining, 4, False, 0, sys.maxint)
        # Strict comparison: ties keep the earliest candidate.
        if best == None or score > best:
            bestMove = candidate
            best = score
    return bestMove
コード例 #19
0
def nextMove(board, col, time, movesRemaining):
    """Root move chooser for `col`: evaluate every legal move and return
    the best one.

    Publishes currentPlayerColor / opponentPlayerColor as module globals
    for minimax() and evaluation1().  Depth is picked from the remaining
    time and moves.
    """
    global currentPlayerColor
    currentPlayerColor = col
    global opponentPlayerColor
    opponentPlayerColor = getOpponentColor(col)
    moves = getAllPossibleMoves(board, currentPlayerColor)

    # If there is only one possible move, just return it.
    if len(moves) == 1:
        return moves[0]

    best = None
    bestMove = None
    for move in moves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, move)
        alpha = -sys.maxint - 1
        beta = sys.maxint
        if time < 5 or (movesRemaining) < 4:
            # Time or moves nearly exhausted: simple greedy evaluation.
            moveVal = evaluation1(newBoard)
        elif (movesRemaining / 2) < 30:
            # Very few moves remain: do not go too deep.
            moveVal = minimax(newBoard, 3, alpha, beta, True)
        elif movesRemaining > 140:
            # Start of the game: opening heuristics, no need to go deep.
            moveVal = minimax(newBoard, 3, alpha, beta, True)
        else:
            # Otherwise scale depth with remaining time.  (An unreachable
            # `time < 5` depth-1 branch was removed here: the outer guard
            # already guarantees time >= 5 on this path.)
            if time < 18:
                moveVal = minimax(newBoard, 3, alpha, beta, True)
            elif time < 23:
                moveVal = minimax(newBoard, 5, alpha, beta, True)
            elif time < 28:
                moveVal = minimax(newBoard, 7, alpha, beta, True)
            else:
                moveVal = minimax(newBoard, 5, alpha, beta, True)

        if best == None or moveVal > best:
            bestMove = move
            best = moveVal
    return bestMove
def nextMove(board, color, time, movesRemaining):
    """Greedy one-ply chooser: evaluate the board after each legal move
    and return the move with the highest score."""
    topScore = None
    for option in getAllPossibleMoves(board, color):
        scratch = deepcopy(board)
        gamePlay.doMove(scratch, option)
        score = evaluation(scratch, color)
        # Strict comparison keeps the first of any tied options.
        if topScore == None or score > topScore:
            bestMove = option
            topScore = score
    return bestMove
コード例 #21
0
def handleError(board, color, time, movesRemaining):
    """Fallback used when the main search raises: pick a move by simple
    one-ply greedy evaluation."""
    best = None
    for fallback in getAllPossibleMoves(board, color):
        probe = deepcopy(board)
        gamePlay.doMove(probe, fallback)
        val = evaluation(probe, color)
        # Strict comparison keeps the first of any tied moves.
        if best == None or val > best:
            bestMove = fallback
            best = val
    return bestMove
コード例 #22
0
def nextMove(board, color, time, movesRemaining):
	"""Human player: prompt on stdin until a legal move is entered."""
	moved = False
	while moved == False:
		print 'Possible moves:', getAllPossibleMoves(board, color)
		moveStr = raw_input("Please enter your move(" + color + "): ")
		# SECURITY: exec on raw user input executes arbitrary code.  This is
		# tolerable only for a local console game; never expose it to
		# untrusted input (ast.literal_eval would be the safe alternative).
		exec('move=[' + moveStr + ']')
						
		if gamePlay.isLegalMove(board, move, color):			
			moved = True			
			return move
		else:
			# Invalid move: report it and re-prompt.
			print "Illegal move", str(move)
コード例 #23
0
def nextMove(board, color, time, movesRemaining):
	"""Human player: prompt on stdin until a legal move is entered."""
	moved = False
	while moved == False:
		print 'Possible moves:', getAllPossibleMoves(board, color)
		moveStr = raw_input("Please enter your move(" + color + "): ")
		# SECURITY: exec on raw user input executes arbitrary code.  This is
		# tolerable only for a local console game; never expose it to
		# untrusted input (ast.literal_eval would be the safe alternative).
		exec('move=[' + moveStr + ']')
						
		if gamePlay.isLegalMove(board, move, color):			
			moved = True			
			return move
		else:
			# Invalid move: report it and re-prompt.
			print "Illegal move", str(move)
コード例 #24
0
ファイル: minMax.py プロジェクト: namratajagasia/CheckersGame
def minimax(board, depth, alpha, beta, maximizingPlayer):
    """Alpha-beta minimax search.

    The MAX layer expands opponentPlayerColor's moves and the MIN layer
    expands currentPlayerColor's moves, matching the caller's convention
    (the current player's move is applied before the first call).
    Leaves -- depth exhausted, or either side out of moves -- are scored
    with evaluation(board).  Cut-offs return the current bound.
    """
    global currentPlayerColor
    global opponentPlayerColor
    noMovesLeft = (not isAnyMovePossible(board, currentPlayerColor)
                   or not isAnyMovePossible(board, opponentPlayerColor))
    if depth == 0 or noMovesLeft:
        return evaluation(board)
    if maximizingPlayer:
        best = -sys.maxint - 1
        for candidate in getAllPossibleMoves(board, opponentPlayerColor):
            child = deepcopy(board)
            gamePlay.doMove(child, candidate)
            best = max(best, minimax(child, depth - 1, alpha, beta, False))
            alpha = max(alpha, best)
            if beta <= alpha:
                # beta cut-off: the parent MIN node will not allow this line.
                return alpha
        return best
    else:
        best = sys.maxint
        for candidate in getAllPossibleMoves(board, currentPlayerColor):
            child = deepcopy(board)
            gamePlay.doMove(child, candidate)
            best = min(best, minimax(child, depth - 1, alpha, beta, True))
            beta = min(beta, best)
            if beta <= alpha:
                # alpha cut-off.
                return beta
        return best
コード例 #25
0
def minimax(board, depth, alpha, beta, maximizingPlayer):
    """Alpha-beta minimax search.

    The MAX layer expands opponentPlayerColor's moves and the MIN layer
    expands currentPlayerColor's moves (matching the calling convention of
    nextMove, which applies the current player's move first and then calls
    minimax with maximizingPlayer=True).  Leaves -- depth 0 or no moves
    available for either side -- are scored with evaluation(board).
    """
    global currentPlayerColor

    global opponentPlayerColor
    if (
        depth == 0
        or not isAnyMovePossible(board, currentPlayerColor)
        or not isAnyMovePossible(board, opponentPlayerColor)
    ):
        return evaluation(board)
    if maximizingPlayer:
        v = -sys.maxint - 1  # Python 2: smallest machine int as -infinity
        moves = getAllPossibleMoves(board, opponentPlayerColor)
        for move in moves:
            newBoard = deepcopy(board)

            gamePlay.doMove(newBoard, move)
            # print "inside max",color
            # color=getOpponentColor(color)
            v = max(v, minimax(newBoard, depth - 1, alpha, beta, False))
            alpha = max(alpha, v)
            if beta <= alpha:
                # beta cut-off: parent MIN node will never allow this line.
                return alpha
        return v
    else:
        v = sys.maxint  # +infinity stand-in for the MIN layer
        moves = getAllPossibleMoves(board, currentPlayerColor)
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard, move)
            # print "inside min",color
            # color=getOpponentColor(color)
            v = min(v, minimax(newBoard, depth - 1, alpha, beta, True))
            beta = min(beta, v)
            if beta <= alpha:
                # alpha cut-off.
                return beta
        return v
コード例 #26
0
def alphaBetaPruning(node, depth, time):
  """Root of the alpha-beta search.

  node is a triple-like list [board, ply, color]; returns the best move
  for node's color by expanding each move and scoring the resulting
  position with minimum() (the opponent replies next).
  """
  moves = getAllPossibleMoves(node[0], node[2])
  bestMove = moves[0]
  bestScore = -float("inf")  # lower bound at the root MAX node
  for candidate in moves:
    childBoard = deepcopy(node[0])
    gamePlay.doMove(childBoard, candidate)
    # Child node: one ply deeper, opponent to move.
    childNode = returnList(childBoard, node[1] + 1, gamePlay.getOpponentColor(node[2]))
    score = minimum(childNode, depth, -float("inf"), float("inf"), time)
    if score > bestScore:
      bestScore = score
      bestMove = candidate
  return bestMove
コード例 #27
0
def maxChance(newBoard,depth,color,opponentColor,alpha,beta):
    """MAX level of alpha-beta minimax (paired with minChance).

    NOTE(review): maxScore starts as None and `beta > maxScore` relies on
    Python 2's cross-type ordering (None compares below every number);
    under Python 3 that comparison raises TypeError.  If the position has
    no moves, None is returned -- minChance's None checks absorb that.
    """
    if depth == 0: # If the depth reaches the value 0, it means we have reached the leave node and now we need to estimate the value at that node using evaluation functions. 
        return evaluation(newBoard,color,opponentColor)
    else:
        maxScore = None
        moves = getAllPossibleMoves(newBoard,color)
        for move in moves:
            nextBoard = deepcopy(newBoard)
            gamePlay.doMove(nextBoard,move)
            # Skip the subtree once beta can no longer be beaten (pruning).
            if beta > maxScore:
                    score = minChance(nextBoard,depth-1,color, opponentColor, alpha, beta) # Next level is MIN's turn; the recursion alternates.
                    if score > maxScore: 
                        maxScore = score
                    if maxScore > alpha:
                        alpha = maxScore
    return maxScore
コード例 #28
0
	def minimize(board, alpha, beta, depth):
		"""MIN layer of alpha-beta: expand myColor's replies on `board` and
		return the minimum heuristic score.

		Fixes: the recursive call previously searched the ORIGINAL board
		instead of the board with the move applied (board vs newBoard), so
		every child of a MIN node scored the same position; the body also
		mixed tabs and spaces (a TabError under Python 3).
		"""
		global opponentColor
		global myColor
		# Depth limit reached or the opponent cannot move: score the position.
		if depth <= 0 or not gamePlay.isAnyMovePossible(board, opponentColor):
			return evaluate(board, myColor)
		score = sys.maxint
		for move in getAllPossibleMoves(board, myColor):
			newBoard = deepcopy(board)
			gamePlay.doMove(newBoard, move)
			# BUG FIX: recurse on newBoard (position after 'move'), not board.
			score = min(score, maximize(newBoard, alpha, beta, depth - 1))
			# alpha cut-off: the parent MAX node already has a better option.
			if score <= alpha:
				return score
			beta = min(beta, score)
		return score
コード例 #29
0
def iterativeDeepeningAlphaBetaPruning(board, time, maxRemainingMoves):
    """Choose a move for myColor via alpha-beta search, with the depth
    adapted to remaining time and material balance.

    Fixes: the original body mixed tabs and spaces (a TabError under
    Python 3) and compared an int against None with '=='; the logic is
    otherwise unchanged.
    """
    global myColor
    global opponentColor

    # Opening book: on the very first move play the standard opening for
    # each side instead of searching.
    if maxRemainingMoves == 150:
        if myColor == 'r':
            return [11, 15]
        else:
            return [22, 18]

    moves = getAllPossibleMoves(board, myColor)

    # Forced move: return it without searching.
    if len(moves) == 1:
        return moves[0]

    depth = 4
    myPieces = gamePlay.countPieces(board, myColor)
    opponentPieces = gamePlay.countPieces(board, opponentColor)

    # NOTE(review): Python-2 integer division -- pieceRatio is 0 whenever
    # we have fewer pieces than the opponent, never a fraction; confirm
    # that is the intended semantics before porting.
    pieceRatio = myPieces / opponentPieces
    if pieceRatio < 1:
        depth = 6

    # Shrink the depth as the clock runs down.
    if time < 30 and pieceRatio < 1:
        depth = 3
    elif time < 20 and pieceRatio > 1:
        depth = 2
    elif time < 10:
        depth = 1
    bestMove = None
    best = -sys.maxint - 1
    for move in moves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, move)
        # Score this successor with alpha-beta pruning.
        moveVal = alphaBetaPruning(newBoard, depth, time)
        if best is None or moveVal > best:
            bestMove = move
            best = moveVal
    return bestMove
コード例 #30
0
def minChance(newBoard,depth,color,opponentColor,alpha,beta):
    """MIN level of alpha-beta minimax (paired with maxChance).

    NOTE(review): alpha and minimumScore may be None; `alpha < minimumScore`
    relies on Python 2 ordering None below every number (the explicit
    `== None` guards cover most, but not all, cases).  Returns None when
    the opponent has no moves.
    """
    if depth == 0: #If the depth reaches the value 0, it means we have reached the leave node and now we need to estimate the value at that node using evaluation functions.
        return evaluation(newBoard,color,opponentColor)
    else:
        minimumScore = None
        moves = getAllPossibleMoves(newBoard, opponentColor)
        for move in moves:
            nextBoard = deepcopy(newBoard)
            gamePlay.doMove(nextBoard,move)
            #If the alpha score is less than the minimum score then rest of the nodes can be pruned. 
            if alpha == None or minimumScore == None or alpha < minimumScore: #None is less than everything and anything
                score = maxChance(nextBoard,depth-1, color, opponentColor, alpha, beta) # Next level is MAX's turn; the recursion alternates.
                if minimumScore == None or score < minimumScore:
                    minimumScore = score
                if minimumScore < beta:
                    beta = minimumScore

    return minimumScore
コード例 #31
0
ファイル: vpalakur.py プロジェクト: doganaltinbas/Checkers-AI
def nextMove(board, color, time, movesRemaining):
    """Root (MAX) level: score every legal move with a depth-5 search and
    return the best one.

    NOTE(review): alpha starts as None; `best > alpha` below relies on
    Python 2 ordering None below every number -- TypeError under Python 3.
    `evaluation` is called here with ('min', ...) arguments, so it appears
    to act as the recursive search entry point -- confirm its signature.
    """
    moves = getAllPossibleMoves(board, color)
    opponentColor = gamePlay.getOpponentColor(color)
    depth = 5
    best = None
    alpha = None
    beta = float("inf")
    for move in moves: # this is the max turn(1st level of minimax), so next should be min's turn
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard,move)
        #Beta is always inf here as there is no parent MIN node. So no need to check if we can prune or not.
        moveVal = evaluation(newBoard, color, depth, 'min', opponentColor, alpha, beta)
        if best == None or moveVal > best:
            bestMove = move
            best = moveVal
        # Raise the root alpha so deeper levels can prune against it.
        if best > alpha:
            alpha = best
    return bestMove
コード例 #32
0
        def maximize(board, alpha, beta, depth, time):
                """MAX layer of alpha-beta: expand opponentColor's moves on
                `board` and return the maximum heuristic score.

                Fixes: indentation normalized to spaces only -- the original
                mixed tabs and spaces, a TabError under Python 3.
                """
                global opponentColor
                global myColor
                # Stop at the depth limit, when the opponent cannot move, or
                # when the clock is nearly exhausted (< 7 seconds left).
                if depth <= 0 or not gamePlay.isAnyMovePossible(board, opponentColor) or time < 7:
                        return evaluate(board, myColor)
                score = -sys.maxint - 1
                for move in getAllPossibleMoves(board, opponentColor):
                        newBoard = deepcopy(board)
                        gamePlay.doMove(newBoard, move)
                        score = max(score, minimize(newBoard, alpha, beta, depth - 1, time))
                        # beta cut-off: the parent MIN node already has a better option.
                        if score >= beta:
                                return score
                        alpha = max(alpha, score)
                return score
コード例 #33
0
def nextMove(board, color, time, movesRemaining):
    """Root move chooser: run alpha-beta for every legal move and keep the
    highest-scoring one.  Searches depth 5 while more than 50 moves remain,
    depth 3 in the endgame.

    Fixes: tabs and spaces were mixed (a TabError under Python 3) and
    '== None' is replaced with 'is None'; behavior is otherwise unchanged.
    """
    INF = float("inf")  # search window bounds
    global maxColor  # remember which side we are maximizing for
    maxColor = color
    moves = getAllPossibleMoves(board, color)
    opponentColor = gamePlay.getOpponentColor(color)
    best = None
    bestMove = None
    finalVal = 0
    for move in moves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, move)
        # Deeper search early in the game, shallower when few moves remain.
        if movesRemaining > 50:
            finalVal = alphabetaPruning(newBoard, opponentColor, -INF, INF, 5, False, movesRemaining)
        else:
            finalVal = alphabetaPruning(newBoard, opponentColor, -INF, INF, 3, False, movesRemaining)
        if best is None or finalVal > best:
            bestMove = move
            best = finalVal
    return bestMove
コード例 #34
0
def nextMove(board, color, time, movesRemaining):
    """Pick myColor's best move by scoring each successor position with
    alphaBetaPruning.

    Fixes: a stray tab preceded the alphaBetaPruning call (a TabError
    under Python 3) and '== None' is replaced with 'is None'.
    """
    global myColor
    global opponentColor
    myColor = color
    opponentColor = gamePlay.getOpponentColor(color)
    moves = getAllPossibleMoves(board, color)
    bestMove = None
    best = None
    for move in moves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, move)
        # Score this successor with mini-max + alpha-beta pruning.
        moveVal = alphaBetaPruning(newBoard)
        if best is None or moveVal > best:
            bestMove = move
            best = moveVal
    return bestMove
コード例 #35
0
def nextMove(board, col, time, movesRemaining):
    """Root search: depth-5 alpha-beta minimax over every legal move for
    `col`; returns the highest-scoring move.  Sets the module-level player
    colors that minimax() reads.

    Fixes: bestMove is initialised (the original raised UnboundLocalError
    on an empty move list) and '== None' is replaced with 'is None'.
    """
    global currentPlayerColor
    currentPlayerColor = col
    global opponentPlayerColor
    opponentPlayerColor = getOpponentColor(col)
    moves = getAllPossibleMoves(board, currentPlayerColor)  # list of legal moves
    bestMove = None
    best = None
    for move in moves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, move)  # successor position
        depth = 5
        alpha = -sys.maxint - 1  # Python 2 machine-int search window
        beta = sys.maxint
        moveVal = minimax(newBoard, depth, alpha, beta, True)
        if best is None or moveVal > best:
            bestMove = move
            best = moveVal
    return bestMove
コード例 #36
0
def getMoveVal(board, color, time, movesRemaining, depth, calculateMax, alpha, beta):
    """Alpha-beta minimax value of `board` with `color` to move.

    calculateMax selects the MAX (True) or MIN (False) layer; the colors
    alternate down the recursion via getOpponentColor.

    Fixes: the MAX accumulator was initialised to 0, which wrongly
    dominates any position whose evaluations are all negative; it now
    starts at -sys.maxint - 1, symmetric with the MIN side.
    """
    if depth == 0 or movesRemaining == 0:
        # Leaf: always evaluate from the maximizing side's point of view.
        if calculateMax:
            return evaluation(board, color)
        return evaluation(board, getOpponentColor(color))
    moves = getAllPossibleMoves(board, color)
    # BUG FIX: -sys.maxint - 1 (was 0) so negative scores can win at MAX nodes.
    bestChild = (-sys.maxint - 1) if calculateMax else sys.maxint
    for move in moves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, move)
        moveVal = getMoveVal(newBoard, getOpponentColor(color), time, movesRemaining,
                             depth - 1, not calculateMax, alpha, beta)
        if calculateMax:
            bestChild = max(moveVal, bestChild)
            alpha = max(alpha, moveVal)
        else:
            bestChild = min(moveVal, bestChild)
            beta = min(beta, moveVal)
        if beta <= alpha:  # cut-off
            break
    # NOTE(review): movesRemaining is never decremented in the recursion --
    # the movesRemaining == 0 base case only fires if the caller passes 0.
    return bestChild
コード例 #37
0
def iterativeDeepeningAlphaBetaPruning(board, color, player, time, maxRemainingMoves):
    """Evaluate every legal move with a fixed-depth alpha-beta search and
    return the best one; the depth shrinks as the clock runs down.
    `player` and `maxRemainingMoves` are accepted for interface
    compatibility but unused here.

    Fixes: the depth-selection conditions were inverted -- e.g.
    `50 > time and time < 100` simplifies to `time < 50`, so the intended
    50-100 band never matched and times in (50, 100] kept depth 12.  The
    bands are now a simple descending elif chain.
    """
    bestMove = None
    best = None

    moves = getAllPossibleMoves(board, color)
    # Depth limit by remaining time (descending bands).
    if time > 100:
        depth = 12
    elif time > 50:
        depth = 10
    elif time > 10:
        depth = 7
    else:
        depth = 3

    # Evaluate all available moves with alpha-beta pruning at that depth.
    for move in moves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, move)
        # NOTE(review): the (sys.maxint, -sys.maxint-1) argument order is
        # kept exactly as the original call -- confirm it matches
        # alphaBetaPruning's expected (beta, alpha) parameters.
        moveVal = alphaBetaPruning(newBoard, color, sys.maxint, -sys.maxint - 1, depth)
        if best is None or moveVal > best:
            bestMove = move
            best = moveVal
    return bestMove
コード例 #38
0
def nextMove(board, col, time, movesRemaining):
    """Root search: depth-5 alpha-beta minimax over every legal move for
    `col`; returns the highest-scoring move.

    Sets the module-level player colors that minimax() reads.  `time` and
    `movesRemaining` are accepted for interface compatibility but unused.
    """
    # print "player color",color
    global currentPlayerColor
    currentPlayerColor = col
    global opponentPlayerColor
    opponentPlayerColor = getOpponentColor(col)
    moves = getAllPossibleMoves(board, currentPlayerColor)  # returns a list of possible moves[]
    # Trying to find the move where I have best score
    best = None
    for move in moves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, move)  # get the possible states
        depth = 5
        alpha = -sys.maxint - 1  # Python 2 machine-int bounds for the search window
        beta = sys.maxint
        moveVal = minimax(newBoard, depth, alpha, beta, True)  # we cal minimax to evaluate
        if best == None or moveVal > best:
            bestMove = move
            best = moveVal
    return bestMove
コード例 #39
0
def maximum(maxNode, depth, alpha, beta, time):
  """MAX node of alpha-beta search; maxNode is [board, ply, color].

  Fixes: the cutoff tests previously ran BEFORE the freshly computed child
  score was folded into bestScore, so each beta cut-off was detected one
  child too late -- the pruned subtree had already been searched.  Folding
  the score in first prunes at the earliest point without changing the
  returned value.
  """
  if maxNode[1] == depth:
    # Depth limit reached: evaluate the leaf position.
    return evaluation(maxNode[0], maxNode[2], maxNode[1], time)
  moves = getAllPossibleMoves(maxNode[0], maxNode[2])
  bestScore = -float("inf")  # lower bound for a MAX node
  for move in moves:
    newBoard = deepcopy(maxNode[0])
    gamePlay.doMove(newBoard, move)
    newNode = returnList(newBoard, maxNode[1] + 1, gamePlay.getOpponentColor(maxNode[2]))
    # The opponent (MIN) moves next.
    score = minimum(newNode, depth, alpha, beta, time)
    if score > bestScore:
      bestScore = score
    # beta cut-off: the parent MIN node will never allow this line.
    if bestScore >= beta:
      return bestScore
    if bestScore > alpha:
      alpha = bestScore
  return bestScore
コード例 #40
0
def minimax(board, maxP, color, alpha, beta, depth):
    """Alpha-beta minimax returning a (best_move, best_score) pair.

    maxP selects the MAX (True) or MIN (False) layer; colors alternate
    down the recursion.  Leaves (no moves, or depth exhausted) return
    ([], evaluation(board)).
    """
    infty = float('inf')
    moves = getAllPossibleMoves(board, color)
    opponentColor = gamePlay.getOpponentColor(color)
    # Leaf: both layers share the identical base case.
    if moves == [] or depth == 0:
        return [], evaluation(board)
    best_move = moves[0]
    if maxP:
        best_score = -infty
        for candidate in moves:
            child = deepcopy(board)
            gamePlay.doMove(child, candidate)
            _, childScore = minimax(child, False, opponentColor, alpha, beta, depth - 1)
            if childScore > best_score:
                best_score = childScore
                best_move = candidate
            alpha = max(alpha, childScore)
            # Beta cut-off
            if alpha >= beta:
                break
    else:
        best_score = infty
        for candidate in moves:
            child = deepcopy(board)
            gamePlay.doMove(child, candidate)
            _, childScore = minimax(child, True, opponentColor, alpha, beta, depth - 1)
            if childScore < best_score:
                best_score = childScore
                best_move = candidate
            beta = min(beta, childScore)
            # Alpha cut-off
            if alpha >= beta:
                break
    return best_move, best_score
コード例 #41
0
def nextMove(board, color, time, movesRemaining):
    """Root MAX level: score every legal move with a depth-5 alpha-beta
    search (minChance is the MIN level below) and return the best move.

    Fixes: the fallback initial move was assigned to 'bestmove' (lowercase
    m) while the loop and the return use 'bestMove' -- the initialiser was
    a dead variable, so an all-pruned loop would raise NameError.
    """
    possibleMoves = getAllPossibleMoves(board, color)
    opponentColor = gamePlay.getOpponentColor(color)

    bestValue = None
    bestMove = possibleMoves[0]  # BUG FIX: was 'bestmove', a dead variable
    depth = 5
    # alpha = lower bound, beta = upper bound of the true minimax value.
    alpha = float("-inf")
    beta = float("inf")
    for move in possibleMoves:
        newBoard = deepcopy(board)
        gamePlay.doMove(newBoard, move)
        # First ply is MAX, so the child call is the MIN level.
        score = minChance(newBoard, depth - 1, color, opponentColor, alpha, beta)
        if bestValue is None or score > bestValue:
            # A strictly better score replaces the current best move.
            bestMove = move
            bestValue = score
        if bestValue > alpha:
            alpha = bestValue
    return bestMove
コード例 #42
0
def nextMove(board, color, time, movesRemaining):
    """Layered move chooser: hardcoded opening, tactical filters, then
    alpha-beta search with a time-adaptive depth.

    Fixes:
      * Strategy2 removed elements from `moves` while iterating over it,
        which skips the element following every removal; it now builds a
        filtered copy (falling back to the unfiltered list if filtering
        would leave no moves at all).
      * Strategy3's blocking test examined the ORIGINAL board instead of
        the board after our candidate move (board vs myBoard), so the
        condition never varied across candidates.
    """
    # Depth budget by remaining clock time.
    if time > 120:
        depth = 8
    elif time > 60:
        depth = 6
    elif time > 5:
        depth = 4
    else:
        depth = 2
    moves = getAllPossibleMoves(board, color)
    best = None
    opColor = gamePlay.getOpponentColor(color)
    # Forced move: no computation needed.
    if len(moves) == 1:
        return moves[0]
    # Strategy1: hardcode the best first move for each side.
    if isFirstMove(board, color):
        if color.upper() == 'R':
            return [11, 15]
        return [22, 18]

    # Strategy2: drop moves that hand the opponent an immediate capture.
    # BUG FIX: build a new list instead of mutating `moves` mid-iteration.
    safeMoves = []
    for move in moves:
        myBoard = deepcopy(board)
        gamePlay.doMove(myBoard, move)
        if not gamePlay.isCapturePossible(myBoard, opColor):
            safeMoves.append(move)
    # If every move concedes a capture, keep the full list rather than
    # searching over an empty one.
    if safeMoves:
        moves = safeMoves

    # Strategy3: if the opponent threatens a capture, either let it happen
    # when we answer with a double capture, or try to block it.
    opMoves = getAllPossibleMoves(board, opColor)
    if gamePlay.isCapturePossible(board, opColor):
        # On a capture, opMoves usually holds a single forced move.
        opMove = opMoves[0]
        opBoard = deepcopy(board)
        gamePlay.doMove(opBoard, opMove)
        myMoves = getAllPossibleMoves(opBoard, color)
        if len(myMoves) > 0:
            myMove = myMoves[0]
            # A move of length > 2 is a double capture.
            if len(myMove) > 2:
                # Do nothing, let opponent capture -- we recapture two.
                pass
            else:
                # Try to find a move that removes the opponent's capture.
                for move in moves:
                    myBoard = deepcopy(board)
                    gamePlay.doMove(myBoard, move)
                    # BUG FIX: test the board AFTER our move (myBoard),
                    # not the unchanged original position.
                    if not gamePlay.isCapturePossible(myBoard, opColor):
                        return move

    # Main minimax with alpha-beta pruning.
    bestMove = None
    for move in moves:
        # Strategy4: take an available double capture greedily.
        if len(move) > 2:
            return move
        newBoard = deepcopy(board)
        alpha = -float('inf')
        beta = float('inf')
        alphaVal = alphaBeta(newBoard, move, depth, alpha, beta, True, color, opColor)
        if best is None or alphaVal > best:
            bestMove = move
            best = alphaVal
    return bestMove
コード例 #43
0
def nextMove(board, color, time, movesRemaining):
    '''Just play randomly among the possible moves.

    Idiom fix: random.choice replaces the manual
    moves[random.randint(0, len(moves) - 1)] indexing; the distribution
    over moves is identical (uniform).
    '''
    moves = getAllPossibleMoves(board, color)
    return random.choice(moves)
コード例 #44
0
def nextMove(board, color, time, movesRemaining):
    """Just play randomly among the possible moves.

    Idiom fix: random.choice replaces the manual
    moves[random.randint(0, len(moves) - 1)] indexing; the distribution
    over moves is identical (uniform).
    """
    moves = getAllPossibleMoves(board, color)
    return random.choice(moves)
コード例 #45
0
def minimax(board, depth, alpha, beta, maximizingPlayer):
    """Alpha-beta minimax whose leaf evaluation is a weighted blend of
    heuristics chosen by game stage (piece count of currentPlayerColor).

    The MAX layer expands opponentPlayerColor's moves and the MIN layer
    expands currentPlayerColor's moves, matching the caller's convention.

    NOTE(review): the stage weights do not sum to 1 (e.g. the opening mix
    is 0.75 + 0.20 + 0.5 = 1.45) and the last two stages add unweighted
    terms -- presumably tuned empirically; confirm before rebalancing.
    """
    global currentPlayerColor

    global opponentPlayerColor
    if depth == 0 or not isAnyMovePossible(
            board, currentPlayerColor) or not isAnyMovePossible(
                board, opponentPlayerColor):
        if countPieces(board, currentPlayerColor) > 7:
            '''
            initial stage and opening moves trying to focus on center and attack
            '''
            return (0.75 * evaluationMovingToCenter(board)) + (
                0.20 * evaluationAttackFunction(board)) + (
                    0.5 * evaluationColorVsOpposite(board))

        elif countPieces(board, currentPlayerColor) >= 6:
            '''
            middle stage have to be defensive And would also need to be attacktive
            '''
            return (0.75 * evaluationMovingToDefense(board)) + (
                0.15 * evaluationAttackFunction(board)) + (
                    0.5 * evaluationColorVsOpposite(board) +
                    (0.5 * evaluationMakingItKing(board)))
        elif countPieces(board, currentPlayerColor) > 4:
            '''
            middle stage have to be get in center amd attack
             
            '''
            return (0.20 * evaluationMovingToCenter(board)) + (
                0.70 * evaluationAttackFunction(board)) + (
                    0.5 * evaluationColorVsOpposite(board) +
                    (0.5 * evaluationMakingItKing(board)) +
                    evaluationCanBeAttacked(board))
        else:
            '''
            when 4 or less than four pieces are remaining
            '''
            return (0.50 * evaluationAttackFunction(board)) + (
                0.30 * evaluationColorVsOpposite(board) +
                (0.20 * evaluationMovingToDefense(board)) +
                evaluationCanBeAttacked(board))
    if maximizingPlayer:
        v = -sys.maxint - 1  # Python 2: smallest machine int as -infinity
        moves = getAllPossibleMoves(board, opponentPlayerColor)
        for move in moves:
            newBoard = deepcopy(board)

            gamePlay.doMove(newBoard, move)
            #print "inside max",color
            #color=getOpponentColor(color)
            v = max(v, minimax(newBoard, depth - 1, alpha, beta, False))
            alpha = max(alpha, v)
            if beta <= alpha:
                # beta cut-off: the parent MIN node will not allow this line.
                return alpha
        return v
    else:
        v = sys.maxint  # +infinity stand-in for the MIN layer
        moves = getAllPossibleMoves(board, currentPlayerColor)
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard, move)
            #print "inside min",color
            #color=getOpponentColor(color)
            v = min(v, minimax(newBoard, depth - 1, alpha, beta, True))
            beta = min(beta, v)
            if beta <= alpha:
                # alpha cut-off.
                return beta
        return v