def alphabeta(board, color, depth, alpha, beta, moveMade, isMax, timeoutAt=None):
    """Depth-limited alpha-beta search over (score, move) pairs.

    board     -- current game state (project board representation)
    color     -- side to move at this node
    depth     -- remaining plies; 0 evaluates the position statically
    alpha     -- best (score, move) found so far for the maximizer
    beta      -- best (score, move) found so far for the minimizer
    moveMade  -- move that produced `board` (returned at leaf nodes)
    isMax     -- True when this node maximizes, False when it minimizes
    timeoutAt -- optional absolute time.time() deadline

    Returns a (score, move) tuple.  Raises threading.ThreadError when the
    deadline passes before the search completes.
    """
    # Abort the whole search once the deadline has passed.
    if timeoutAt and time.time() > timeoutAt:
        raise threading.ThreadError('alphabeta timed out before finding a solution')

    # Leaf: evaluate the position reached by `moveMade`.
    if depth == 0 or gameOver(board):
        return (simpleHeuristic(board), moveMade)

    if isMax:
        for movePos in children(board, color):
            # BUG FIX: the deadline is now threaded through the recursion;
            # previously only the top-level call could time out.  The old
            # `except ThreadError as e: raise e` wrapper was a no-op and
            # has been removed -- the exception propagates on its own.
            childScore, childMove = alphabeta(childBoard(board, color, movePos), opponent(color),
                                              depth - 1, alpha, beta, movePos, False, timeoutAt)
            if childScore > alpha[0]:
                alpha = (childScore, movePos)
            if beta[0] <= alpha[0]:  # beta cutoff
                break
        return alpha
    else:
        for movePos in children(board, color):
            childScore, childMove = alphabeta(childBoard(board, color, movePos), opponent(color),
                                              depth - 1, alpha, beta, movePos, True, timeoutAt)
            if childScore < beta[0]:
                beta = (childScore, movePos)
            if beta[0] <= alpha[0]:  # alpha cutoff
                break
        return beta
def maxVal(node, color, depth, a, b):
    """Maximizing half of minimax with alpha-beta pruning.

    Returns the best achievable score for `color` from `node`, caching the
    running value on node.value as a side effect.  `a`/`b` are the current
    alpha/beta bounds.
    """
    # Terminal: game finished or search depth exhausted -> static score
    # taken straight from the project's score() function.
    if gameOver(node.state) or depth == 0:
        black, white = score(node.state)
        if color == "B":
            node.value = black
            return black
        elif color == "W":
            node.value = white
            return white

    best = -999  # sentinel lower bound
    node.populateChildren(color)
    # No legal moves: score this position via the depth-0 terminal path.
    if not node.children:
        return maxVal(node, color, 0, a, b)

    # Each child hands the turn to the opponent one ply deeper.
    for child in node.children:
        best = max(best, minVal(child, opponent(color), depth - 1, a, b))
        node.value = best
        if best >= b:  # beta cutoff: min will never allow this line
            return best
        a = max(a, best)
    return best
def minVal(node, color, depth, a, b):
    """Minimizing half of minimax with alpha-beta pruning; mirrors maxVal.

    Returns the lowest score `color` can be held to from `node`, caching
    the running value on node.value as a side effect.
    """
    # Terminal: game finished or depth exhausted -> static score.
    if gameOver(node.state) or depth == 0:
        black, white = score(node.state)
        if color == "B":
            node.value = black
            return black
        elif color == "W":
            node.value = white
            return white

    best = 999  # sentinel upper bound
    node.populateChildren(color)
    # No legal moves: score this position via the depth-0 terminal path.
    if not node.children:
        return minVal(node, color, 0, a, b)

    # Each child hands the turn to the opponent one ply deeper.
    for child in node.children:
        best = min(best, maxVal(child, opponent(color), depth - 1, a, b))
        node.value = best
        if best <= a:  # alpha cutoff: max will never allow this line
            return best
        b = min(b, best)
    return best
Example #4
0
def alphabeta(board, color, depth, alpha, beta, moveMade, isMax):
    """Alpha-beta search over (score, move) pairs.

    alpha and beta are (score, move) tuples; the return value is the best
    such pair for the player at this node (max when isMax, else min).
    `moveMade` is the move that led to `board` and is what leaf nodes
    report back.
    """
    # Leaf node: statically evaluate the position reached by moveMade.
    if depth == 0 or gameOver(board):
        return (simpleHeuristic(board, color), moveMade)

    nextColor = opponent(color)
    if isMax:
        for move in children(board, color):
            childScore, _ = alphabeta(childBoard(board, color, move), nextColor,
                                      depth - 1, alpha, beta, move, False)
            if childScore > alpha[0]:
                alpha = (childScore, move)
            if beta[0] <= alpha[0]:  # prune: min already has a better line
                break
        return alpha

    for move in children(board, color):
        childScore, _ = alphabeta(childBoard(board, color, move), nextColor,
                                  depth - 1, alpha, beta, move, True)
        if childScore < beta[0]:
            beta = (childScore, move)
        if beta[0] <= alpha[0]:  # prune: max already has a better line
            break
    return beta
Example #5
0
def min_value(board, color, depth):
    """Minimizing half of alpha-beta; mirrors max_value().

    Reads and updates the module-level alpha/beta globals that the
    paired max_value() shares.
    """
    global alpha, beta
    # Terminal position or depth exhausted: static evaluation.
    if gamePlay.gameOver(board) or depth == 0:
        return evaluation(board, color)
    depth -= 1
    best = 100000  # sentinel upper bound
    successors = get_successors(board, color)
    nextColor = gamePlay.opponent(color)
    # No legal moves for this side: score the position as-is.
    if not successors:
        return evaluation(board, color)
    for pos in successors:
        # Deep-copy so the trial move never mutates the caller's board.
        trial = deepcopy(board)
        gamePlay.doMove(trial, color, pos)
        best = min(best, max_value(trial, nextColor, depth))
        if best <= alpha:  # prune: max already has a better option
            return best
        beta = min(beta, best)  # tighten the shared beta bound
    return best
Example #6
0
def alpha_beta(board, color, depth, alpha, beta):
    """Alpha-beta search for the game value of `board`.

    Black ("B") maximizes, White ("W") minimizes.  Returns a numeric
    value, or the string "pass" when the side to move has no legal move
    (pre-existing interface quirk kept for callers).

    NOTE(review): a recursive call can itself return "pass", which then
    flows into max()/min() against numbers -- pre-existing behavior,
    left unchanged; verify against the callers.
    """
    from copy import deepcopy  # local import: this block needs a deep board copy

    if depth == 0:
        return eval_fn(board, color)
    if gamePlay.gameOver(board):
        return gamePlay.score(board)

    # Collect every legal move for `color` on the 8x8 board.
    moves = []
    for row in range(8):
        for col in range(8):
            if gamePlay.valid(board, color, (row, col)):
                moves.append((row, col))
    if len(moves) == 0:
        return "pass"
    # NOTE: the old `if moves == "pass"` check was dead code (moves is a
    # list and can never equal a string) and has been removed.

    opp = gamePlay.opponent(color)
    if color == "B":
        # Maximizer: Black picks the highest achievable value.
        for move in moves:
            # BUG FIX: board[:] was a shallow copy, so doMove mutated the
            # caller's shared row lists; deepcopy isolates the trial move.
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard, color, move)
            alpha = max(alpha, alpha_beta(newBoard, opp, depth - 1, alpha, beta))
            if beta <= alpha:
                # BUG FIX: previously a bare `return` (i.e. None) here made
                # the caller's max() blow up; break and return the bound.
                break
        return alpha
    if color == "W":
        # Minimizer: White picks the lowest achievable value.
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard, color, move)
            beta = min(beta, alpha_beta(newBoard, opp, depth - 1, alpha, beta))
            if beta <= alpha:
                # BUG FIX: same bare-return-None defect as the max branch.
                break
        return beta
Example #7
0
def minimax(board, color, depth):
    """Plain (unpruned) minimax for the value of `board`.

    Black ("B") maximizes, White ("W") minimizes.  Returns a numeric
    value, or the string "pass" when the side to move has no legal move
    (interface quirk kept from the original).

    Fixes over the original: the mixed tab/space indentation (a
    SyntaxError on Python 3) is normalized; the `move == "pass"` check
    referenced an undefined name and was dead code, so it is removed;
    the `newboard` typo (NameError) is corrected; board[:] shallow
    copies are replaced with deepcopy so trial moves cannot corrupt the
    caller's nested board.
    """
    from copy import deepcopy  # local import: needed for deep board copies

    # Depth exhausted: static evaluation.
    if depth == 0:
        return eval_fn(board, color)
    # Game over: exact final score.
    if gamePlay.gameOver(board):
        return gamePlay.score(board)

    best_val = None
    best_move = None
    opp = gamePlay.opponent(color)

    # Collect every legal move for `color` on the 8x8 board.
    moves = []
    for row in range(8):
        for col in range(8):
            if gamePlay.valid(board, color, (row, col)):
                moves.append((row, col))
    if len(moves) == 0:
        return "pass"

    if color == "B":
        # Black maximizes.
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard, color, move)
            val = minimax(newBoard, opp, depth - 1)
            if best_val is None or val > best_val:
                best_val, best_move = val, move
    if color == "W":
        # White minimizes.
        for move in moves:
            newBoard = deepcopy(board)
            gamePlay.doMove(newBoard, color, move)
            val = minimax(newBoard, opp, depth - 1)
            if best_val is None or val < best_val:
                best_val, best_move = val, move
    return best_val
Example #8
0
def max_value(board, color, depth):
    """Maximizing half of alpha-beta; mirrors min_value().

    Reads and updates the module-level alpha/beta globals that the
    paired min_value() shares.
    """
    global alpha, beta
    # Board full / game over, or depth exhausted: static evaluation.
    if gamePlay.gameOver(board) or depth == 0:
        return evaluation(board, color)
    depth -= 1
    best = -100000  # sentinel lower bound
    successors = get_successors(board, color)  # child positions
    nextColor = gamePlay.opponent(color)
    # No legal moves for this side: score the position as-is.
    if not successors:
        return evaluation(board, color)
    for pos in successors:
        # Deep-copy so the mock move never mutates the caller's board.
        trial = deepcopy(board)
        gamePlay.doMove(trial, color, pos)
        best = max(best, min_value(trial, nextColor, depth))
        if best >= beta:  # prune: min already has a better option
            return best
        alpha = max(alpha, best)  # tighten the shared alpha bound
    return best
def minmax(node, depth, color, bool_player):
    """Plain minimax (no pruning) over a node tree.

    node        -- current position wrapper exposing .state / .children
    depth       -- remaining plies; 0 evaluates the node statically
    color       -- side to move at this node
    bool_player -- True when this node maximizes, False when it minimizes

    Returns the minimax value of `node`.
    """
    # Terminal: game over or depth exhausted -> static evaluation.
    if gameOver(node.state) or depth == 0:
        # BUG FIX: the original called `heurisic` (typo) here, a NameError;
        # the other two call sites spell it `heuristic`.
        return heuristic(node, color)

    node.populateChildren(color)
    # No legal moves: evaluate the current position directly.
    if len(node.children) == 0:
        return heuristic(node, color)

    if bool_player:
        best = -999  # sentinel lower bound
        for child in node.children:
            # BUG FIX: the original recursive call dropped the `color`
            # argument entirely (TypeError: 3 args for a 4-arg function).
            # Following the sibling maxVal/minVal pattern, the turn
            # alternates via opponent(color) -- TODO confirm against the
            # project's populateChildren semantics.
            best = max(best, minmax(child, depth - 1, opponent(color), False))
        return best
    else:
        best = 999  # sentinel upper bound
        for child in node.children:
            best = min(best, minmax(child, depth - 1, opponent(color), True))
        return best
def alphaBeta(node, depth, currentPlayerTurn, color, alpha, beta):
    """Alpha-beta search over a node tree.

    `color` fixes the evaluation perspective (scores are always reported
    for this side), while currentPlayerTurn tracks whose turn it is:
    True = maximizing node, False = minimizing node.  Caches the chosen
    bound on node.value as a side effect and returns it.
    """
    # Terminal: score the board and report the count for `color`'s side.
    if depth == 0 or gameOver(node.state):
        black, white = score(node.state)
        if color == "W":
            node.value = white
            return white
        elif color == "B":
            node.value = black
            return black

    if currentPlayerTurn:
        node.populateChildren()
        for child in node.children:
            returnedval = alphaBeta(child, depth - 1, False, color, alpha, beta)
            alpha = max(alpha, returnedval)
            if beta <= alpha:  # beta cutoff
                break
        node.value = alpha
        return alpha
    else:
        node.populateChildren()
        for child in node.children:
            # BUG FIX: the min node recursed with currentPlayerTurn=False,
            # so every deeper level stayed a minimizer and the maximizer
            # never got another turn; it must alternate back to True.
            returnedval = alphaBeta(child, depth - 1, True, color, alpha, beta)
            beta = min(beta, returnedval)
            if beta <= alpha:  # alpha cutoff
                break
        node.value = beta
        return beta
Example #11
0
def eval_fn(board, color):
    """Positional evaluation of `board` from `color`'s point of view.

    Finished games collapse to +100 (win), -100 (loss) or 0 (draw);
    otherwise every occupied square is weighted through the
    gradingStrategy table: our pieces add, the opponent's subtract.
    """
    # Decided games get a flat bonus so the search prefers actual wins.
    if gamePlay.gameOver(board):
        margin = gamePlay.score(board)
        if margin > 0:
            return 100
        if margin < 0:
            return -100
        return 0

    opp = gamePlay.opponent(color)
    total = 0
    for row in range(8):
        for col in range(8):
            square = board[row][col]
            # gradingStrategy is keyed by (row+1)*10 + (col+1),
            # i.e. an 11..88 board-coordinate scheme.
            if square == color:
                total += gradingStrategy[(row + 1) * 10 + 1 + col]
            elif square == opp:
                total -= gradingStrategy[(row + 1) * 10 + 1 + col]
    return total