def nextMove(board, color, time, reversed = False):
    """Pick a move for *color* with fixed-depth alpha-beta search.

    board    -- 8x8 board accepted by gameplay.valid/doMove
    color    -- side to move
    time     -- seconds remaining on our clock
    reversed -- True when playing reversed othello (min/max roles swap)

    Returns the chosen (row, col) move, or 'pass' when no move is legal.
    """
    moves = []
    for i in range(8):
        for j in range(8):
            if gameplay.valid(board, color, (i, j)):
                moves.append((i, j))
    if len(moves) == 0:
        return 'pass'
    # Search deeper when the branching factor is small.
    if len(moves) > 8:
        depth = 3
    elif len(moves) < 6:
        depth = 5
    else:
        depth = 4
    # Shrink the search when the clock runs low.  BUG FIX: the original
    # tested `time < 60` before `elif time < 40`, so the depth-2 branch
    # was unreachable; the tighter bound must be checked first.
    if time < 40:
        depth = 2
    elif time < 60:
        depth = 3
    best = None
    bestMove = None
    for move in moves:
        newBoard = deepcopy(board)
        gameplay.doMove(newBoard, color, move)
        moveVal = alphaBeta(newBoard, gameplay.opponent(color), reversed, depth)
        if best is None or betterThan(moveVal, best, color, reversed):
            bestMove = move
            best = moveVal
    return bestMove
def __init__(self, parent=None):
    # Build a child search node from *parent*: copy the board, step the
    # remaining depth down by one, and hand the turn to the other side.
    # With no parent, attributes are left unset (filled in by the caller).
    # NOTE(review): this uses `god.opponent` while every other block in
    # this file uses `gameplay.opponent` -- confirm `god` is a real
    # module/alias and not a typo.
    if parent:
        self.board = deepcopy(parent.board)      # own copy so child moves never leak upward
        self.depth = parent.depth - 1            # one ply closer to the horizon
        self.color = god.opponent(parent.color)  # opponent moves at this node
        self.reversed = parent.reversed          # inherit the reversed-othello flag
        self.nextmove = None                     # best reply, filled in by the search
def alphaBeta(board, color, reverse, depth, strategy, alpha=-sys.maxint, beta=sys.maxint):
    # Minimax with alpha-beta pruning.  "B" maximizes and "W" minimizes in
    # a normal game; the roles swap when *reverse* is set.
    # strategy == 1 selects evalue(board) at the horizon, otherwise value(board).
    # Returns the backed-up scalar score of *board*.
    if depth == 0:
        if strategy == 1:
            return evalue(board)
        else:
            return value(board)
    moves = []
    for i in range(8):
        for j in range(8):
            if gameplay.valid(board, color, (i, j)):
                moves.append((i, j))
    if len(moves) == 0:
        if (gameplay.gameOver(board)):
            # Neither side can move: score the finished game exactly.
            return evalue(board)
        else:
            # Current side must pass; the turn flips without a disc placed.
            if (color == "B" and not reverse) or (color == "W" and reverse):
                newBoard = deepcopy(board)
                gameplay.doMove(newBoard, color, 'pass')
                val = max(alpha, alphaBeta(newBoard, gameplay.opponent(color), reverse, depth - 1, strategy, alpha, beta))
                return val
            else:
                newBoard = deepcopy(board)
                gameplay.doMove(newBoard, color, 'pass')
                val = min(beta, alphaBeta(newBoard, gameplay.opponent(color), reverse, depth - 1, strategy, alpha, beta))
                return val
    else:
        if (color == "B" and not reverse) or (color == "W" and reverse):
            # Maximizing side: raise alpha, cut off once beta <= alpha.
            for move in moves:
                newBoard = deepcopy(board)
                gameplay.doMove(newBoard, color, move)
                alpha = max(alpha, alphaBeta(newBoard, gameplay.opponent(color), reverse, depth - 1, strategy, alpha, beta))
                if beta <= alpha:
                    break
            return alpha
        else:
            # Minimizing side: lower beta, cut off once beta <= alpha.
            for move in moves:
                newBoard = deepcopy(board)
                gameplay.doMove(newBoard, color, move)
                beta = min(beta, alphaBeta(newBoard, gameplay.opponent(color), reverse, depth - 1, strategy, alpha, beta))
                if beta <= alpha:
                    break
            return beta
def mobility(board, ori_color):
    """Relative mobility of *ori_color*: positive when we have more move
    points than the opponent, negative when fewer, 0 on a tie.  Scaled by
    the winner's share of the combined total (x100)."""
    own = movepoint(board, ori_color)
    their = movepoint(board, gameplay.opponent(ori_color))
    if own == their:
        return 0
    total = own + their
    if own > their:
        return 100 * own / total
    return -100 * their / total
def mobility(board, ori_color):
    # Mobility differential for *ori_color*, normalised into [-100, 100]
    # by the two sides' combined move points; ties score 0.
    mine = movepoint(board, ori_color)
    opp = movepoint(board, gameplay.opponent(ori_color))
    if mine > opp:
        return 100 * mine / (mine + opp)
    elif opp > mine:
        return -100 * opp / (mine + opp)
    return 0
def near_corner(board, ori_color):
    """Penalty for occupying the 12 squares adjacent to the corners
    (X- and C-squares).  Positive when the opponent sits on more of them
    than we do."""
    opp = gameplay.opponent(ori_color)
    danger = ((0, 1), (1, 1), (1, 0), (0, 6), (1, 6), (1, 7),
              (6, 0), (6, 1), (7, 1), (7, 6), (6, 7), (6, 6))
    mine = 0
    theirs = 0
    for r, c in danger:
        piece = board[r][c]
        if piece == ori_color:
            mine += 1
        elif piece == opp:
            theirs += 1
    # -(mine - theirs): owning these squares is bad for us.
    return theirs - mine
def corner_point(board, ori_color):
    """Corner differential: corners held by *ori_color* minus corners
    held by the opponent.  Each of (0,0),(0,7),(7,0),(7,7) counts once."""
    cp_self = 0
    cp_op = 0
    corner = [0, 7]
    for i in corner:
        for j in corner:
            if board[i][j] == ori_color:
                cp_self += 1
            elif board[i][j] == gameplay.opponent(ori_color):
                # BUG FIX: original wrote `cp_op=+1` (assignment of +1),
                # pinning the opponent's count at 1 no matter how many
                # corners they actually held.
                cp_op += 1
    return cp_self - cp_op
def corner_point(board, ori_color):
    """Corner differential: corners held by *ori_color* minus corners
    held by the opponent.  Each of (0,0),(0,7),(7,0),(7,7) counts once."""
    cp_self = 0
    cp_op = 0
    corner = [0, 7]
    for i in corner:
        for j in corner:
            if board[i][j] == ori_color:
                cp_self += 1
            elif board[i][j] == gameplay.opponent(ori_color):
                # BUG FIX: original wrote `cp_op = +1` (assignment of +1),
                # pinning the opponent's count at 1 no matter how many
                # corners they actually held.
                cp_op += 1
    return cp_self - cp_op
def near_corner(board, ori_color):
    # Squares bordering a corner are liabilities: each one we occupy costs
    # a point, each one the opponent occupies earns us a point.
    bad_squares = [(0, 1), (1, 1), (1, 0), (0, 6), (1, 6), (1, 7),
                   (6, 0), (6, 1), (7, 1), (7, 6), (6, 7), (6, 6)]
    score = 0
    for x, y in bad_squares:
        if board[x][y] == ori_color:
            score -= 1
        elif board[x][y] == gameplay.opponent(ori_color):
            score += 1
    return score
def evaluation(state, color):
    """Weighted-square positional score: sum of the weights under *color*'s
    discs minus the sum under the opponent's.  Corners dominate; the
    adjacent X/C squares carry negative weight."""
    WEIGHTS = [
        [99, -8, 8, 6, 6, 8, -8, 99],
        [-8, -24, -4, -3, -3, -4, -24, -8],
        [8, -4, 7, 4, 4, 7, -4, 8],
        [6, -3, 4, 0, 0, 4, -3, 6],
        [6, -3, 4, 0, 0, 4, -3, 6],
        [8, -4, 7, 4, 4, 7, -4, 8],
        [-8, -24, -4, -3, -3, -4, -24, -8],
        [99, -8, 8, 6, 6, 8, -8, 99],
    ]
    other = gameplay.opponent(color)
    total = 0
    for row in range(8):
        for col in range(8):
            cell = state[row][col]
            if cell == color:
                total += WEIGHTS[row][col]
            if cell == other:
                total -= WEIGHTS[row][col]
    return total
def evaluation(state, color):
    # Static board score for *color*: add each square's weight where we
    # have a disc, subtract it where the opponent does.
    weight = [[99, -8, 8, 6, 6, 8, -8, 99],
              [-8, -24, -4, -3, -3, -4, -24, -8],
              [8, -4, 7, 4, 4, 7, -4, 8],
              [6, -3, 4, 0, 0, 4, -3, 6],
              [6, -3, 4, 0, 0, 4, -3, 6],
              [8, -4, 7, 4, 4, 7, -4, 8],
              [-8, -24, -4, -3, -3, -4, -24, -8],
              [99, -8, 8, 6, 6, 8, -8, 99]]
    score = 0
    for board_row, weight_row in zip(state, weight):
        for piece, w in zip(board_row, weight_row):
            if piece == color:
                score += w
            if piece == gameplay.opponent(color):
                score -= w
    return score
def nextMove(board, color, time, reversed = False):
    # Choose a move for *color* via alpha-beta search with adaptive depth.
    # Uses module globals: `depth` (search depth persisted between calls),
    # `otime` (clock reading saved after each move) and `ident_strategy`
    # (disc-count threshold that switches to the exact endgame search).
    # Returns the chosen (row, col) or "pass".
    global depth
    global otime
    moves = []
    for i in range(8):
        for j in range(8):
            if gameplay.valid(board, color, (i, j)):
                moves.append((i, j))
    if len(moves) == 0:
        return "pass"
    score = gameplay.score(board)
    num = score[0] + score[1]   # discs already on the board
    # Tune depth to the branching factor; cap at 6, and fall back to 3
    # when the clock is short and the endgame has not been reached.
    if len(moves) > 9:
        depth = 5
    if len(moves) < 7:
        depth = 6
    if time < 40 and num < ident_strategy:
        depth = 3
    if depth > 6:
        depth = 6
    if num >= ident_strategy:
        # Endgame: search every remaining ply and score exactly (strategy 1).
        d = 8 * 8 - num
        strategy = 1
    else:
        d = depth
        strategy = 0
    best = None
    for move in moves:
        newBoard = deepcopy(board)
        gameplay.doMove(newBoard, color, move)
        moveVal = alphaBeta(newBoard, gameplay.opponent(color), reversed, d, strategy)
        if best == None or betterThan(moveVal, best, color, reversed):
            bestMove = move
            best = moveVal
    otime = time   # remember the clock for the next call
    return bestMove
def nextMove(board, color, time, reversed=False):
    # Entry point: pick *color*'s move with adaptive-depth alpha-beta.
    # Relies on module globals `depth`, `otime` and `ident_strategy`.
    global depth
    global otime
    legal = [(r, c) for r in range(8) for c in range(8)
             if gameplay.valid(board, color, (r, c))]
    if not legal:
        return "pass"
    totals = gameplay.score(board)
    disc_count = totals[0] + totals[1]
    # Depth tuning: shallower with many options, deeper with few,
    # shallower again under time pressure outside the endgame.
    if len(legal) > 9:
        depth = 5
    if len(legal) < 7:
        depth = 6
    if time < 40 and disc_count < ident_strategy:
        depth = 3
    depth = min(depth, 6)
    if disc_count >= ident_strategy:
        # Endgame: solve out to the last disc with the exact evaluator.
        search_depth, strategy = 64 - disc_count, 1
    else:
        search_depth, strategy = depth, 0
    best_val = None
    best_move = None
    for candidate in legal:
        child = deepcopy(board)
        gameplay.doMove(child, color, candidate)
        val = alphaBeta(child, gameplay.opponent(color), reversed, search_depth, strategy)
        if best_val is None or betterThan(val, best_val, color, reversed):
            best_move = candidate
            best_val = val
    otime = time
    return best_move
def min_val(state, alpha, beta, depth, color, reversed=False):
    # Minimizing node of alpha-beta: normally the opponent of *color*
    # moves here; when *reversed*, *color* itself does.
    # Returns (best_move, value) -- best_move is None at leaves.
    if endState(state):
        return None, utility(state, color)
    elif depth == 0:
        return None, evaluation(state, color)
    best = None
    v = INF
    if not reversed:
        moves = successors(state, gameplay.opponent(color))
    else:
        moves = successors(state, color)
    for (move, state) in moves:   # NOTE: rebinds `state` to each child board
        value = max_val(state, alpha, beta, depth - 1, color, reversed)[1]
        if best is None or value < v:
            best = move
            v = value
        if alpha >= v:
            # Alpha cutoff: the maximizer above already has >= v elsewhere.
            return best, v
        beta = min(beta, v)
    return best, v
def maxvalue(board, color, ori_color, deepest, search_depth, alpha, beta):
    # Maximizing node of alpha-beta.  *ori_color* is the root player being
    # scored; *search_depth* counts plies from the root and is incremented
    # on entry (the root call passes 0).  At search_depth == 1 the return
    # is a (value, move) pair; deeper calls return the value alone.
    search_depth = search_depth + 1
    if search_depth == deepest or gameplay.gameOver(board):
        return calculate(board, search_depth, ori_color)
    moves = []
    bestmove = "pass"
    for i in range(8):
        for j in range(8):
            if gameplay.valid(board, color, (i, j)):
                moves.append((i, j))
    v = -float('inf')
    if len(moves) == 0:
        # NOTE(review): a stuck maximizer is scored +inf here instead of
        # passing the turn -- confirm this is intentional.
        if search_depth == 1:
            return float('inf'), "pass"
        else:
            return float('inf')
    for move in moves:
        newBoard = deepcopy(board)
        gameplay.doMove(newBoard, color, move)
        min_value = minvalue(newBoard, gameplay.opponent(color), ori_color, deepest, search_depth, alpha, beta)
        if min_value > v:
            v = min_value
            if search_depth == 1:
                bestmove = move   # track the actual move only at the root
        if v >= beta:
            # Beta cutoff: the minimizer above will never allow this line.
            if search_depth == 1:
                return v, bestmove
            else:
                return v
        alpha = max(alpha, v)
    if search_depth == 1:
        return v, bestmove
    else:
        return v
def maxvalue(board, color, ori_color, deepest, search_depth, alpha, beta):
    # Maximizing node of alpha-beta.  *ori_color* is the root player being
    # scored; *search_depth* counts plies from the root and is incremented
    # on entry (the root call passes 0).  At search_depth == 1 the return
    # is a (value, move) pair; deeper calls return the value alone.
    search_depth = search_depth + 1
    if search_depth == deepest or gameplay.gameOver(board):
        return calculate(board, search_depth, ori_color)
    moves = []
    bestmove = "pass"
    for i in range(8):
        for j in range(8):
            if gameplay.valid(board, color, (i, j)):
                moves.append((i, j))
    v = -float('inf')
    if len(moves) == 0:
        # NOTE(review): a stuck maximizer is scored +inf here instead of
        # passing the turn -- confirm this is intentional.
        if search_depth == 1:
            return float('inf'), "pass"
        else:
            return float('inf')
    for move in moves:
        newBoard = deepcopy(board)
        gameplay.doMove(newBoard, color, move)
        min_value = minvalue(newBoard, gameplay.opponent(color), ori_color, deepest, search_depth, alpha, beta)
        if min_value > v:
            v = min_value
            if search_depth == 1:
                bestmove = move   # track the actual move only at the root
        if v >= beta:
            # Beta cutoff: the minimizer above will never allow this line.
            if search_depth == 1:
                return v, bestmove
            else:
                return v
        alpha = max(alpha, v)
    if search_depth == 1:
        return v, bestmove
    else:
        return v
def alphaBeta(board, color, reverse, depth, strategy, alpha=-sys.maxint, beta=sys.maxint):
    # Minimax with alpha-beta pruning.  "B" maximizes and "W" minimizes in
    # a normal game; the roles swap when *reverse* is set.
    # strategy == 1 selects evalue(board) at the horizon, otherwise value(board).
    # Returns the backed-up scalar score of *board*.
    if depth == 0:
        if strategy == 1:
            return evalue(board)
        else:
            return value(board)
    moves = []
    for i in range(8):
        for j in range(8):
            if gameplay.valid(board, color, (i, j)):
                moves.append((i, j))
    if len(moves) == 0:
        if (gameplay.gameOver(board)):
            # Neither side can move: score the finished game exactly.
            return evalue(board)
        else:
            # Current side must pass; the turn flips without a disc placed.
            if (color == "B" and not reverse) or (color == "W" and reverse):
                newBoard = deepcopy(board)
                gameplay.doMove(newBoard, color, 'pass')
                val = max(
                    alpha,
                    alphaBeta(newBoard, gameplay.opponent(color), reverse, depth - 1, strategy, alpha, beta))
                return val
            else:
                newBoard = deepcopy(board)
                gameplay.doMove(newBoard, color, 'pass')
                val = min(
                    beta,
                    alphaBeta(newBoard, gameplay.opponent(color), reverse, depth - 1, strategy, alpha, beta))
                return val
    else:
        if (color == "B" and not reverse) or (color == "W" and reverse):
            # Maximizing side: raise alpha, cut off once beta <= alpha.
            for move in moves:
                newBoard = deepcopy(board)
                gameplay.doMove(newBoard, color, move)
                alpha = max(
                    alpha,
                    alphaBeta(newBoard, gameplay.opponent(color), reverse, depth - 1, strategy, alpha, beta))
                if beta <= alpha:
                    break
            return alpha
        else:
            # Minimizing side: lower beta, cut off once beta <= alpha.
            for move in moves:
                newBoard = deepcopy(board)
                gameplay.doMove(newBoard, color, move)
                beta = min(
                    beta,
                    alphaBeta(newBoard, gameplay.opponent(color), reverse, depth - 1, strategy, alpha, beta))
                if beta <= alpha:
                    break
            return beta
def value(board, ori_color, search_depth, total_num):
    # Composite positional evaluation for *ori_color*.
    # Returns (num_point, value_point, frontier_point, stable_point):
    #   num_point      -- disc-count differential scaled into [-100, 100]
    #   value_point    -- weighted-square sum (corners dominate)
    #   frontier_point -- penalty for owning more frontier discs
    #   stable_point   -- rough edge-stability differential, computed only
    #                     once total_num >= 20 discs are on the board
    weight = [[99, -8, 8, 6, 6, 8, -8, 99],
              [-8, -24, -4, -3, -3, -4, -24, -8],
              [8, -4, 7, 4, 4, 7, -4, 8],
              [6, -3, 4, 0, 0, 4, -3, 6],
              [6, -3, 4, 0, 0, 4, -3, 6],
              [8, -4, 7, 4, 4, 7, -4, 8],
              [-8, -24, -4, -3, -3, -4, -24, -8],
              [99, -8, 8, 6, 6, 8, -8, 99]]
    value_point = 0
    num_self = 0        # total_num split per side
    num_op = 0
    num_point = 0
    frontier_self = 0   # element is on the frontier (touches an empty square)
    frontier_op = 0
    frontier_point = 0
    stable_self = 0     # element is stable and the element is on the side
    stable_op = 0
    stable_point = 0
    border_x = [-1, 0, 1, -1, 1, -1, 0, 1]   # 8-neighbour offsets
    border_y = [1, 1, 1, 0, 0, -1, -1, -1]
    for i in range(8):
        for j in range(8):
            # weight_point / num_point
            if board[i][j] == ori_color:
                value_point += weight[i][j]
                num_self += 1
            elif board[i][j] == gameplay.opponent(ori_color):
                value_point -= weight[i][j]
                num_op += 1
            # frontier_point: a disc adjacent to any empty square is frontier
            if board[i][j] != '.':
                for k in range(8):
                    new_x = i + border_x[k]
                    new_y = j + border_y[k]
                    if gameplay.validPos(new_x, new_y) and board[new_x][new_y] == '.':
                        if board[i][j] == ori_color:
                            frontier_self += 1
                        else:
                            frontier_op += 1
                        break
            # stable_point - just vaguely calculated from edge runs
            if total_num >= 20:
                if (i == 0 or i == 7) and j != 0 and j != 7 and board[i][j] != '.':
                    if horizontal_vague_stable(board, i, j):
                        if board[i][j] == ori_color:
                            stable_self += 1
                        else:
                            stable_op += 1
                if (j == 0 or j == 7) and i != 0 and i != 7 and board[i][j] != '.':
                    if vertical_vague_stable(board, i, j):
                        if board[i][j] == ori_color:
                            stable_self += 1
                        else:
                            stable_op += 1
    # Owning more frontier discs is bad (exposed to flips).
    # NOTE(review): the second branch scales by frontier_self where the
    # parallel num/stable blocks scale by the larger side's count --
    # possibly intended to be frontier_op; confirm before changing.
    if frontier_self > frontier_op:
        frontier_point = -100 * frontier_self / (frontier_self + frontier_op)
    elif frontier_op > frontier_self:
        frontier_point = 100 * frontier_self / (frontier_self + frontier_op)
    if num_self > num_op:
        num_point = 100 * num_self / (num_self + num_op)
    elif num_op > num_self:
        num_point = -100 * num_op / (num_self + num_op)
    else:
        num_point = 0
    if stable_self > stable_op:
        stable_point = 100 * stable_self / (stable_self + stable_op)
    elif stable_op > stable_self:
        stable_point = -100 * stable_op / (stable_self + stable_op)
    else:
        stable_point = 0
    return num_point, value_point, frontier_point, stable_point
def value(board, ori_color, search_depth, total_num):
    # Composite positional evaluation for *ori_color*.
    # Returns (num_point, value_point, frontier_point, stable_point):
    #   num_point      -- disc-count differential scaled into [-100, 100]
    #   value_point    -- weighted-square sum (corners dominate)
    #   frontier_point -- penalty for owning more frontier discs
    #   stable_point   -- rough edge-stability differential, computed only
    #                     once total_num >= 20 discs are on the board
    weight = [[99, -8, 8, 6, 6, 8, -8, 99],
              [-8, -24, -4, -3, -3, -4, -24, -8],
              [8, -4, 7, 4, 4, 7, -4, 8],
              [6, -3, 4, 0, 0, 4, -3, 6],
              [6, -3, 4, 0, 0, 4, -3, 6],
              [8, -4, 7, 4, 4, 7, -4, 8],
              [-8, -24, -4, -3, -3, -4, -24, -8],
              [99, -8, 8, 6, 6, 8, -8, 99]]
    value_point = 0
    num_self = 0        # total_num split per side
    num_op = 0
    num_point = 0
    frontier_self = 0   # element is on the frontier (touches an empty square)
    frontier_op = 0
    frontier_point = 0
    stable_self = 0     # element is stable and the element is on the side
    stable_op = 0
    stable_point = 0
    border_x = [-1, 0, 1, -1, 1, -1, 0, 1]   # 8-neighbour offsets
    border_y = [1, 1, 1, 0, 0, -1, -1, -1]
    for i in range(8):
        for j in range(8):
            # weight_point / num_point
            if board[i][j] == ori_color:
                value_point += weight[i][j]
                num_self += 1
            elif board[i][j] == gameplay.opponent(ori_color):
                value_point -= weight[i][j]
                num_op += 1
            # frontier_point: a disc adjacent to any empty square is frontier
            if board[i][j] != '.':
                for k in range(8):
                    new_x = i + border_x[k]
                    new_y = j + border_y[k]
                    if gameplay.validPos(new_x, new_y) and board[new_x][new_y] == '.':
                        if board[i][j] == ori_color:
                            frontier_self += 1
                        else:
                            frontier_op += 1
                        break
            # stable_point - just vaguely calculated from edge runs
            if total_num >= 20:
                if (i == 0 or i == 7) and j != 0 and j != 7 and board[i][j] != '.':
                    if horizontal_vague_stable(board, i, j):
                        if board[i][j] == ori_color:
                            stable_self += 1
                        else:
                            stable_op += 1
                if (j == 0 or j == 7) and i != 0 and i != 7 and board[i][j] != '.':
                    if vertical_vague_stable(board, i, j):
                        if board[i][j] == ori_color:
                            stable_self += 1
                        else:
                            stable_op += 1
    # Owning more frontier discs is bad (exposed to flips).
    # NOTE(review): the second branch scales by frontier_self where the
    # parallel num/stable blocks scale by the larger side's count --
    # possibly intended to be frontier_op; confirm before changing.
    if frontier_self > frontier_op:
        frontier_point = -100 * frontier_self / (frontier_self + frontier_op)
    elif frontier_op > frontier_self:
        frontier_point = 100 * frontier_self / (frontier_self + frontier_op)
    if num_self > num_op:
        num_point = 100 * num_self / (num_self + num_op)
    elif num_op > num_self:
        num_point = -100 * num_op / (num_self + num_op)
    else:
        num_point = 0
    if stable_self > stable_op:
        stable_point = 100 * stable_self / (stable_self + stable_op)
    elif stable_op > stable_self:
        stable_point = -100 * stable_op / (stable_self + stable_op)
    else:
        stable_point = 0
    return num_point, value_point, frontier_point, stable_point