class StudentAI(): def __init__(self,row,col,p): self.row = row self.col = col self.p = p self.board = Board(row,col,p) self.board.initialize_game() self.color = '' self.opponent = {1:2,2:1} self.color = 2 def get_move(self, move): if len(move) != 0: self.board.make_move(move, self.opponent[self.color]) else: self.color = 1 moves = self.board.get_all_possible_moves(self.color) tree_depth = 4 # for beta alpha pruning alpha = -math.inf beta = math.inf # best moves list best_moves = [] # Get best moves for row in moves:
class StudentAI():
    """Checkers agent that runs a self-play feature-recording simulation and
    then delegates move choice to a QLearning policy."""

    def __init__(self, col, row, p):
        """
        @param col: number of columns on the board
        @param row: number of rows on the board
        @param p: number of rows filled with checker pieces at the start
        """
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}  # maps each player id to the other
        self.color = 2                # assume player 2 until get_move learns otherwise
        # FIX: movecount was never initialized anywhere, so the
        # `self.movecount += 1` in get_move raised AttributeError on first use.
        self.movecount = 0

    def get_move(self, move):
        """Replay the opponent's move, refresh training data, then ask the
        QLearning policy for our move.

        @param move: opponent's Move (empty on the game's first turn)
        @return: the Move this agent plays
        """
        if len(move) != 0:
            self.board.make_move(move, self.opponent[self.color])
        else:
            self.color = 1  # no opponent move yet: we are player 1
        moves = self.board.get_all_possible_moves(self.color)
        # side effect: runs a self-play game and updates the feature matrix
        self.simulate_lr(self.color)
        ql = QLearning()
        move = ql.make_action(self.board, moves)
        self.board.make_move(move, self.color)
        self.movecount += 1
        return move
class ManualAI():
    """Console-driven checkers player: lists the legal moves each turn and
    asks the user to select one by a pair of indices."""

    def __init__(self, col, row, p):
        """Set up the private board copy.

        @param col: number of columns on the board
        @param row: number of rows on the board
        @param p: number of rows filled with checker pieces at the start
        """
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = 2                    # assume player 2 until we act first
        self.opponent = {1: 2, 2: 1}      # to switch turns after each turn

    def get_move(self, move):
        """Apply the opponent's move, then read our move from the console.

        @param move: Move object from the opponent (empty seq on first turn)
        @return res_move: the Move chosen via console input
        """
        if move.seq:
            self.board.make_move(move, self.opponent[self.color])
        else:
            self.color = 1  # no opponent move yet: we are player 1

        moves = self.board.get_all_possible_moves(self.color)
        while True:
            try:
                for i, checker_moves in enumerate(moves):
                    print(i, ':[', end="")
                    for j, mv in enumerate(checker_moves):
                        print(j, ":", mv, end=", ")
                    print("]")
                raw = input("Select Move {int} {int}: ").split()
                index, inner_index = map(int, raw)
                res_move = moves[index][inner_index]
            except KeyboardInterrupt:
                raise KeyboardInterrupt
            except:
                # out-of-range index or non-numeric input: prompt again
                print('invalid move')
                continue
            else:
                break

        self.board.make_move(res_move, self.color)
        return res_move
def simulate_lr(self, color):
    """Play one self-play game (up to 50 plies) on a fresh board, appending a
    black/white feature pair to self.feature_matrix after every move, then
    increment the label column for the recorded positions by the outcome.

    @param color: player id (1 or 2) whose result is scored
    @return win: 1 if `color` won, 0 if the opponent won, 0.5 on the 50-ply cap
    """
    # simulate one time
    # record all X features to feature_matrix
    # update the y value accordingly
    print("entering simulations")
    newboard = Board(self.col, self.row, self.p)
    newboard.initialize_game()
    feature_list_b = []   # black feature vectors, one per ply
    feature_list_w = []   # white feature vectors, one per ply
    win = 0
    ### TODO: Fixing Current move in a new board
    curr_turn = self.opponent[color]
    for turn in range(50):
        if newboard.is_win(color) == color:
            win = 1
            break
        elif newboard.is_win(self.opponent[color]) == self.opponent[color]:
            break
        move = self.minimax_move(newboard.get_all_possible_moves(curr_turn))
        newboard.make_move(move, curr_turn)
        # NOTE(review): features are extracted from self.board, not newboard —
        # this looks related to the TODO above; confirm which board is intended.
        b, w = self.get_X(self.board)
        feature_list_b.append(b)
        feature_list_w.append(w)
        self.feature_matrix = np.append(self.feature_matrix, np.array([b, w]), axis=0)
        print(self.feature_matrix)
        curr_turn = self.opponent[curr_turn]
    else:
        # for/else: all 50 plies ran without a winner — score as a draw
        win = 0.5
    # matrix = np.array([feature_list_b, feature_list_w])
    # feature_matrix = np.hstack((matrix, np.zeros((matrix.shape[0], 1))))
    # TODO: Fixing y value update
    if win == 1 and color == 1:
        for fb in feature_list_b:
            # NOTE(review): np.where returns a tuple of index arrays, so
            # `index == []` below is always False and the append never runs.
            index = np.where(fb in self.feature_matrix[:, 0:self.feature_size])
            if index == []:
                # NOTE(review): b and w are the final loop iteration's values,
                # not fb — confirm this is intended.
                self.feature_matrix = np.append(self.feature_matrix, np.array([b, w]), axis=0)
            self.feature_matrix[index, self.feature_size] += 1
    elif win == 0 and color == 1:
        for fw in feature_list_w:
            index = np.where(fw in self.feature_matrix[:, 0:self.feature_size])
            if index == []:
                self.feature_matrix = np.append(self.feature_matrix, np.array([b, w]), axis=0)
            self.feature_matrix[index, self.feature_size] += 1
    return win
class StudentAI():
    """Baseline agent: mirrors the opponent's move onto its board and answers
    with a random cell (random row in free mode, row 0 in gravity mode)."""

    # class-level defaults
    col = 0
    row = 0
    k = 0
    g = 0

    def __init__(self, col, row, k, g):
        self.g = g
        self.col = col
        self.row = row
        self.k = k
        self.board = Board(col, row, k, g)

    def get_move(self, move):
        """Record the opponent's move, then return a random move of our own."""
        self.board.make_move(move, 1)
        rand_col = randint(0, self.col - 1)
        if self.g == 0:
            return Move(rand_col, randint(0, self.row - 1))
        return Move(rand_col, 0)
class StudentAI():
    """Checkers agent driven by an external MonteCarloTree searcher."""

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}  # maps each player id to the other
        self.color = 2
        self.count = 0  # move counter; never incremented within this class

    def get_move(self, move):
        """Apply the opponent's move, then choose ours via MonteCarloTree.

        @param move: opponent's Move (empty on the game's first turn)
        @return: the Move this agent plays
        """
        if len(move) != 0:
            self.board.make_move(move, self.opponent[self.color])
        else:
            self.color = 1
        moves = self.board.get_all_possible_moves(self.color)
        # index = randint(0,len(moves)-1)
        # inner_index = randint(0,len(moves[index])-1)
        # move = moves[index][inner_index]
        # NOTE(review): this forced-move shortcut is overwritten by the MCTS
        # result below in every case — confirm whether an early return (plus a
        # make_move for the forced move) was intended.
        if len(moves) == 1 and len(moves[0]) == 1:
            move = moves[0][0]
        # NOTE(review): both branches below are identical, and self.count is
        # never incremented, so the `< 15` split currently has no effect.
        if self.count < 15:
            mct = MonteCarloTree(self.board, self.color, self.opponent, (10, 0, -10))
            move = mct.get_action(10, 0)
            self.board.make_move(move, self.color)
        else:
            mct = MonteCarloTree(self.board, self.color, self.opponent, (10, 0, -10))
            move = mct.get_action(10, 0)
            self.board.make_move(move, self.color)
        return move
class StudentAI():
    """Checkers agent that answers with a uniformly random legal move."""

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}  # maps each player id to the other
        self.color = 2                # player 2 unless we act first

    def get_move(self, move):
        """Replay the opponent's move (if any), then play a random legal move."""
        if len(move) == 0:
            self.color = 1  # no opponent move yet: we are player 1
        else:
            self.board.make_move(move, self.opponent[self.color])
        moves = self.board.get_all_possible_moves(self.color)
        piece_idx = randint(0, len(moves) - 1)
        move_idx = randint(0, len(moves[piece_idx]) - 1)
        chosen = moves[piece_idx][move_idx]
        self.board.make_move(chosen, self.color)
        return chosen
class StudentAI():
    """Random-move checkers agent ("randomAI")."""

    def __init__(self, col, row, p):
        """Initialize the agent and its private board copy.

        @param col: number of columns on the board
        @param row: number of rows on the board
        @param p: number of rows filled with checker pieces at the start
        """
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}  # to switch turns after each turn
        self.color = 2

    def get_move(self, move):
        """Replay the opponent's move, then answer with a random legal move.

        @param move: Move object describing the opponent's move
        @return: a randomly selected Move from the set of valid moves
        """
        if len(move) == 0:
            self.color = 1  # first to act: we are player 1
        else:
            self.board.make_move(move, self.opponent[self.color])
        moves = self.board.get_all_possible_moves(self.color)
        outer = randint(0, len(moves) - 1)
        inner = randint(0, len(moves[outer]) - 1)
        chosen = moves[outer][inner]
        self.board.make_move(chosen, self.color)
        return chosen
class StudentAI:
    """Checkers agent that picks each move via MCTS (`best_move(800)`)."""

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = 2  # assume player 2 until get_move sees an empty move
        # Add a timer to not exceed 8 minutes

    def get_move(self, move):
        """Apply the opponent's move (if any), then search and play our move.

        @param move: opponent's Move; empty if we open the game
        @return: the first Move of the node chosen by MCTS
        """
        # If a move has been made by the opponent, we are player 2
        # Else there has been no move, we are player 1
        if len(move) != 0:
            self.board.make_move(move, other(self.color))
        else:
            self.color = 1
        mcts = MCTS(MCTSNode(self.color, self.board, self.color, []))
        movenode = mcts.best_move(800)  #TODO: decide number
        self.board.make_move(movenode.moves[0], self.color)
        return movenode.moves[0]
def simulate(self, player):
    """Random playout from the current position, on a scratch copy of the board.

    @param player: player id to move first in the playout
    @return win: winning player id, or -1 once the board's tie counter hits its cap
    """
    win = 0
    counter = 0  # plies played; only used by the disabled undo loop below
    fake_board = Board(self.col, self.row, self.p)
    self.copy_board(fake_board)  # clone the live position so self.board is untouched
    while win == 0:
        moves = fake_board.get_all_possible_moves(player)
        if len(moves) == 1:
            index = 0
        elif len(moves) == 0:
            # current player has no legal move: the opponent wins
            win = self.opponent[player]
            break
        else:
            index = randint(0, len(moves) - 1)
        if len(moves[index]) == 1:
            inner_index = 0
        else:
            inner_index = randint(0, len(moves[index]) - 1)
        move = moves[index][inner_index]
        fake_board.make_move(move, player)
        counter += 1
        # draw cutoff: too many plies without resolution
        if fake_board.tie_counter >= fake_board.tie_max:
            win = -1
        player = self.opponent[player]
    # undo loop left disabled — fake_board is simply discarded
    # for i in range(counter):
    #     self.board.undo()
    return win
class StudentAI():
    """Checkers agent that delegates move selection to a MonteCarlo searcher."""

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}  # maps each player id to the other
        self.color = 2                # player 2 unless we move first

    def get_move(self, move):
        """Apply the opponent's move (if any), run the Monte Carlo search,
        play and return the chosen move."""
        if len(move) == 0:
            self.color = 1
        else:
            self.board.make_move(move, self.opponent[self.color])
        searcher = MonteCarlo(self.board, self.color)
        chosen = searcher.get_move()
        self.board.make_move(chosen, self.color)
        return chosen
def train_one_episode(self):
    """Play one self-play episode, applying the tabular Q-learning update
    Q(s,a) += lr * (reward(s,a) + gamma * max_a' Q(s',a') - Q(s,a))
    after every move, until either side wins.

    NOTE(review): `turn` is initialized to '' and never updated, so make_move
    is always called with an empty player id — confirm against Board.make_move.
    """
    new_board = Board()
    new_board.initialize_game()
    turn = ''
    while True:
        # stop as soon as either side has won
        if new_board.is_win(self.color):
            break
        elif new_board.is_win(self.opponent[self.color]):
            break
        action = self.explore(new_board, self.color)
        state = new_board
        new_state = new_board.make_move(action, turn)
        # FIX: was `self.Q_tableQ[new_state, :]` — a typo for Q_table, which is
        # the name used three other times in this same expression.
        self.Q_table[state, action] = self.Q_table[state, action] + self.lr * \
            (self.reward(state, action) + self.gamma * np.max(self.Q_table[new_state, :])
             - self.Q_table[state, action])
        state = new_state
class StudentAI():
    """Checkers agent using a fixed-depth minimax (depth 3) over a
    piece-count / advancement heuristic with a small random jitter."""

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}  # maps each player id to the other
        self.color = 2

    def get_move(self, move):
        """Apply the opponent's move, then pick ours with minMax.

        @param move: opponent's Move (empty on the game's first turn)
        @return: the Move this agent plays
        """
        if len(move) != 0:
            self.board.make_move(move, self.opponent[self.color])
        else:
            self.color = 1
        moves = self.board.get_all_possible_moves(self.color)
        best_move = moves[0][0]  # fallback seed for the search
        #self.board.make_move(best_move, self.color)
        #best_score = self.board_score( self.color )
        #self.board.undo()
        # [1] of the returned tuple is best_move in both return shapes below
        move = self.minMax(self.color, 3, -999999999, best_move, 999999999, best_move)[1]
        self.board.make_move(move, self.color)
        return move

    def minMax(self, player, depth, best_score, best_move, opponent_score, opponent_move):
        """Recursive minimax.

        Returns a 2-tuple (score, move) at depth 0 and a 4-tuple
        (best_score, best_move, opponent_score, opponent_move) otherwise;
        callers only read indices [0] (score) and [1] (move), which agree
        between the two shapes.
        """
        if depth == 0:
            return self.board_score( player ), best_move
        # get all the moves of the current player
        moves = self.board.get_all_possible_moves(player)
        # Iterate through each move of each checker
        for i in moves:
            for ii in i:
                # change to new game state
                self.board.make_move(ii, player)
                if (player == self.color):
                    # our turn: maximize over the opponent's best replies
                    opponent_score = self.minMax(self.opponent[self.color], depth-1, best_score, best_move, opponent_score, opponent_move)[0]
                    if (best_score < opponent_score):
                        best_score = opponent_score
                        best_move = ii
                # opponent's turn: find the best score based on player's move
                elif (player == self.opponent[self.color]):
                    best_score = self.minMax(self.color, depth-1, best_score, best_move, opponent_score, opponent_move)[0]
                    if (opponent_score > best_score):
                        opponent_score = best_score
                        opponent_move = ii
                self.board.undo()  # restore the board after exploring ii
        return best_score, best_move, opponent_score, opponent_move

    def board_score(self, color):
        ## @param color: color of player making the move
        ## Heuristics to Evaluate with
        ## Normal Piece : 1000 pts
        ## King Piece : 2000 pts
        ## Rows away from enemy end if Normal : (rows - curr_row / rows) * 1000
        ## Amount of Pieces : (Amount of pieces left) / (self.col * self.p / 2) * 100
        ## Randomization : randomInt (0-10)
        # NOTE(review): the randomization below is actually randint(0, 50),
        # not 0-10 as the header comment says.
        player_points = 0
        opponent_points = 0
        for c in range(self.col):
            for r in range(self.row):
                current_piece = self.board.board[c][r]
                if current_piece.get_color() == color:
                    if current_piece.is_king == True:
                        player_points += 2000
                    else:
                        player_points += 1000
                        # advancement bonus for non-kings only
                        if color == 1:
                            player_points += ((self.row - r) / self.row) * 1000
                        else:
                            player_points += (r / self.row) * 1000
                elif current_piece.get_color() == self.opponent[color]:
                    if current_piece.is_king == True:
                        opponent_points += 2000
                    else:
                        opponent_points += 1000
                        if self.opponent[color] == 1:
                            opponent_points += ((self.row - r) / self.row) * 1000
                        else:
                            opponent_points += (r / self.row) * 1000
                else:
                    pass
        # NOTE(review): color 1 is credited white_count here — confirm the
        # color-to-count mapping against the Board implementation.
        if color == 1:
            player_points += ((self.board.white_count / (self.col * self.p / 2)) * 100)
            opponent_points += ((self.board.black_count / (self.col * self.p / 2)) * 100)
        else:
            player_points += ((self.board.black_count / (self.col * self.p / 2)) * 100)
            opponent_points += ((self.board.white_count / (self.col * self.p / 2)) * 100)
        randomization = randint(0, 50)
        return player_points - opponent_points + randomization
class StudentAI():
    """MCTS-based checkers agent with a per-move time budget carved out of a
    fixed total-game allowance."""

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = 2
        self.mcts = MCTS(TreeNode(self.board, self.color, None, None))
        self.total_time_remaining = 479       # seconds allowed for the whole game
        self.time_divisor = row * col * 0.5   # shrinks each move (see get_move)
        self.timed_move_count = 2

    def get_move(self, move) -> Move:
        '''
        Apply the opponent's move (pruning the search tree to the matching
        child), then run MCTS within this turn's time budget and return the
        chosen Move.
        '''
        # Start timer
        start_time = time()
        # Check if opponent gave a turn and execute it
        if len(move) != 0:
            self.play_move(move, OPPONENT[self.color])
        # If first move of game, change self.color and play an opening move
        else:
            self.color = 1
            self.mcts.root = TreeNode(self.board, self.color, None, None)
            moves = self.board.get_all_possible_moves(self.color)
            # FIX: was moves[0][1], which raises IndexError whenever the first
            # checker has only one legal move; moves[0][0] always exists.
            first_move = moves[0][0]
            self.play_move(first_move, self.color)
            return first_move
        # Check if only one move is possible — play it without searching
        moves = self.board.get_all_possible_moves(self.color)
        if len(moves) == 1 and len(moves[0]) == 1:
            self.play_move(moves[0][0], self.color)
            return moves[0][0]
        # Set up time limit for this move
        time_limit = self.total_time_remaining / self.time_divisor
        # MCTS
        move_chosen = self.mcts.search(time_limit)
        self.play_move(move_chosen, self.color)
        # Change time divisor so later moves get a larger share of the budget
        self.time_divisor -= 0.5 - 1 / self.timed_move_count
        self.timed_move_count += 1
        # Decrement time remaining and return
        self.total_time_remaining -= time() - start_time
        return move_chosen

    def play_move(self, move, color):
        """
        Updates board and tree root using Move given, either Move we just
        played or Move given by opponent. Reuses the matching subtree when one
        exists; otherwise starts a fresh root for the other player.
        """
        self.board.make_move(move, color)
        for child in self.mcts.root.children.items():
            if str(move) == str(child[0]) and child[1] is not None:
                self.mcts.root = child[1]
                self.mcts.root.parent = None
                return
        self.mcts.root = TreeNode(self.board, OPPONENT[color], None, None)
class StudentAI():
    """Checkers agent using a hand-rolled recursive MCTS: a node is expanded
    after `minVisit` visits, otherwise it is evaluated by a random playout."""

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}  # maps each player id to the other
        self.color = 2
        self.search_lim = 5
        self.current_node = TreeNode(None, self.color)  # root of the reused search tree

    def get_move(self, move):
        """Apply the opponent's move (descending the tree to the matching
        child), run NS MCTS iterations, then play the highest-UCT child.

        @param move: opponent's Move (empty on the game's first turn)
        @return: the Move this agent plays
        """
        if len(move) != 0:
            # print("|" + str(move) + "|")
            self.board.make_move(move, self.opponent[self.color])
            #print("Player", self.opponent[self.color], "make move", move)
            if len(self.current_node.child_node) != 0:
                # reuse the subtree that corresponds to the opponent's move
                for child in self.current_node.child_node:
                    if str(child.move) == str(move):
                        self.current_node = child
        else:
            self.color = 1
            self.current_node.player = self.color
        for i in range(NS):  # NS = iteration budget (module-level constant)
            self.mcts(self.current_node)
            #self.board.show_board()
            #print("mcts counter:", i)
        # pick the child with the highest UCT value
        move = self.current_node.child_node[0]
        for child in self.current_node.child_node:
            if move.uct() < child.uct():
                move = child
        self.board.make_move(move.move, self.color)
        # print("Player", self.color, "make move", move.move, "with a winrate of", move.winrate(), "simulated", move.simulation)
        self.current_node = move
        return move.move

    def mcts(self, node):
        """One MCTS iteration from `node`: select/expand while the node has
        been visited at least `minVisit` times, otherwise run a random playout.

        @return result: winner id propagated back up the recursion
        """
        if node.simulation >= minVisit:
            #print("depth:", depth)
            node.simulation += 1
            if not len(node.child_node):
                # first expansion: add one child per legal move
                moves = self.board.get_all_possible_moves(node.player)
                for move in moves:
                    for eachmove in move:
                        node.child_node.append(
                            TreeNode(eachmove, self.opponent[node.player], node))
            # proceed (NOTE: `next` shadows the builtin here)
            next = self.mcts_selection(node)
            self.board.make_move(next.move, node.player)
            result = self.board.is_win(node.player)
            if result:
                # terminal position reached directly below this node
                if result == self.opponent[node.player]:
                    node.win += 1
                elif result == node.player:
                    next.win += 1
                next.simulation += 1
                self.board.undo()
                return result
            #self.board.show_board()
            result = self.mcts(next)
            self.board.undo()
            # propagate up: a win for our opponent is a win for the node above
            if result == self.opponent[node.player]:
                node.win += 1
            return result
        else:
            # leaf not visited enough yet: evaluate with a random playout
            result = self.simulate(node.player)
            node.simulation += 1
            if result == self.opponent[node.player]:
                node.win += 1
            #print("simulating", result)
            return result

    def mcts_selection(self, node):
        # Select optimal UCB node (child with the largest UCT value)
        current = node.child_node[0]
        for child in node.child_node:
            #print(current.uct())
            if current.uct() < child.uct():
                current = child
        #print("player", node.player, "pick", current.move)
        return current

    def simulate(self, player):
        """Random playout on a scratch copy of the board.

        @param player: player id to move first
        @return win: winner id, or -1 once the tie counter reaches its cap
        """
        win = 0
        counter = 0  # plies played; only referenced by the disabled undo loop
        fake_board = Board(self.col, self.row, self.p)
        self.copy_board(fake_board)
        while win == 0:
            moves = fake_board.get_all_possible_moves(player)
            if len(moves) == 1:
                index = 0
            elif len(moves) == 0:
                # no legal moves: current player loses
                win = self.opponent[player]
                break
            else:
                index = randint(0, len(moves) - 1)
            if len(moves[index]) == 1:
                inner_index = 0
            else:
                inner_index = randint(0, len(moves[index]) - 1)
            move = moves[index][inner_index]
            fake_board.make_move(move, player)
            counter += 1
            # draw cutoff
            if fake_board.tie_counter >= fake_board.tie_max:
                win = -1
            player = self.opponent[player]
        # undo loop left disabled — fake_board is simply discarded
        # for i in range(counter):
        #     self.board.undo()
        return win

    def copy_board(self, board):
        """Deep-copy the live board state into `board` (a scratch Board)."""
        board.tie_counter = self.board.tie_counter
        board.tie_max = self.board.tie_max
        board.board = copy.deepcopy(self.board.board)
        board.saved_move = copy.deepcopy(self.board.saved_move)
        board.black_count = self.board.black_count
        board.white_count = self.board.white_count
class StudentAI():
    """Checkers agent using a depth-4 minimax over the raw piece-count
    differential (no alpha-beta; the pruning code is commented out)."""

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}  # maps each player id to the other
        self.color = 2

    def get_move(self, move):
        """Apply the opponent's move, then pick ours with min_max_recursion.

        @param move: opponent's Move (empty on the game's first turn)
        @return: the Move this agent plays
        """
        if len(move) != 0:
            self.board.make_move(move, self.opponent[self.color])
        else:
            self.color = 1
        moves = self.board.get_all_possible_moves(self.color)
        # leftover from the random baseline; results are unused
        index = randint(0, len(moves) - 1)
        inner_index = randint(0, len(moves[index]) - 1)
        # move = moves[index][inner_index]
        move = self.min_max_recursion(4, True)[0]  # [0] is the chosen move
        self.board.make_move(move, self.color)
        return move

    def min_max_recursion(self, depth, maximizingPlayer):
        """Recursive minimax on self.board (mutated and undone in place).

        Returns a bare int (piece differential from self.color's point of
        view) at depth 0, and a (move, score) tuple otherwise; the
        `type(Recurs) == type(tuple())` checks below handle both shapes.
        """
        if depth == 0 and self.color == 1:
            return self.board.black_count - self.board.white_count
        elif depth == 0 and self.color == 2:
            return self.board.white_count - self.board.black_count
        # NOTE(review): +/-100 sentinels assume the piece differential always
        # stays inside (-100, 100) — confirm for very large boards.
        maximum = -100
        max_move = ""
        minimum = 100
        min_move = ""
        if maximizingPlayer:
            selfmoves = self.board.get_all_possible_moves(self.color)
            #maximum = -100
            for s_checker_moves in selfmoves:
                for sm in s_checker_moves:
                    self.board.make_move(sm, self.color)
                    Recurs = self.min_max_recursion(depth - 1, False)
                    # print("Recurs: ",Recurs)
                    temp = maximum
                    if type(Recurs) == type(tuple()):
                        maximum = max(maximum, Recurs[1])
                    else:
                        maximum = max(maximum, Recurs)
                    # print("maximum: ",maximum)
                    if temp != maximum:
                        # this move improved the best score: remember it
                        max_move = sm
                    #alpha = max(alpha, Recurs)
                    # print("alpha",alpha)
                    self.board.undo()
                    #if beta <= alpha:
                    #    break
            return (max_move, maximum)
        else:
            #minimum = 100
            oppmoves = self.board.get_all_possible_moves(self.opponent[self.color])
            for o_checker_moves in oppmoves:
                for om in o_checker_moves:
                    self.board.make_move(om, self.opponent[self.color])
                    Recurs = self.min_max_recursion(depth - 1, True)
                    # print("Recurs: ",Recurs)
                    temp = minimum
                    if type(Recurs) == type(tuple()):
                        minimum = min(minimum, Recurs[1])
                    else:
                        minimum = min(minimum, Recurs)
                    # print("minimum: ",minimum)
                    if temp != minimum:
                        min_move = om
                    #beta = min(beta, Recurs)
                    # print("beta: ", beta)
                    self.board.undo()
                    #if beta <= alpha:
                    #    break
            return (min_move, minimum)
class StudentAI():
    """Checkers agent running wall-clock-bounded MCTS over an explicit Node
    tree; selection walks the tree by UCT, simulation is a random playout."""

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}  # maps each player id to the other
        self.color = 2
        self.root = Node(self.color, -1)  # -1 marks "no move" at the root
        self.start = None                 # timestamp set at the start of each turn

    def flatten(self, ini_list) -> list:
        # collapse the per-checker move lists into one flat list of Moves
        return sum(ini_list, [])

    def isTimeLeft(self):
        """True while this turn's elapsed time is under turnTimer seconds.
        NOTE(review): timedelta.seconds ignores the days component; fine for
        per-turn spans but confirm turnTimer is well under a day."""
        time = datetime.datetime.now()
        if (time - self.start).seconds < turnTimer:
            return True
        return False

    def select(self) -> Node:
        """Walk down from the root by best UCT until reaching a node with an
        unexpanded move (returned for expansion) or a leaf node."""
        #REMINDER: moves is the flattened list of all available moves
        maxNode = self.root
        maxUct = -1
        ptr = self.root
        uct = None
        found = False
        while len(ptr.children) != 0:
            #Node is not a leaf node
            moves = self.flatten(self.board.get_all_possible_moves(ptr.color))
            for m in moves:
                found = False
                for c in ptr.children:
                    if not found and m == c.move:
                        uct = c.UCT()
                        if uct > maxUct:
                            maxUct = uct
                            maxNode = c
                        found = True
                if not found:
                    return ptr  #Node is a leaf node, return parent to expand later
            if maxNode.move != -1:
                # apply the chosen child's move so deeper levels see the position
                self.board.make_move(maxNode.move, ptr.color)
            ptr = maxNode
        # Node is leaf node
        return ptr  #Same thing as the unexpanded-move case above

    def expand(self, node) -> Node:
        """Add one child for the first legal move not yet in node.children."""
        moves = self.flatten(self.board.get_all_possible_moves(node.color))
        toMove = moves[0]
        childrenMoves = []
        for c in node.children:
            childrenMoves.append(c.move.seq)
        for m in moves:
            if childrenMoves.count(m.seq) == 0:
                #Get all available moves for node, then find the leaf node to expand
                toMove = m
                break
        child = Node(self.opponent[node.color], toMove, node)
        node.children.append(child)
        return child

    def simulate(self, child):
        """Random playout from the expanded child; undoes all playout moves
        before returning the winner reported by Board.is_win."""
        players = {1: "B", 2: "W"}
        winner = None
        counter = 0  # plies played, so they can all be undone afterwards
        color = child.color
        while self.board.is_win(players[color]) == 0:
            moves = self.flatten(self.board.get_all_possible_moves(color))
            if len(moves) != 0:
                #player has moves
                i = randint(0, len(moves) - 1)
                self.board.make_move(moves[i], color)
                color = self.opponent[color]
                counter += 1
            else:
                #player doesnt have moves, but game hasn't ended yet
                color = self.opponent[color]
        winner = self.board.is_win(players[color])
        while counter != 0:
            self.board.undo()
            counter -= 1
        return winner

    def backProp(self, result, child):
        """Propagate a playout result to the root: every node on the path gets
        a simulation; nodes whose color lost get a win credit (wins are stored
        from the parent's perspective)."""
        while child is not None:
            child.upSims()
            if result != child.color:
                child.upWins()
            child = child.parent

    def MCTS(self, moves) -> Move:
        """Run select/expand/simulate/backprop until time runs out, then
        return the root child with the best win rate (random if no children)."""
        while (self.isTimeLeft()):
            parent = self.select()
            expand = self.expand(parent)
            #TODO check if expand() returns None
            result = self.simulate(expand)
            self.backProp(result, expand)
        bestMove = None
        # self.root.children[i].move
        if len(self.root.children) == 0:
            index = randint(0, len(moves) - 1)
            bestMove = moves[index]
        else:
            bestWR = -1
            i = 0
            while i != len(self.root.children):
                if self.root.children[i].getWinRate() > bestWR:
                    bestWR = self.root.children[i].getWinRate()
                    bestMove = self.root.children[i].move
                i += 1
        return bestMove

    def get_move(self, move):
        """Apply the opponent's move (re-rooting the tree to match), run MCTS
        for this turn, play the result, and re-root onto the played move.

        @param move: opponent's Move (empty on the game's first turn)
        @return: the Move this agent plays
        """
        if len(move) != 0:
            self.board.make_move(move, self.opponent[self.color])
            if self.root.parent is None:
                # len(self.root.children) == 0:
                #what if the root.children doesnt contain the one move we wanted?
                # FIX: checking len of self.root.children to moves of self.root
                self.root.move = move
            else:
                i = 0
                while i != len(self.root.children):
                    if self.root.children[i].move == move:
                        break
                    i += 1
                if i != len(self.root.children):
                    self.root = self.root.children[i]
                else:
                    #no child node: add it
                    new_root = Node(self.color, move, self.root)
                    self.root.children.append(new_root)
                    self.root = new_root
        else:
            self.color = 1
            self.root.color = 1
        self.start = datetime.datetime.now()  # per-turn timer for isTimeLeft
        moves = self.flatten(self.board.get_all_possible_moves(self.root.color))
        move = self.MCTS(moves)
        self.board.make_move(move, self.root.color)
        # PROBLEM LINE: color mismatch
        # update root to move just picked from MCTS
        i = 0
        while i != len(self.root.children):
            if self.root.children[i].move == move:
                break
            i += 1
        self.root = self.root.children[i]
        return move
class StudentAI:
    """Connect-k style agent: alpha-beta minimax over a row/column/diagonal
    run-length heuristic scored in powers of ten.

    Relies on module-level constants AI, OP (cell values), MIN, MAX (score
    bounds); `g` selects gravity mode (pieces drop to the lowest empty cell).
    """

    def __init__(self, col, row, k, g):
        self.k = k
        self.col = col
        self.row = row
        self.board = Board(col, row, k, g)
        self.g = True if g == 1 else False  # gravity mode flag
        self.win = 10**k                    # score of a completed k-run

    def get_move(self, move):
        """Apply the opponent's move and reply via minimax (depth 6 with
        gravity, 4 without); falls back to random cells if the chosen cell is
        somehow occupied. Opens in the board center when moving first."""
        if move.row == -1 and move.col == -1:
            # (-1,-1) signals we move first: take the center
            move = Move(self.col // 2, self.row // 2)
            self.board = self.board.make_move(move, AI)
            return move
        self.board = self.board.make_move(move, OP)
        if self.g:
            move, _ = self.max_val(self.board.board, MIN, MAX, 6)
        else:
            move, _ = self.max_val(self.board.board, MIN, MAX, 4)
        # safety net: re-roll until the target cell is actually empty
        while self.board.board[move.row][move.col] != 0:
            move.col = randint(0, self.col - 1)
            move.row = randint(0, self.row - 1)
        self.board = self.board.make_move(move, AI)
        return move

    def available_move(self, board):
        """List empty cells, scanning columns outward from the center; with
        gravity only the lowest empty cell of each column is playable."""
        res = []
        for c in range(self.col // 2, self.col):
            for r in range(self.row - 1, -1, -1):
                if board[r][c] == 0:
                    res.append(Move(c, r))
                    if self.g:
                        break  # gravity: only the lowest empty cell counts
        for c in range(self.col // 2 - 1, -1, -1):
            for r in range(self.row - 1, -1, -1):
                if board[r][c] == 0:
                    res.append(Move(c, r))
                    if self.g:
                        break
        return res

    def max_val(self, board, alpha, beta, deep):
        """Maximizing ply of alpha-beta; mutates `board` in place and restores
        each cell after recursing. Returns [best_move, best_score]."""
        if deep == 0:
            return Move(0, 0), self.heuristic(board, AI)
        val = MIN
        res = [Move(0, 0), 0]
        moves = self.available_move(board)
        for mv in moves:
            board[mv.row][mv.col] = AI
            _, score = self.min_val(board, alpha, beta, deep - 1)
            board[mv.row][mv.col] = 0
            if score > val:
                val = score
                res = [mv, score]
            if val >= self.win:
                break  # winning line found — no need to search further
            alpha = max(alpha, val)
            if alpha >= beta:
                break  # beta cutoff
        return res

    def min_val(self, board, alpha, beta, deep):
        """Minimizing ply of alpha-beta (opponent plays OP)."""
        if deep == 0:
            return Move(0, 0), self.heuristic(board, OP)
        val = MAX
        res = [Move(0, 0), 0]
        moves = self.available_move(board)
        for mv in moves:
            board[mv.row][mv.col] = OP
            _, score = self.max_val(board, alpha, beta, deep - 1)
            board[mv.row][mv.col] = 0
            if score < val:
                val = score
                res = [mv, score]
            if val <= -self.win:
                break
            beta = min(beta, val)
            if alpha >= beta:
                break  # alpha cutoff
        return res

    def eval(self, board, player):
        """Score `player`'s horizontal runs in `board`: each run of length d
        contributes 10**d when it has open space next to it (an extra +1 to d
        for double-open runs in no-gravity mode) or when it already spans k.

        NOTE(review): the first branch tests `j1 - 1 > 0` while the second
        tests `j1 - 1 >= 0` — possible off-by-one at column 0; confirm.
        """
        val = 0
        for row in board:
            col = len(row)
            j1 = j2 = 0
            while j2 < col:
                space = 0
                # skip cells not belonging to player
                while j1 < col and row[j1] != player:
                    j1 += 1
                j2 = j1
                # extend over the player's run
                while j2 < col and row[j2] == player:
                    j2 += 1
                if j1 < col and j2 < col:
                    # run ends inside the row
                    diff = j2 - j1
                    if j1 - 1 > 0 and row[j1 - 1] == 0:
                        space += 1
                    if row[j2] == 0:
                        space += 1
                    if space == 2 and self.g == 0:
                        diff += 1  # open on both sides: count as one longer
                    if space != 0:
                        val += 10**diff
                    elif self.k == diff:
                        val += 10**diff
                elif j1 < col <= j2:
                    # run touches the right edge of the row
                    diff = col - j1
                    if j1 - 1 >= 0 and row[j1 - 1] == 0:
                        space += 1
                    if space != 0:
                        val += 10**diff
                    elif self.k == diff:
                        val += 10**diff
                else:
                    break
                j1 = j2
        return val

    def heuristic(self, board1, player):
        """Combined score over rows, columns, and both diagonal directions;
        returns +/-self.win when either side has a completed run."""
        def transpose(board):
            # columns of board1 become rows, so eval's row scan covers columns
            return array(board).transpose().tolist()

        def diaganol(board):
            # collect every diagonal (offset +/-i plus the main one) as rows
            board = array(board)
            res = []
            for i in range(1, len(board)):
                res.append(diag(board, i).tolist())
                res.append(diag(board, -i).tolist())
            res.append(diag(board).tolist())
            return res

        board2 = transpose(board1)
        board3 = diaganol(board1)
        board4 = diaganol(board2)
        val1 = self.eval(board1, AI) + self.eval(board2, AI) + self.eval(
            board3, AI) + self.eval(board4, AI)
        val2 = self.eval(board1, OP) + self.eval(board2, OP) + self.eval(
            board3, OP) + self.eval(board4, OP)
        if val1 >= self.win and val2 >= self.win:
            # both sides look finished: side to move decides
            return self.win if player == AI else -self.win
        if val1 >= self.win or val2 >= self.win:
            return self.win if val1 >= self.win else -self.win
        return val1 - val2
class StudentAI():
    """Checkers agent that builds an explicit game tree to a fixed depth,
    scores leaves with board_points, and backs values up with min/max."""

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}  # maps each player id to the other
        self.color = 2

    def get_move(self, move):
        """Apply the opponent's move, build and evaluate the tree, then play
        the first of the best-valued moves.

        @param move: opponent's Move (empty on the game's first turn)
        @return: the Move this agent plays
        """
        if len(move) != 0:
            self.board.make_move(move, self.opponent[
                self.color])  # Run opponent's move for self.board
        else:
            self.color = 1
        root = Tree(self.opponent[self.color])  #Tree root
        # search_depth is a module-level constant (not visible in this chunk)
        self.rec_tree(root, search_depth)
        self.rec_heuristic(root)
        # root.value is {best score: [moves achieving it]}; take those moves
        avail_moves = root.value[list(root.value)[0]]
        cur_move = avail_moves[0]
        #print(avail_moves)
        self.board.make_move(cur_move, self.color)  # Make the optimal move
        move = cur_move
        return move

    def ftu(self, color):
        #Function to use: max on our own turn, min on the opponent's
        if color == self.color:
            return max
        else:
            return min

    def min_max(self, children, color):
        # Returns dict -> {Max/min value: Moves to get here}
        ftu = self.ftu(color)  #Use corresponding min or max depending on color
        value_map = {}
        for child in children:
            for v in child.value.keys():
                value_map.setdefault(v, []).append(
                    child.move
                )  # D: {heuristic value: Move to make to get here}
        # print(value_map)
        # ftu over the dict operates on its keys (the heuristic values)
        return {ftu(value_map): value_map[ftu(value_map)]}

    def board_points(
            self):  # 5 + row number for pawns, 5 + row number + 2 for kings
        # NOTE(review): self.color is 1/2 elsewhere in this class, but is
        # compared to "B" here — the final sign flip may never trigger; confirm.
        pts = 0
        for i in range(self.row):
            for j in range(self.col):
                checker = self.board.board[i][j]
                if checker.color == 'B':  # For black side pieces
                    pts += 5 + checker.row
                    if checker.is_king:  # 2 additional pts for king
                        pts += 2
                elif checker.color == 'W':  # For white side pieces
                    # NOTE(review): the constant 11 = 5 + 6 appears to assume a
                    # 6-row board — confirm for other sizes.
                    pts -= 11 - checker.row  # 5 + (6 - Row)
                    if checker.is_king:  # 2 additional pts for king
                        pts -= 2
        return pts if self.color == "B" else -pts

    def print_tree(self, root, level=0):
        """Debug helper: print the tree, indenting one tab per depth level."""
        # print("PRINTING TREE")
        print("\t" * level, root.value, "->", root.move)
        if len(root.children) != 0:  # Not Leaf node
            for child in root.children:
                self.print_tree(child, level + 1)

    def rec_tree(self, root: Tree, level=1):
        """Grow the game tree `level` plies deep below `root`, making and
        undoing moves on self.board as it descends."""
        if level == 0:
            pass
        else:
            if root.move is not None:  # Not root of tree
                self.board.make_move(root.move, root.color)
                #Check if win here maybe?
            avail_moves = self.board.get_all_possible_moves(
                self.opponent[root.color])
            for i in range(len(avail_moves)):
                for j in range(len(avail_moves[i])):
                    #print(root)
                    root.children.append(
                        Tree(self.opponent[root.color], avail_moves[i][j]))
            for child in root.children:
                self.rec_tree(child, level - 1)
            if root.move is not None:
                self.board.undo()  # restore the board on the way back up

    def rec_heuristic(self, root: Tree):
        """Fill root.value bottom-up: leaves get {board_points(): []}, inner
        nodes get the min/max over their children's values."""
        if root.move is not None:
            self.board.make_move(root.move, root.color)
        if len(root.children) == 0:  #Passed node has no children
            root.value = {self.board_points(): []}
        else:
            #Evaluate rec_heuristic for children, then retrieve values and
            #apply min/max as appropriate
            for child in root.children:
                self.rec_heuristic(child)
            root.value = self.min_max(root.children, root.color)
        if root.move is not None:
            self.board.undo()
class StudentAI():
    """Checkers AI: fixed-depth minimax over an explicit game tree, with
    depth scaled by board size, plus experimental alpha-beta variants
    (rec_abp_heuristic / rec_abp_v2) that are currently not called."""

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}
        self.color = 2
        # NOTE(review): self.ct is never incremented anywhere visible, so the
        # depth-bump schedule in get_move never fires - confirm intent.
        self.ct = 0
        self.size = self.col * self.row
        # Base search depth shrinks as the board grows.
        if self.size < 40:    # 6x6
            self.search_depth = 8
        elif self.size < 50:  # 7x7
            self.search_depth = 5
        elif self.size < 80:  # 8x8
            self.search_depth = 4
        else:
            self.search_depth = 4

    def get_move(self, move):
        """Apply opponent's move, pick a depth, search, and return our Move."""
        if len(move) != 0:
            # Run opponent's move on our mirror of the board.
            self.board.make_move(move, self.opponent[self.color])
        else:
            self.color = 1
        # FIX: the original `try: self.search_depth except NameError` could
        # never trigger (a missing attribute raises AttributeError, not
        # NameError) and its handler assigned a dead local. Use an effective
        # defensive default instead (__init__ always sets it, so this is inert
        # in normal operation).
        if not hasattr(self, "search_depth"):
            print("ERROR")
            self.search_depth = 5
        # Deepen the search as the game progresses (keyed on move count ct).
        if self.size < 40:  # 6x6
            if self.ct == 5:
                self.search_depth += 1  # 9
            elif self.ct == 10:
                self.search_depth += 1  # 10
        elif self.size < 50:  # 7x7
            if self.ct == 2:
                self.search_depth += 1  # 6
            elif self.ct == 5:
                self.search_depth += 1  # 7
            if self.ct == 10:
                self.search_depth += 1  # 8
            elif self.ct == 15:
                self.search_depth += 1  # 9
            elif self.ct == 20:
                self.search_depth += 1  # 10
        elif self.size < 80:  # 8x8
            if self.ct == 3:
                self.search_depth += 1  # 5
            elif self.ct == 5:
                self.search_depth += 1  # 6
            elif self.ct == 7:
                self.search_depth += 1  # 7
            elif self.ct == 11:
                self.search_depth += 1  # 8
        else:
            if self.ct == 10:
                self.search_depth += 1
            elif self.ct == 20:
                self.search_depth += 2
        root = Tree(self.opponent[self.color])  # tree root
        self.rec_tree(root, self.search_depth)  # set up tree
        self.rec_min_max_heuristic(root)
        avail_moves = root.value[list(root.value)[0]]
        cur_move = avail_moves[0]
        if debug: print("##########TREE##########")
        self.print_tree(root)
        if debug: print("##########TREE##########")
        self.board.make_move(cur_move, self.color)  # make the optimal move
        move = cur_move
        return move

    # Board heuristic
    def board_points(self):
        """Material + advancement score, with a king-proximity term that
        rewards our kings chasing the opponent's remaining pieces."""
        # 5 pts for the piece, row-1 pts for reaching the far row, +1 for king.
        king_pts_value = 5 + (self.row - 1) + 5
        pts = 0
        b_pawns = set()
        b_kings = set()
        w_pawns = set()
        w_kings = set()
        for i in range(self.row):
            for j in range(self.col):
                checker = self.board.board[i][j]
                if checker.color == "B":  # black
                    if checker.is_king:
                        b_kings.add((i, j))
                    else:
                        b_pawns.add((i, j))
                elif checker.color == "W":  # white
                    if checker.is_king:
                        w_kings.add((i, j))
                    else:
                        w_pawns.add((i, j))
        for pawn in b_pawns:
            pts += 5 + pawn[0]
        for pawn in w_pawns:
            pts -= (5 + (self.row - pawn[0] - 1))
        for king in b_kings:
            pts += king_pts_value
            # Subtract the mean distance to white pieces: closer is better.
            dist = 0
            for w in w_kings:
                dist += sqrt((king[0] - w[0])**2 + (king[1] - w[1])**2)
            for w in w_pawns:
                dist += sqrt((king[0] - w[0])**2 + (king[1] - w[1])**2)
            if len(w_kings) + len(w_pawns) != 0:
                pts -= dist / (len(w_kings) + len(w_pawns))
        for king in w_kings:
            pts -= king_pts_value
            dist = 0
            for b in b_kings:
                dist += sqrt((king[0] - b[0])**2 + (king[1] - b[1])**2)
            for b in b_pawns:
                dist += sqrt((king[0] - b[0])**2 + (king[1] - b[1])**2)
            if len(b_kings) + len(b_pawns) != 0:
                pts += dist / (len(b_kings) + len(b_pawns))
        # BLACK(1) goes first; if self.color == white(2) return white-positive.
        return pts if self.color == 2 else -pts

    def print_tree(self, root, level=0):
        """Debug-only tree dump (no-op unless module-level `debug` is set)."""
        if not debug: return
        print("\t" * level, color(root.color), root.value, "->", root.move)
        if len(root.children) != 0:  # not a leaf node
            for child in root.children:
                self.print_tree(child, level + 1)

    def rec_tree(self, root: "Tree", level=1):
        """Create the game tree up to `level` plies, mutating/undoing
        self.board while descending."""
        if level == 0:
            pass
        else:
            if root.move is not None:  # not root of tree
                self.board.make_move(root.move, root.color)
            avail_moves = self.board.get_all_possible_moves(
                self.opponent[root.color])
            for i in range(len(avail_moves)):
                for j in range(len(avail_moves[i])):
                    root.children.append(
                        Tree(self.opponent[root.color], avail_moves[i][j]))
            for child in root.children:
                self.rec_tree(child, level - 1)
            if root.move is not None:
                self.board.undo()

    # MinMax functions
    def ftu(self, color):
        """Function To Use: max on our turn, min on the opponent's."""
        if color == self.color:  # calculate max
            return max
        else:  # calculate min
            return min

    def min_max(self, children, color):
        """Return {min-or-max child value: [moves that reach it]}."""
        ftu = self.ftu(color)
        value_map = {}
        for child in children:
            for v in child.value.keys():
                value_map.setdefault(v, []).append(child.move)
        return {ftu(value_map): value_map[ftu(value_map)]}

    def rec_min_max_heuristic(self, root: "Tree"):
        """Score leaves with board_points() and fold values up with min_max."""
        # root.move is None only at the tree root (opponent's last move is
        # already on the board, so nothing to replay there).
        if root.move is not None:
            self.board.make_move(root.move, root.color)
        if len(root.children) == 0:
            # Leaf: value is {heuristic points: []}.
            root.value = {self.board_points(): []}
        else:
            for child in root.children:
                self.rec_min_max_heuristic(child)
            root.value = self.min_max(root.children, root.color)
        if root.move is not None:
            self.board.undo()  # revert so we return to the parent's position

    # AlphaBeta functions
    def set_alpha_beta(self, root, child, color):
        """Fold a scored child into root's alpha/beta window and value map."""
        ftu = self.ftu(color)
        if child.value is None:
            print(child)
        if root.value is None:
            root.value = {}
        if color == self.color:
            # Max: update alpha (this AI's turn).
            if root.alpha < ftu(child.value):
                root.alpha = ftu(child.value)
                root.value.setdefault(root.alpha, []).append(child.move)
        else:
            # Min: update beta (opponent's turn).
            if root.beta > ftu(child.value):
                root.beta = ftu(child.value)
                root.value.setdefault(root.beta, []).append(child.move)

    def rec_abp_heuristic(self, root: "Tree", alpha=-999, beta=999, level=0):
        """Alpha-beta pruning variant of rec_min_max_heuristic (experimental;
        not called from get_move)."""
        if debug:
            print("\t" * level, color(root.color), "Enter: ", root.value,
                  "->", root.move)
        if root.move is not None:
            self.board.make_move(root.move, root.color)
        if len(root.children) == 0:
            # Lowest level / leaf: evaluate the board.
            root.value = {self.board_points(): []}
            if debug:
                print("\t" * level, "LEAF: ", root.value, "->", root.move)
        else:
            root.alpha = alpha
            root.beta = beta
            if debug:
                print("\t" * 16, "CHILDREN:", end=" ")
            for child in root.children:
                if debug:
                    print(child.move, end=", ")
            if debug:
                print("(", color(self.opponent[root.color]), ")", sep="")
            for child in root.children:
                if root.alpha >= root.beta:  # prune the remaining children
                    if debug:
                        print("PRUNING")
                    break
                self.rec_abp_heuristic(child, root.alpha, root.beta, level + 1)
                # Fold the child's min/max into this node's window.
                self.set_alpha_beta(root, child, root.color)
            if debug:
                print("\t" * level, color(root.color), "New Value: ",
                      root.value, "->", root.move)
        if root.move is not None:
            self.board.undo()
        if debug:
            print("\t" * level, color(root.color), "Exit: ", root.value,
                  "->", root.move)

    def rec_abp_v2(self, root: "Tree", alpha=-999, beta=999):
        """Second alpha-beta experiment returning plain numeric bounds
        (experimental; not called from get_move)."""
        if root.move is not None:
            # Not the tree root: replay this node's move.
            self.board.make_move(root.move, root.color)
        else:
            root.value = {}
        if len(root.children) == 0:
            root.value = self.board_points()
            if root.move is not None:
                self.board.undo()
            return root.value
        else:
            # FIX: the original tested the undefined name `color` (NameError).
            # set_alpha_beta dispatches max/min on root.color, so do the same.
            if root.color == self.color:  # maximizing player
                for child in root.children:
                    val = self.rec_abp_v2(child, alpha, beta)
                    if alpha > val:  # alpha > val
                        root.alpha = alpha
                    else:  # val > alpha
                        alpha = val
                        if root.move is None:
                            # Root node: record the move that got us here.
                            root.value.setdefault(alpha, []).append(child.move)
                        root.alpha = alpha
                    if alpha >= beta:
                        break
                if root.move is not None:
                    self.board.undo()
                return alpha
            else:  # minimizing player
                for child in root.children:
                    val = self.rec_abp_v2(child, alpha, beta)
                    if beta < val:  # beta < val
                        root.beta = beta
                    else:
                        beta = val
                        if root.move is None:
                            root.value.setdefault(beta, []).append(child.move)
                        root.beta = beta
                    if alpha >= beta:
                        break
                if root.move is not None:
                    self.board.undo()
                return beta
class StudentAI():
    """Checkers AI: asyncio-driven iterative-deepening minimax with an
    async timer enforcing a per-move time budget, and two positional
    heuristics (ieeeEvaluation1/2) switched by game phase."""

    INITIAL_DEPTH_LIMIT = 5
    EARLY_GAME_TURNS = 10
    TURN_COLOR_MAP = {1: "B", 2: "W"}

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}
        self.color = 2
        self.depth = 0
        self.turn = 0  # our move counter, drives the early-game heuristic
        self.control = asyncio.get_event_loop()
        self.iterative_depth_limit = self.INITIAL_DEPTH_LIMIT
        self.time_left = TimeFlags.UNDER
        self.time_used = 0  # cumulative wall-clock seconds we have consumed
        self.upper_depth_limit = float('inf')
        self.time_limit = 480  # 8 minutes
        self.late_game_flag = False
        self.heuristic_flag = 1  # use ieee1; if 2, use ieee2
        self.flag_just_changed = 0  # hysteresis countdown after a flag flip

    # Timer, with sleep/depth presets chosen from total used time.
    # Min sleep must be > 1.
    async def timer(self, state):
        """Sleep for the per-move budget, then flag the search as out of
        time. Cancellation (search finished first) resets the flag."""
        try:
            if self.time_used < 120:  # before the two-minute mark
                self.upper_depth_limit = 8
                await asyncio.sleep(15)
            elif self.time_used < 240:  # four-minute mark
                self.upper_depth_limit = 7
                await asyncio.sleep(10)
            elif self.time_used < 360:  # six-minute mark
                self.upper_depth_limit = 7
                await asyncio.sleep(8)
            elif self.time_used < 420:  # seven-minute mark
                self.upper_depth_limit = 6
                await asyncio.sleep(5)
            else:  # anything longer than above
                self.upper_depth_limit = 5
                await asyncio.sleep(1)
            # After waiting, mark the search as over time.
            self.time_left = TimeFlags.OVER
        except asyncio.CancelledError:
            # Search finished first and cancelled us.
            self.time_left = TimeFlags.UNDER

    async def min_max_start(self):
        """Run the search and the timer concurrently; return the move."""
        self.task_timer = asyncio.Task(self.timer(self.board))
        self.task_minmax = asyncio.Task(self.minMaxSearch(self.board))
        # gather returns results in task order, hence [0] is the move.
        chosen_move = await asyncio.gather(self.task_minmax, self.task_timer)
        return chosen_move[0]

    def get_move(self, move):
        """Apply opponent's move, run the timed search, return our Move."""
        start_time = self.control.time()  # to track total time used
        if len(move) != 0:
            self.board.make_move(move, self.opponent[self.color])
        else:
            self.color = 1
        self.turn += 1
        # Start the asynchronous minmax + timer search.
        move = self.control.run_until_complete(self.min_max_start())
        self.board.make_move(move, self.color)
        # Add to our ongoing used time (8-minute budget, see timer()).
        self.time_used += self.control.time() - start_time
        return move

    async def minMaxSearch(self, state):
        """Iterative-deepening root search; returns the best root move
        found before time or the depth ceiling runs out."""
        ourMoves = state.get_all_possible_moves(self.color)
        lastBestVal = float('-inf')
        # FIX: lastBestMove could be referenced before assignment if the
        # timer fired before any move was scored; initialize it and only
        # honor the early-out once at least one move has been evaluated.
        lastBestMove = None
        self.time_left = TimeFlags.UNDER
        self.iterative_depth_limit = self.INITIAL_DEPTH_LIMIT
        while self.time_left != TimeFlags.OVER:
            for moves in ourMoves:
                for ourMove in moves:
                    # If we're over time, just return our current best.
                    if self.time_left == TimeFlags.OVER and lastBestMove is not None:
                        return lastBestMove
                    state.make_move(ourMove, self.color)
                    tempMax = await self.minValue(state, 1, float('-inf'),
                                                  float('inf'))
                    if lastBestVal < tempMax:
                        lastBestVal = tempMax
                        lastBestMove = ourMove
                    state.undo()
            # Upon each iteration, increase depth limit by 1.
            self.iterative_depth_limit += 1
            # If we reached our set max, stop and return what we have.
            if self.iterative_depth_limit > self.upper_depth_limit:
                break
            # Context switch back to the timer, to check if it's run out.
            await asyncio.sleep(0)
        # Reset the depth limit and cancel the timer (depth ceiling hit).
        self.iterative_depth_limit = self.INITIAL_DEPTH_LIMIT
        self.task_timer.cancel()
        return lastBestMove

    async def maxValue(self, state, depth, alpha, beta):
        """Alpha-beta max node: our side to move."""
        # Check if this state is a win state.
        isWin = state.is_win(self.TURN_COLOR_MAP[self.opponent[self.color]])
        if isWin != 0:
            if isWin == self.color:
                return 999999999
            elif isWin == self.opponent[self.color]:
                return -999999999
        await asyncio.sleep(0)  # yield so the timer can fire
        # Evaluate directly at the depth limit, terminal, or out of time.
        ourMoves = state.get_all_possible_moves(self.color)
        if (depth >= self.iterative_depth_limit
                ) or len(ourMoves) == 0 or self.time_left == TimeFlags.OVER:
            return self.evalFunction(state)
        v = float('-inf')
        depth += 1
        for moves in ourMoves:
            for ourMove in moves:
                state.make_move(ourMove, self.color)
                v = max(v, await self.minValue(state, depth, alpha, beta))
                state.undo()
                if v >= beta:  # beta cutoff
                    return v
                alpha = max(alpha, v)
        return v

    async def minValue(self, state, depth, alpha, beta):
        """Alpha-beta min node: opponent to move."""
        isWin = state.is_win(self.TURN_COLOR_MAP[self.color])
        if isWin != 0:
            if isWin == self.color:
                return 999999999
            elif isWin == self.opponent[self.color]:
                return -999999999
        await asyncio.sleep(0)
        oppMoves = state.get_all_possible_moves(self.opponent[self.color])
        if (depth >= self.iterative_depth_limit
                ) or len(oppMoves) == 0 or self.time_left == TimeFlags.OVER:
            return self.evalFunction(state)
        v = float('inf')
        depth += 1
        for moves in oppMoves:
            for oppMove in moves:
                state.make_move(oppMove, self.opponent[self.color])
                v = min(v, await self.maxValue(state, depth, alpha, beta))
                state.undo()
                if v <= alpha:  # alpha cutoff
                    return v
                beta = min(beta, v)
        return v

    def evalFunction(self, state):
        """Dispatch to ieee1/ieee2 based on game phase and side flags."""
        if self.turn < self.EARLY_GAME_TURNS:
            return self.ieeeEvaluation1(state)  # go to their side
        if self.late_game_flag:
            if self.flag_just_changed > 0:
                # Hysteresis: keep the current heuristic for a few turns.
                self.flag_just_changed -= 1
                if self.heuristic_flag == 1:
                    return self.ieeeEvaluation1(state)
                elif self.heuristic_flag == 2:
                    return self.ieeeEvaluation2(state)
            else:
                self.checkSide(state)
                if self.heuristic_flag == 1:
                    return self.ieeeEvaluation1(state)
                elif self.heuristic_flag == 2:
                    return self.ieeeEvaluation2(state)
        else:
            self.checkLateGame(state)
            return self.ieeeEvaluation1(state)

    def checkLateGame(self, state):
        """Set late_game_flag once every one of our pieces is a king."""
        numOurCheckers = 0
        numOurKings = 0
        for row in range(0, len(state.board)):
            for col in range(0, len(state.board[row])):
                checkerPiece = state.board[row][col]
                if self.color == 1:
                    if checkerPiece.color == "B":
                        numOurCheckers += 1
                        if checkerPiece.is_king:
                            numOurKings += 1
                elif self.color == 2:
                    if checkerPiece.color == "W":
                        numOurCheckers += 1
                        if checkerPiece.is_king:
                            numOurKings += 1
        # FIX: guard the division - with no pieces left this raised
        # ZeroDivisionError mid-search.
        if numOurCheckers == 0:
            return
        if numOurKings / numOurCheckers == 1:
            self.late_game_flag = True

    def checkSide(self, state):
        """Flip heuristic_flag (1 <-> 2) when >80% of our pieces have
        crossed to the opponent's side (or returned to ours)."""
        numOurCheckers = 0
        numSideUs = 0
        numSideTheirs = 0
        if self.color == 1:
            rowCheck = 2 if len(state.board) == 7 else 3
            rowCheckTheir = 4
        elif self.color == 2:
            rowCheck = 4
            rowCheckTheir = 2 if len(state.board) == 7 else 3
        for row in range(0, len(state.board)):
            for col in range(0, len(state.board[row])):
                checkerPiece = state.board[row][col]
                if self.color == 1:
                    if checkerPiece.color == "B":
                        numOurCheckers += 1
                        if row < rowCheck:
                            numSideUs += 1
                        if row > rowCheckTheir:
                            numSideTheirs += 1
                elif self.color == 2:
                    if checkerPiece.color == "W":
                        numOurCheckers += 1
                        if row > rowCheck:
                            numSideUs += 1
                        if row < rowCheckTheir:
                            numSideTheirs += 1
        # FIX: guard the divisions against numOurCheckers == 0.
        if numOurCheckers == 0:
            return
        if self.heuristic_flag == 1 and numSideTheirs / numOurCheckers > 0.8:
            self.heuristic_flag = 2
            self.flag_just_changed = 10
        if self.heuristic_flag == 2 and numSideUs / numOurCheckers > 0.8:
            self.heuristic_flag = 1
            self.flag_just_changed = 10

    # Black always starts from 0,0 while white starts on the other side.
    def ieeeEvaluation1(self, state):
        """Weighted piece / advancement / center-control score, rewarding
        advancement toward the opponent's side."""
        ourPawn = 0
        ourKing = 0
        ourMiddle = 0
        ourRow = 0
        oppPawn = 0
        oppKing = 0
        oppMiddle = 0
        oppRow = 0
        boardRowLen = len(state.board)
        middleRowEnd = len(state.board) - 3
        middleColEnd = len(state.board[0]) - 3
        for row in range(0, len(state.board)):
            for col in range(0, len(state.board[row])):
                checkerPiece = state.board[row][col]
                if self.color == 1:
                    if checkerPiece.color == "B":  # our piece
                        ourRow += row
                        if row >= 2 and row <= middleRowEnd and col >= 2 and col <= middleColEnd:
                            ourMiddle += 1
                        if checkerPiece.is_king:
                            ourKing += 1
                        else:
                            ourPawn += 1
                    elif checkerPiece.color == "W":  # their piece
                        oppRow += boardRowLen - row - 1
                        if row >= 2 and row <= middleRowEnd and col >= 2 and col <= middleColEnd:
                            oppMiddle += 1
                        if checkerPiece.is_king:
                            oppKing += 1
                        else:
                            oppPawn += 1
                elif self.color == 2:
                    if checkerPiece.color == "W":  # our piece
                        ourRow += boardRowLen - row - 1
                        if row >= 2 and row <= middleRowEnd and col >= 2 and col <= middleColEnd:
                            ourMiddle += 1
                        if checkerPiece.is_king:
                            ourKing += 1
                        else:
                            ourPawn += 1
                    elif checkerPiece.color == "B":  # opponent piece
                        oppRow += row
                        if row >= 2 and row <= middleRowEnd and col >= 2 and col <= middleColEnd:
                            oppMiddle += 1
                        if checkerPiece.is_king:
                            oppKing += 1
                        else:
                            oppPawn += 1
        return (80 * ((ourPawn - oppPawn) + 2.5 * (ourKing - oppKing))) + (
            40 * (ourRow - oppRow)) + (20 * (ourMiddle - oppMiddle))

    def ieeeEvaluation2(self, state):
        """Like ieeeEvaluation1 but with the row credit inverted: rewards
        retreating toward our own side (used after pieces have crossed)."""
        ourPawn = 0
        ourKing = 0
        ourMiddle = 0
        ourRow = 0
        oppPawn = 0
        oppKing = 0
        oppMiddle = 0
        oppRow = 0
        boardRowLen = len(state.board)
        middleRowEnd = len(state.board) - 3
        middleColEnd = len(state.board[0]) - 3
        for row in range(0, len(state.board)):
            for col in range(0, len(state.board[row])):
                checkerPiece = state.board[row][col]
                if self.color == 1:
                    if checkerPiece.color == "B":  # our piece
                        ourRow += boardRowLen - row - 1
                        if row >= 2 and row <= middleRowEnd and col >= 2 and col <= middleColEnd:
                            ourMiddle += 1
                        if checkerPiece.is_king:
                            ourKing += 1
                        else:
                            ourPawn += 1
                    elif checkerPiece.color == "W":  # their piece
                        oppRow += row
                        if row >= 2 and row <= middleRowEnd and col >= 2 and col <= middleColEnd:
                            oppMiddle += 1
                        if checkerPiece.is_king:
                            oppKing += 1
                        else:
                            oppPawn += 1
                elif self.color == 2:
                    if checkerPiece.color == "W":  # our piece
                        ourRow += row
                        if row >= 2 and row <= middleRowEnd and col >= 2 and col <= middleColEnd:
                            ourMiddle += 1
                        if checkerPiece.is_king:
                            ourKing += 1
                        else:
                            ourPawn += 1
                    elif checkerPiece.color == "B":  # opponent piece
                        oppRow += boardRowLen - row - 1
                        if row >= 2 and row <= middleRowEnd and col >= 2 and col <= middleColEnd:
                            oppMiddle += 1
                        if checkerPiece.is_king:
                            oppKing += 1
                        else:
                            oppPawn += 1
        return (80 * ((ourPawn - oppPawn) + 2.5 * (ourKing - oppKing))) + (
            40 * (ourRow - oppRow)) + (20 * (ourMiddle - oppMiddle))
class StudentAI():
    """Checkers AI: depth-4 alpha-beta minimax with a piece-location and
    material-differential evaluation. MIN/MAX are module-level bound
    constants defined elsewhere in this file (presumably the +/- sentinel
    values for alpha-beta - TODO confirm)."""

    def __init__(self,col,row,p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col,row,p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1:2,2:1}  # player number -> opposing player number
        self.color = 2  # assume second player until get_move sees otherwise

    def get_move(self,move):
        """Apply the opponent's move (if any), search our moves with
        alpha-beta, and return (and apply) the best one."""
        if len(move) != 0:
            self.board.make_move(move,self.opponent[self.color])
        else:
            self.color = 1  # empty move => we are the first player
        bestVal = -999
        bestMove = None
        # If there is only one move to make, make it without evaluating.
        possible_moves = self.board.get_all_possible_moves(self.color)
        if len(possible_moves) == 1 and len(possible_moves[0]) == 1:
            self.board.make_move(possible_moves[0][0], self.color)
            return possible_moves[0][0]
        for moves in possible_moves:
            for move in moves:
                # Try the move, score the resulting position, then undo.
                self.board.make_move(move, self.color)
                val = self.search(1, StudentAI.switchColors(self.color), MIN, MAX)
                self.board.undo()
                if val > bestVal:
                    bestVal = val
                    bestMove = move
        self.board.make_move(bestMove, self.color)
        return bestMove

    def search(self, depth, currentColor, alpha, beta):
        """Alpha-beta minimax to a fixed depth of 4 plies; mutates and
        undoes self.board while descending. `currentColor` is the player
        number (1/2) to move at this node."""
        if depth == 4 or self.board.is_win('B') or self.board.is_win('W'):
            return self.evaluate(currentColor)
        # Maximize on our turn, minimize on the opponent's.
        best = MIN if currentColor == self.color else MAX
        for moves in self.board.get_all_possible_moves(currentColor):
            for move in moves:
                self.board.make_move(move, currentColor)
                val = self.search(depth+1, StudentAI.switchColors(currentColor), alpha, beta)
                self.board.undo()
                if currentColor == self.color:
                    best = max(best, val)
                    alpha = max(alpha, best)
                elif currentColor != self.color:
                    best = min(best, val)
                    beta = min(beta, best)
                if beta <= alpha:  # prune the remaining moves
                    return best
        return best

    def piece_differential(self, currentColor):
        """Material difference from `currentColor`'s ('B'/'W') perspective."""
        if currentColor == 'B':
            return self.board.black_count - self.board.white_count
        return self.board.white_count - self.board.black_count

    def evaluate(self, currentColor):
        """Score the current board from `currentColor`'s perspective
        (accepts the player number 1/2 and converts to 'B'/'W')."""
        currentColor = 'B' if currentColor == 1 else 'W'
        oppColor = 'W' if currentColor == 'B' else 'B'
        # If we win in this game state, prefer this path; if the opponent
        # wins here, stay away from it.
        if self.board.is_win(currentColor):
            return 500
        elif self.board.is_win(oppColor):
            return -500
        piece_location, kings = 0, 0
        for i in range(self.board.row):
            for j in range(self.board.col):
                if (self.board.board[i][j].color == currentColor):
                    if self.board.board[i][j].is_king:
                        kings += 1
                        # We prefer the king to be in the middle of the board.
                        # NOTE(review): uses self.row here but self.board.row
                        # below - both are set from the same ctor args, so the
                        # values match; confirm they stay in sync.
                        if i <= self.row / 2:
                            piece_location += 7 + i
                        else:
                            piece_location += 7 + (self.board.row - i - 1)
                    else:
                        # We prefer pawns advancing to the opponent's side.
                        if self.board.board[i][j].color == 'B':
                            piece_location += 5 + i
                        else:
                            piece_location += 5 + (self.board.row - i - 1)
                elif (self.board.board[i][j].color == oppColor):
                    if self.board.board[i][j].is_king:
                        kings -= 1
                        # Prefer the opponent's king away from the middle.
                        if i <= self.row / 2:
                            piece_location -= 7 + i
                        else:
                            piece_location -= 7 + (self.board.row - i - 1)
                    else:
                        # Prefer the opponent's pawns away from our side.
                        if self.board.board[i][j].color == 'B':
                            piece_location -= 5 + i
                        else:
                            piece_location -= 5 + (self.board.row - i - 1)
        # With a king advantage, weight material more (play aggressively).
        if kings > 0:
            return piece_location + self.board.row * self.piece_differential(currentColor)
        else:
            return piece_location + self.piece_differential(currentColor)

    @staticmethod
    def switchColors(color):
        """Return the opposing player number (1 <-> 2)."""
        if color == 1:
            return 2
        return 1
class StudentAI():
    """Checkers AI: asyncio-driven iterative-deepening minimax (earlier
    variant of the timer-based searcher) with early/late-game heuristics."""

    INITIAL_DEPTH_LIMIT = 2
    EARLY_GAME_TURNS = 10
    TURN_COLOR_MAP = {1: "B", 2: "W"}

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}
        self.color = 2
        self.depth = 0
        self.turn = 0  # our move counter, drives the early-game heuristic
        self.control = asyncio.get_event_loop()
        self.iterative_depth_limit = self.INITIAL_DEPTH_LIMIT
        self.time_left = TimeFlags.UNDER
        self.time_used = 0  # cumulative wall-clock seconds used so far
        self.upper_depth_limit = float('inf')

    # Timer, with presets based on total used time. Min sleep must be > 1.
    # NOTE(review): unlike the sibling variant, this timer is never
    # cancelled, so min_max_start's gather waits out the full sleep each
    # move - confirm that is intended.
    async def timer(self, state):
        """Sleep for the per-move budget, then flag the search as over."""
        if self.time_used < 120:  # before the two-minute mark
            self.upper_depth_limit = 3
            await asyncio.sleep(20)
        elif self.time_used < 240:  # four-minute mark
            self.upper_depth_limit = 6
            await asyncio.sleep(10)
        elif self.time_used < 360:  # six-minute mark
            self.upper_depth_limit = 5
            await asyncio.sleep(8)
        elif self.time_used < 420:  # seven-minute mark
            self.upper_depth_limit = 5
            await asyncio.sleep(5)
        else:  # anything longer than above
            self.upper_depth_limit = 4
            await asyncio.sleep(1)
        # After waiting, set time to over time.
        self.time_left = TimeFlags.OVER

    async def min_max_start(self):
        """Run the search and the timer concurrently; return the move."""
        self.task_timer = asyncio.Task(self.timer(self.board))
        self.task_minmax = asyncio.Task(self.minMaxSearch(self.board))
        # gather returns results in task order, hence [0] is the move.
        chosen_move = await asyncio.gather(self.task_minmax, self.task_timer)
        return chosen_move[0]

    def get_move(self, move):
        """Apply opponent's move, run the timed search, return our Move."""
        start_time = self.control.time()
        if len(move) != 0:
            self.board.make_move(move, self.opponent[self.color])
        else:
            self.color = 1
        self.turn += 1
        # Start the asynchronous minmax timer search.
        move = self.control.run_until_complete(self.min_max_start())
        self.board.make_move(move, self.color)
        # Add to our ongoing used time, 8 minute time limit.
        self.time_used += self.control.time() - start_time
        return move

    async def minMaxSearch(self, state):
        """Iterative-deepening root search; returns the best root move
        completed at the last finished depth."""
        ourMoves = state.get_all_possible_moves(self.color)
        maxVal = float('-inf')
        # FIX: chosenMove/lastBest could be referenced before assignment
        # (e.g. timer firing before the first move is scored, or an empty
        # move list); initialize both and only honor the early-out once a
        # completed iteration has produced a move.
        chosenMove = None
        lastBest = None
        self.time_left = TimeFlags.UNDER
        self.iterative_depth_limit = self.INITIAL_DEPTH_LIMIT
        while self.time_left != TimeFlags.OVER:
            for moves in ourMoves:
                for ourMove in moves:
                    # If we're over time, just return our current best.
                    if self.time_left == TimeFlags.OVER and lastBest is not None:
                        return lastBest
                    state.make_move(ourMove, self.color)
                    tempMax = await self.minValue(state, 1, float('-inf'),
                                                  float('inf'))
                    if maxVal < tempMax:
                        maxVal = tempMax
                        chosenMove = ourMove
                    state.undo()
            # Keep the best move of the deepest fully-finished iteration.
            lastBest = chosenMove
            # Upon each iteration, increase depth limit by 1.
            self.iterative_depth_limit += 1
            # If we reached our set max, stop and return what we have.
            if self.iterative_depth_limit > self.upper_depth_limit:
                break
            # Context switch back to the timer, to check if it's run out.
            await asyncio.sleep(0)
        # Return depth limit back to what it was originally.
        self.iterative_depth_limit = self.INITIAL_DEPTH_LIMIT
        return lastBest

    async def maxValue(self, state, depth, alpha, beta):
        """Alpha-beta max node: our side to move."""
        # Check if this state is a win state.
        isWin = state.is_win(self.TURN_COLOR_MAP[self.opponent[self.color]])
        if isWin != 0:
            if isWin == self.color:
                return 999999999
            elif isWin == self.opponent[self.color]:
                return -999999999
        await asyncio.sleep(0)  # yield so the timer can fire
        # Evaluate directly at the depth limit, terminal, or out of time.
        ourMoves = state.get_all_possible_moves(self.color)
        if (depth >= self.iterative_depth_limit
                ) or len(ourMoves) == 0 or self.time_left == TimeFlags.OVER:
            return self.evalFunction(state)
        v = float('-inf')
        depth += 1
        for moves in ourMoves:
            for ourMove in moves:
                state.make_move(ourMove, self.color)
                v = max(v, await self.minValue(state, depth, alpha, beta))
                state.undo()
                if v >= beta:  # beta cutoff
                    return v
                alpha = max(alpha, v)
        return v

    async def minValue(self, state, depth, alpha, beta):
        """Alpha-beta min node: opponent to move."""
        isWin = state.is_win(self.TURN_COLOR_MAP[self.color])
        if isWin != 0:
            if isWin == self.color:
                return 999999999
            elif isWin == self.opponent[self.color]:
                return -999999999
        await asyncio.sleep(0)
        oppMoves = state.get_all_possible_moves(self.opponent[self.color])
        if (depth >= self.iterative_depth_limit
                ) or len(oppMoves) == 0 or self.time_left == TimeFlags.OVER:
            return self.evalFunction(state)
        v = float('inf')
        depth += 1
        for moves in oppMoves:
            for oppMove in moves:
                state.make_move(oppMove, self.opponent[self.color])
                v = min(v, await self.maxValue(state, depth, alpha, beta))
                state.undo()
                if v <= alpha:  # alpha cutoff
                    return v
                beta = min(beta, v)
        return v

    def evalFunction(self, state):
        """Pick the early- or late-game heuristic for this position."""
        if self.turn < self.EARLY_GAME_TURNS:
            return self.pieceAndRowEval(state)
        earlyLateList = self.getEarlyOrLate(state)
        if earlyLateList[0] == -1:
            # In this state, we have no pieces.
            return -999999999
        if earlyLateList[0] == 1:
            # Late game heuristic.
            return self.lateGameKingEval(state, earlyLateList[1],
                                         earlyLateList[2])
        else:
            # Early game heuristic.
            return self.pieceAndRowEval(state)

    def countOurPieces(self, state):
        """Count the pieces belonging to our color on `state`."""
        ongoing = 0
        for row in range(0, len(state.board)):
            for col in range(0, len(state.board[row])):
                checkerPiece = state.board[row][col]
                if self.color == 1:
                    if checkerPiece.color == "B":
                        ongoing += 1
                elif self.color == 2:
                    if checkerPiece.color == "W":
                        ongoing += 1
        return ongoing

    def getEarlyOrLate(self, state):
        """Classify the phase: returns [phase, ourKings, oppKings] where
        phase is -1 (we have no pieces), 1 (late game: king advantage or
        all our pieces are kings) or 0 (early game). King lists hold
        (row, col) positions."""
        totalCheckers = 0
        numOurCheckers = 0
        numOurKings = 0
        numOppCheckers = 0
        numOppKings = 0
        ourKings = []
        oppKings = []
        for row in range(0, len(state.board)):
            for col in range(0, len(state.board[row])):
                checkerPiece = state.board[row][col]
                if self.color == 1:
                    if checkerPiece.color == "B":
                        totalCheckers += 1
                        numOurCheckers += 1
                        if checkerPiece.is_king:
                            numOurKings += 1
                            ourKings.append((row, col))
                    elif checkerPiece.color == "W":
                        totalCheckers += 1
                        numOppCheckers += 1
                        if checkerPiece.is_king:
                            numOppKings += 1
                            oppKings.append((row, col))
                elif self.color == 2:
                    if checkerPiece.color == "W":
                        totalCheckers += 1
                        numOurCheckers += 1
                        if checkerPiece.is_king:
                            numOurKings += 1
                            ourKings.append((row, col))
                    elif checkerPiece.color == "B":
                        totalCheckers += 1
                        numOppCheckers += 1
                        if checkerPiece.is_king:
                            numOppKings += 1
                            oppKings.append((row, col))
        if numOurCheckers == 0:
            return [-1, ourKings, oppKings]
        if numOurKings > numOppKings or numOurKings / numOurCheckers == 1:
            return [1, ourKings, oppKings]
        else:
            return [0, ourKings, oppKings]

    def lateGameKingEval(self, state, ourKings, oppKings):
        """Manhattan-distance king heuristic: with a king advantage close
        the distance (attack); otherwise keep it large (run away)."""
        ourDistance = 0
        for ourKing in ourKings:
            for oppKing in oppKings:
                ourDistance += abs(ourKing[0] - oppKing[0]) + abs(ourKing[1] -
                                                                  oppKing[1])
        if len(ourKings) > len(oppKings):
            # Attack: smaller total distance scores higher.
            return -1 * ourDistance
        else:
            # Run away: larger total distance scores higher.
            return ourDistance

    # Black always starts from 0,0 while white starts on the other side.
    def pieceAndRowEval(self, state):
        """Material + advancement score: 5 + rows advanced per piece, +2
        per king; returns our total minus the opponent's."""
        ourCount = 0
        oppCount = 0
        boardLen = len(state.board)
        for row in range(0, len(state.board)):
            for col in range(0, len(state.board[row])):
                checkerPiece = state.board[row][col]
                if self.color == 1:
                    if checkerPiece.color == "B":  # our piece
                        if checkerPiece.is_king:
                            ourCount += 5 + (row + 1) + 2
                        else:
                            ourCount += 5 + (row + 1)
                    elif checkerPiece.color == "W":  # their piece
                        if checkerPiece.is_king:
                            oppCount += 5 + (boardLen - row) + 2
                        else:
                            oppCount += 5 + (boardLen - row)
                elif self.color == 2:
                    if checkerPiece.color == "W":  # our piece
                        if checkerPiece.is_king:
                            ourCount += 5 + (boardLen - row) + 2
                        else:
                            ourCount += 5 + (boardLen - row)
                    elif checkerPiece.color == "B":  # opponent piece
                        if checkerPiece.is_king:
                            oppCount += 5 + (row + 1) + 2
                        else:
                            oppCount += 5 + (row + 1)
        return ourCount - oppCount
class StudentAI():
    """Checkers agent that samples playouts MCTS-style and scores
    (player, move) nodes with the UCB1 formula."""

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}
        self.color = 2
        # ---------- MCTS bookkeeping -----------
        self.calc_time = datetime.timedelta(seconds=3)  # wall-clock budget per move
        self.max_moves = 35           # max plies simulated per playout
        self.wins = {}                # (player, move) -> simulated wins
        self.plays = {}               # (player, move) -> simulated visits
        self.max_depth = 0
        self.C = 1.4                  # UCB1 exploration constant
        self.colors = {1: "B", 2: "W"}
        self.letters = {"B": 1, "W": 2}
        self.states = []

    def run_sim(self, board):
        """Play one randomized simulation on *board* and back-propagate the
        result to every (player, move) node visited.

        NOTE(review): the simulation mutates *board* without undoing its
        moves, so successive calls continue from wherever the previous one
        stopped -- presumably intentional for this sampler; confirm.
        """
        player = self.colors[self.color]
        number = self.letters[player]
        visited_states = set()
        expand = True
        for i in range(self.max_moves):
            moves = board.get_all_possible_moves(number)
            if len(moves) == 0:
                return
            if all(self.plays.get((player, x)) for move in moves for x in move):
                # Every child already has statistics: pick by UCB1.
                max_move = self.selection(moves, player)
            else:
                # At least one unexplored child: play uniformly at random.
                index = randint(0, len(moves) - 1)
                inner_index = randint(0, len(moves[index]) - 1)
                max_move = moves[index][inner_index]
            board.make_move(max_move, number)
            if expand and (player, max_move) not in self.plays:
                # Expand at most one brand-new node per simulation.
                expand = False
                self.expand(player, max_move)
            visited_states.add((player, max_move))
            winner = board.is_win("W")
            if winner == 1 or winner == 2 or winner == -1:
                break
            # Switch sides for the next ply.
            if player == "W":
                player, number = "B", 1
            else:
                player, number = "W", 2
        if winner == 0:
            return
        elif winner == -1:
            # BUG FIX: the original used '==' (a no-op comparison) instead of
            # '=', so a -1 result never assigned a winner and nobody was
            # credited in back-propagation.
            winner = self.colors[self.color]
        else:
            winner = self.colors[winner]
        self.back_propagate(visited_states, winner)

    def selection(self, moves, player):
        """Return the move in *moves* with the highest UCB1 score for
        *player*.  Unvisited moves score -100000; returns "" when *moves*
        is empty."""
        best_score = -100000
        max_move = ""
        sum_plays = 0
        for group in moves:
            for candidate in group:
                sum_plays = sum_plays + self.plays.get((player, candidate), 0)
        for group in moves:
            for candidate in group:
                try:
                    exploit = (self.wins[(player, candidate)]
                               / self.plays[(player, candidate)])
                    score = exploit + self.C * sqrt(
                        log(sum_plays) / self.plays[(player, candidate)])
                except (KeyError, ZeroDivisionError, ValueError):
                    # Unvisited node, zero plays, or log(0): rank it last.
                    # (The original used a bare except with the same effect.)
                    score = -100000
                if score > best_score:
                    best_score = score
                    max_move = candidate
        return max_move

    def back_propagate(self, visited_states, winner):
        """Credit each visited node with one play, plus a win when it is on
        the winning side."""
        for player, move in visited_states:
            if (player, move) not in self.plays:
                continue
            self.plays[(player, move)] += 1
            if player == winner:
                self.wins[(player, move)] += 1

    def expand(self, player, move):
        """Create zeroed statistics for a newly seen (player, move) node."""
        self.plays[(player, move)] = 0
        self.wins[(player, move)] = 0

    def get_move(self, move):
        """Apply the opponent's *move*, then search for our reply.

        The very first move of the game (as Black) is played at random;
        otherwise simulations run until self.calc_time is exhausted and the
        best-scoring move is played (falling back to a random legal move
        when selection produces nothing).
        """
        first = False
        if len(move) != 0:
            self.board.make_move(move, self.opponent[self.color])
        else:
            self.color = 1
            first = True
        player = self.colors[self.color]
        moves = self.board.get_all_possible_moves(self.color)
        index = randint(0, len(moves) - 1)
        inner_index = randint(0, len(moves[index]) - 1)
        move = moves[index][inner_index]
        if first:
            self.board.make_move(move, self.color)
            return move
        games = 0
        begin = datetime.datetime.utcnow()
        new_board = deepcopy(self.board)
        while datetime.datetime.utcnow() - begin < self.calc_time:
            self.run_sim(new_board)
            games += 1
        max_move = self.selection(moves, player)
        if max_move == "":
            # No node had statistics: fall back to the random move.
            max_move = move
        self.board.make_move(max_move, self.color)
        return max_move
class StudentAI():
    """Checkers data-collection agent.

    Chooses moves via Monte-Carlo playouts and, as a side effect of each
    evaluation, appends (white features, black features, win-rate) rows to
    a per-board-size text file for offline training.
    """

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}
        self.color = 2
        self.movecount = 0
        self.simulate_times = 100  # playouts per candidate move

    def get_move(self, move):
        """Apply the opponent's move (if any) and reply via Monte-Carlo."""
        self.movecount += 1
        if len(move) != 0:
            self.board.make_move(move, self.opponent[self.color])
        else:
            self.color = 1
        # BUG FIX: self.file was only assigned on the first move as player 1,
        # so playing as player 2 crashed in write_to_file.  Assign it every
        # turn, after self.color is final.
        self.file = f"{self.col}-{self.row}-{self.color}-data.txt"
        moves = self.get_moves(self.board, self.color)
        move = self.monte_carlo_tree(moves, self.simulate_times)
        self.board.make_move(move, self.color)
        return move

    def monte_carlo_tree(self, moves, simulate_times: int):
        """Simulate each candidate move and return the one with the best
        UCT score.

        BUG FIX: the original never updated best_uct (every move compared
        against -inf) and then discarded the result, returning a random
        move instead; the UCT winner is now returned.
        """
        s_parent = simulate_times * len(moves)
        best_uct = -math.inf
        best_move = None
        for move in moves:
            wins = self.simulate(move, simulate_times)
            uct = wins / simulate_times + math.sqrt(
                2 * math.log(s_parent) / simulate_times)
            if uct > best_uct:
                best_uct = uct
                best_move = move
        if best_move is None:
            best_move = moves[randint(0, len(moves) - 1)]
        return best_move

    def simulate(self, move, simulate_times):
        """Run *simulate_times* random playouts after *move*; return the
        accumulated score (1 win, 0.5 draw at the 50-ply cap, 0 loss).

        Also writes this position's feature vectors and win percentage to
        the data file.
        """
        win = 0
        self.board.make_move(move, self.color)
        for _ in range(simulate_times):
            curr_turn = self.opponent[self.color]
            t = 0
            moves = self.get_moves(self.board, curr_turn)
            while len(moves) > 0 and t < 50:
                m = self.rollout(moves)
                self.board.make_move(m, curr_turn)
                curr_turn = self.opponent[curr_turn]
                moves = self.get_moves(self.board, curr_turn)
                t += 1
            if t == 50:
                # BUG FIX: the original conditional's precedence credited a
                # full win for capped playouts whenever it was the opponent's
                # turn; hitting the ply cap is a draw regardless of turn.
                win += 0.5
            elif curr_turn != self.color:
                # The side to move has no moves and it is not us: we won.
                win += 1
            self.undo(self.board, t)
        print(win / simulate_times * 100)
        white_f, black_f = self.board_to_feature(self.board, self.color)
        self.write_to_file(white_f, black_f, win / simulate_times * 100)
        self.board.undo()
        return win

    def board_to_feature(self, board, color):
        """Extract (white_features, black_features) for *board*.

        Mobility features (moveable/eatable) are only computed for the side
        to move next -- the opponent of self.color -- the other side's
        slots are zero.
        """
        wking, bking = self.wking_bking(board)
        wcount, bcount = self.wcount_bcount(board)
        wdis, bdis = self.wdis_bdis(board)
        wedge, bedge = self.wedge_bedge(board)
        wcenter, bcenter = self.wcenter_bcenter(board)
        wback, bback = self.wback_bback(board)
        wdiag, bdiag = self.wdiag_bdiag(board)
        wdog, bdog = self.wdog_bdog(board)
        wbridge, bbridge = self.wbridge_bbridge(board)
        wuptriangle, buptriangle = self.wuptriangle_buptriangle(board)
        wdowntriangle, bdowntriangle = self.wdowntriangle_bdowntriangle(board)
        woreo, boreo = self.woreo_boreo(board)
        if self.color == 1:
            wmoveable, weatable = self.moveables(board, 2)
            bmoveable, beatable = 0, 0
        else:
            wmoveable, weatable = 0, 0
            bmoveable, beatable = self.moveables(board, 1)
        white = [wcount, wking, wdis, wback, wedge, wcenter, wdiag, wdog,
                 wbridge, wuptriangle, wdowntriangle, woreo,
                 wmoveable, weatable]
        black = [bcount, bking, bdis, bback, bedge, bcenter, bdiag, bdog,
                 bbridge, buptriangle, bdowntriangle, boreo,
                 bmoveable, beatable]
        return white, black

    def wcount_bcount(self, board):
        """Piece counts straight off the board object."""
        return board.white_count, board.black_count

    def wking_bking(self, board):
        """Count kings per side.

        BUG FIX: the original iterated self.board regardless of the *board*
        argument; it now inspects the board it was given.
        """
        bking, wking = 0, 0
        for r in range(board.row):
            for c in range(board.col):
                piece = board.board[r][c]
                if piece.color == "B":
                    bking += piece.is_king
                elif piece.color == "W":
                    wking += piece.is_king
        return wking, bking

    def moveables(self, board, color):
        """Return (#legal moves, #capturable pieces) for *color*."""
        moves = [m for chess in board.get_all_possible_moves(color)
                 for m in chess]
        eatable = 0
        for m in moves:
            if len(m.seq) > 2:
                # Multi-jump: every hop beyond the first square is a capture.
                eatable += (len(m.seq) - 1)
                continue
            if math.sqrt((m.seq[0][0] - m.seq[1][0]) ** 2 +
                         (m.seq[0][1] - m.seq[1][1]) ** 2) > 1:
                eatable += 1
        return len(moves), eatable

    def wback_bback(self, board):
        """Pieces still guarding each side's back row."""
        bback = sum(board.board[0][i].color == "B" for i in range(board.col))
        wback = sum(board.board[board.row - 1][i].color == "W"
                    for i in range(board.col))
        return wback, bback

    def wedge_bedge(self, board):
        """Pieces sitting on the leftmost/rightmost columns."""
        bedge = sum((board.board[i][0].color == "B") +
                    (board.board[i][board.col - 1].color == "B")
                    for i in range(board.row))
        wedge = sum((board.board[i][0].color == "W") +
                    (board.board[i][board.col - 1].color == "W")
                    for i in range(board.row))
        return wedge, bedge

    def wcenter_bcenter(self, board):
        """Pieces on the two middle rows.

        NOTE(review): indexes row int(row/2)+1, which is out of range for
        boards with fewer than 4 rows -- confirm minimum board size.
        """
        wcenter = sum((board.board[int(board.row / 2)][i].color == "W") +
                      (board.board[int(board.row / 2) + 1][i].color == "W")
                      for i in range(board.col))
        bcenter = sum((board.board[int(board.row / 2)][i].color == "B") +
                      (board.board[int(board.row / 2) + 1][i].color == "B")
                      for i in range(board.col))
        return wcenter, bcenter

    def wdiagonal_bdiagonal(self, board):
        """Pieces on the two long diagonals.

        NOTE(review): unused by board_to_feature, and the black/white row
        ranges differ (black's main diagonal only spans row//4..3*row//4) --
        looks unintentional; confirm before relying on it.
        """
        bdiagonal = sum(board.board[i][i].color == "B"
                        for i in range(board.row // 4, 3 * board.row // 4)) + \
            sum(board.board[board.row - 1 - i][board.row - 1 - i].color == "B"
                for i in range(board.row))
        wdiagonal = sum(board.board[i][i].color == "W"
                        for i in range(board.row)) + \
            sum(board.board[board.row - 1 - i][board.row - 1 - i].color == "W"
                for i in range(board.row))
        return wdiagonal, bdiagonal

    def wdiag_bdiag(self, board):
        """Pieces on and adjacent to the two main diagonals.

        NOTE(review): indexes board[r][r] etc., so it assumes a square
        board -- confirm col == row for this variant.
        """
        bc, wc = 0, 0
        for r in range(board.row - 1):
            bc += (board.board[r][r].color == "B") + \
                  (board.board[r + 1][r].color == "B") + \
                  (board.board[r][r + 1].color == "B") + \
                  (board.board[r][board.col - 1 - r].color == "B") + \
                  (board.board[r + 1][board.col - 1 - r].color == "B") + \
                  (board.board[r][board.col - 2 - r].color == "B")
            wc += (board.board[r][r].color == "W") + \
                  (board.board[r + 1][r].color == "W") + \
                  (board.board[r][r + 1].color == "W") + \
                  (board.board[r][board.col - 1 - r].color == "W") + \
                  (board.board[r + 1][board.col - 1 - r].color == "W") + \
                  (board.board[r][board.col - 2 - r].color == "W")
        bc += (board.board[board.row - 1][0].color == "B") + \
              (board.board[board.row - 1][board.row - 1].color == "B")
        wc += (board.board[board.row - 1][0].color == "W") + \
              (board.board[board.row - 1][board.row - 1].color == "W")
        return wc, bc

    def wdog_bdog(self, board):
        """Corner 'dog-hole' patterns (empty corner, own piece beside it,
        enemy piece above/below) for each side."""
        wc = (board.board[board.row - 1][board.col - 1].color == "." and
              board.board[board.row - 1][board.col - 2].color == "W" and
              board.board[board.row - 2][board.col - 1].color == "B") + \
             (board.board[board.row - 1][0].color == "." and
              board.board[board.row - 1][1].color == "W" and
              board.board[board.row - 2][0].color == "B")
        bc = (board.board[0][0].color == "." and
              board.board[0][1].color == "B" and
              board.board[1][0].color == "W") + \
             (board.board[0][board.col - 1].color == "." and
              board.board[0][board.col - 2].color == "B" and
              board.board[1][board.col - 1].color == "W")
        return wc, bc

    def wbridge_bbridge(self, board):
        """Back-row 'bridge' patterns (two own pieces two squares apart)."""
        bc = sum(board.board[0][c].color == "B" and
                 board.board[0][c + 2].color == "B"
                 for c in range(1, board.col - 3))
        wc = sum(board.board[board.row - 1][c].color == "W" and
                 board.board[board.row - 1][c + 2].color == "W"
                 for c in range(1, board.col - 3))
        return wc, bc

    def wuptriangle_buptriangle(self, board):
        """Triangle patterns pointing toward row 0."""
        bcount, wcount = 0, 0
        for r in range(1, board.row - 1):
            for c in range(board.col - 2):
                if board.board[r][c].color == "B" and \
                        board.board[r - 1][c + 1].color == "B" and \
                        board.board[r][c + 2].color == "B":
                    bcount += 1
                if board.board[r][c].color == "W" and \
                        board.board[r - 1][c + 1].color == "W" and \
                        board.board[r][c + 2].color == "W":
                    wcount += 1
        return wcount, bcount

    def wdowntriangle_bdowntriangle(self, board):
        """Triangle patterns pointing toward the last row."""
        bcount, wcount = 0, 0
        for r in range(board.row - 1):
            for c in range(board.col - 2):
                if board.board[r][c].color == "B" and \
                        board.board[r + 1][c + 1].color == "B" and \
                        board.board[r][c + 2].color == "B":
                    bcount += 1
                if board.board[r][c].color == "W" and \
                        board.board[r + 1][c + 1].color == "W" and \
                        board.board[r][c + 2].color == "W":
                    wcount += 1
        return wcount, bcount

    def woreo_boreo(self, board):
        """'Oreo' triangle patterns anchored on each side's back row."""
        boreo = sum(board.board[0][c].color == "B" and
                    board.board[1][c + 1].color == "B" and
                    board.board[0][c + 2].color == "B"
                    for c in range(0, board.col - 2))
        woreo = sum(board.board[board.row - 1][c].color == "W" and
                    board.board[board.row - 2][c + 1].color == "W" and
                    board.board[board.row - 1][c + 2].color == "W"
                    for c in range(0, board.col - 2))
        return woreo, boreo

    def wdis_bdis(self, board):
        """Total rows-advanced toward promotion, per side."""
        wdis = sum(board.row - 1 - i for i in range(board.row)
                   for j in range(board.col)
                   if board.board[i][j].color == "W")
        bdis = sum(i for i in range(board.row)
                   for j in range(board.col)
                   if board.board[i][j].color == "B")
        return wdis, bdis

    ######### helper functions #########
    def rollout(self, moves):
        """Pick a uniformly random move from *moves*."""
        return moves[randint(0, len(moves) - 1)]

    def get_moves(self, board, turn):
        """Flatten get_all_possible_moves into a single list of moves."""
        return [m for chess in board.get_all_possible_moves(turn)
                for m in chess]

    def undo(self, board, times):
        """Undo the last *times* moves on *board*."""
        for _ in range(times):
            board.undo()

    def write_to_file(self, wfeatures, bfeatures, win_rate):
        """Append one training row: white features, black features, win%."""
        with open(self.file, "a") as f:
            w = ' '.join(str(x) for x in wfeatures)
            b = ' '.join(str(x) for x in bfeatures)
            f.write(w + ' ' + b + ' ' + str(win_rate) + '\n')
class StudentAI():
    """Checkers agent using depth-limited minimax with a phase-aware
    heuristic (piece/advancement early, king distance late)."""

    DEPTH_LIMIT = 4        # plies searched before evaluating
    EARLY_GAME_TURNS = 5   # turns during which only pieceAndRowEval is used
    TURN_COLOR_MAP = {1: "B", 2: "W"}

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}
        self.color = 2
        self.depth = 0
        self.turn = 0  # our move counter, drives the early-game cutoff

    def get_move(self, move):
        """Apply the opponent's move (if any) and reply via minimax."""
        if len(move) != 0:
            self.board.make_move(move, self.opponent[self.color])
        else:
            self.color = 1
        self.turn += 1
        move = self.minMaxSearch(self.board)
        self.board.make_move(move, self.color)
        return move

    def minMaxSearch(self, state):
        """Root search: try each of our moves and keep the best by minValue.

        Returns None when we have no legal moves (BUG FIX: the original
        raised UnboundLocalError on *chosenMove* in that case).
        """
        maxVal = float('-inf')
        chosenMove = None
        for moves in state.get_all_possible_moves(self.color):
            for ourMove in moves:
                state.make_move(ourMove, self.color)
                tempMax = self.minValue(state, 0)
                if maxVal < tempMax:
                    maxVal = tempMax
                    chosenMove = ourMove
                state.undo()
        return chosenMove

    def maxValue(self, state, depth):
        """Maximizing ply (our turn).

        NOTE(review): is_win == -1 satisfies `isWin != 0` but matches
        neither branch, so it falls through to the depth/eval logic --
        confirm that is the intended tie handling.
        """
        isWin = state.is_win(self.TURN_COLOR_MAP[self.opponent[self.color]])
        if isWin != 0:
            if isWin == self.color:
                return 999999999
            elif isWin == self.opponent[self.color]:
                return -999999999
        depth += 1
        ourMoves = state.get_all_possible_moves(self.color)
        if (depth >= self.DEPTH_LIMIT) or len(ourMoves) == 0:
            return self.evalFunction(state)
        maxVal = float('-inf')
        for moves in ourMoves:
            for ourMove in moves:
                state.make_move(ourMove, self.color)
                maxVal = max(maxVal, self.minValue(state, depth))
                state.undo()
        return maxVal

    def minValue(self, state, depth):
        """Minimizing ply (opponent's turn); mirror of maxValue."""
        isWin = state.is_win(self.TURN_COLOR_MAP[self.color])
        if isWin != 0:
            if isWin == self.color:
                return 999999999
            elif isWin == self.opponent[self.color]:
                return -999999999
        depth += 1
        oppMoves = state.get_all_possible_moves(self.opponent[self.color])
        if (depth >= self.DEPTH_LIMIT) or len(oppMoves) == 0:
            return self.evalFunction(state)
        minVal = float('inf')
        for moves in oppMoves:
            for oppMove in moves:
                state.make_move(oppMove, self.opponent[self.color])
                minVal = min(minVal, self.maxValue(state, depth))
                state.undo()
        return minVal

    def evalFunction(self, state):
        """Dispatch to the phase-appropriate heuristic."""
        if self.turn < self.EARLY_GAME_TURNS:
            return self.pieceAndRowEval(state)
        earlyLateList = self.getEarlyOrLate(state)
        if earlyLateList[0] == -1:
            # We have no pieces left in this state.
            return -999999999
        if earlyLateList[0] == 1:
            # Late game: only kings remain on our side.
            return self.lateGameKingEval(state, earlyLateList[1],
                                         earlyLateList[2])
        return self.pieceAndRowEval(state)

    def getEarlyOrLate(self, state):
        """Classify the game phase.

        Returns [flag, ourKings, oppKings] where flag is -1 when we have no
        pieces, 1 when every remaining piece of ours is a king (late game),
        else 0.  King entries are (row, col) tuples.
        """
        ourColor = self.TURN_COLOR_MAP[self.color]
        oppColor = self.TURN_COLOR_MAP[self.opponent[self.color]]
        numOurCheckers = 0
        ourKings = []
        oppKings = []
        for row in range(len(state.board)):
            for col in range(len(state.board[row])):
                piece = state.board[row][col]
                if piece.color == ourColor:
                    numOurCheckers += 1
                    if piece.is_king:
                        ourKings.append((row, col))
                elif piece.color == oppColor:
                    if piece.is_king:
                        oppKings.append((row, col))
        if numOurCheckers == 0:
            return [-1, ourKings, oppKings]
        if len(ourKings) == numOurCheckers:
            # All of our pieces are kings: late game.
            return [1, ourKings, oppKings]
        return [0, ourKings, oppKings]

    def lateGameKingEval(self, state, ourKings, oppKings):
        """Sum of Manhattan distances between our kings and the opponent's.

        Negated (attack: close the distance) when we have more kings,
        positive (run away) otherwise.
        """
        ourDistance = 0
        for ourKing in ourKings:
            for oppKing in oppKings:
                ourDistance += abs(ourKing[0] - oppKing[0]) + \
                    abs(ourKing[1] - oppKing[1])
        if len(ourKings) > len(oppKings):
            return -1 * ourDistance
        return ourDistance

    # Black always starts from (0, 0); White starts on the other side.
    def pieceAndRowEval(self, state):
        """Material + advancement heuristic: each piece scores
        5 + rows-advanced (+2 for kings); returns ours minus theirs."""
        ourColor = self.TURN_COLOR_MAP[self.color]
        ourCount = 0
        oppCount = 0
        boardLen = len(state.board)
        for row in range(boardLen):
            for col in range(len(state.board[row])):
                piece = state.board[row][col]
                if piece.color == "B":
                    value = 5 + (row + 1) + (2 if piece.is_king else 0)
                elif piece.color == "W":
                    value = 5 + (boardLen - row) + (2 if piece.is_king else 0)
                else:
                    continue
                if piece.color == ourColor:
                    ourCount += value
                else:
                    oppCount += value
        return ourCount - oppCount
class StudentAI():
    """Checkers agent: depth-4 alpha-beta minimax with a late-game
    king-distance tiebreaker."""

    def __init__(self, col, row, p):
        self.row = row
        self.col = col
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}
        self.color = 2
        self.turn_color = {1: "B", 2: "W"}  # player number -> piece color

    def get_move(self, move):
        """Apply the opponent's move (if any) and choose ours.

        Each legal move is scored by alpha-beta; in the late game (<= 8
        pieces total) the score is nudged by 1/1000 of the king-distance
        change so distance only breaks ties.

        NOTE(review): when 'aggressive' the distance change is *added*,
        which rewards moves that increase the distance -- verify the sign
        matches the intended chase/flee behavior.
        """
        if len(move) != 0:
            self.board.make_move(move, self.opponent[self.color])
        else:
            self.color = 1
        moves = self.board.get_all_possible_moves(self.color)
        curr_distance = self.king_distance(self.board,
                                           self.turn_color[self.color])
        aggressive = False
        late_game = False
        if self.board.black_count + self.board.white_count <= 8:
            late_game = True
        if self.color == 1 and self.board.black_count >= self.board.white_count:
            aggressive = True
        if self.color == 2 and self.board.white_count >= self.board.black_count:
            aggressive = True
        depth = 4
        alpha = -math.inf
        beta = math.inf
        best_moves = []
        for row in moves:
            for candidate in row:
                board_copied = copy.deepcopy(self.board)
                board_copied.make_move(candidate, self.color)
                curr = self.MinValue(board_copied, depth - 1, alpha, beta)
                if late_game:
                    distance_diff = self.king_distance(
                        board_copied,
                        self.turn_color[self.color]) - curr_distance
                    if aggressive:
                        curr += distance_diff / 1000
                    else:
                        curr -= distance_diff / 1000
                if curr > alpha:
                    alpha = curr
                    best_moves = [candidate]
                elif curr == alpha:
                    best_moves.append(candidate)
        best_move = random.choice(best_moves)
        self.board.make_move(best_move, self.color)
        return best_move

    def MaxValue(self, board, depth, alpha, beta):
        """Maximizing ply (our turn) of alpha-beta."""
        moves = board.get_all_possible_moves(self.color)
        if depth == 0:
            return self.evaluate(board)
        elif len(moves) == 0:
            # Side to move is stuck: outcome decided by remaining pieces.
            return 999 if self.checkWinner(board.board, self.color) else -999
        val = -math.inf
        for row in moves:
            for m in row:
                board_copied = copy.deepcopy(board)
                board_copied.make_move(m, self.color)
                val = max(val,
                          self.MinValue(board_copied, depth - 1, alpha, beta))
                alpha = max(alpha, val)
                if alpha >= beta:
                    return val
        return val

    def MinValue(self, board, depth, alpha, beta):
        """Minimizing ply (opponent's turn) of alpha-beta."""
        moves = board.get_all_possible_moves(self.opponent[self.color])
        if depth == 0:
            return self.evaluate(board)
        if len(moves) == 0:
            return 999 if self.checkWinner(board.board, self.color) else -999
        val = math.inf
        for row in moves:
            for m in row:
                board_copied = copy.deepcopy(board)
                board_copied.make_move(m, self.opponent[self.color])
                val = min(val,
                          self.MaxValue(board_copied, depth - 1, alpha, beta))
                beta = min(beta, val)
                if alpha >= beta:
                    return val
        return val

    def evaluate(self, board):
        """Material difference plus a small (1/100) positional term."""
        if self.color == 1:
            return board.black_count - board.white_count + \
                self.boardEval1(board, "b") / 100
        else:
            return board.white_count - board.black_count + \
                self.boardEval1(board, "w") / 100

    def boardEval1(self, board, color):
        """Positional score for *color* ('b' or 'w').

        Pawns gain value as they advance, kings get a fixed bonus, back-row
        pawns score a flat defensive 10, and edge columns add 4.
        """
        val = 0
        for i, row in enumerate(board.board):
            for j, col in enumerate(row):
                extra = 0
                # BUG FIX: the right-edge test was `j == len(row)`, which is
                # never true for a valid index, so the edge bonus only ever
                # applied to column 0.  It now covers both edge columns.
                if j == 0 or j == len(row) - 1:
                    extra = 4
                if color == "b":
                    pawn_val = 5 + i + extra
                    king_val = 5 + len(board.board) + 2 + extra
                    if i == 0:
                        pawn_val = 10 + extra  # back-row defender
                else:
                    pawn_val = 5 + (len(board.board) - 1 - i) + extra
                    king_val = 5 + len(board.board) + 2 + extra
                    if i == len(board.board) - 1:
                        pawn_val = 10 + extra
                curr_color = board.board[i][j].get_color().lower()
                if curr_color != '.':
                    piece_val = king_val if board.board[i][j].is_king \
                        else pawn_val
                    if curr_color == color:
                        val += piece_val
                    else:
                        val -= piece_val
        return val

    def checkWinner(self, board, color):
        """Scan row-major and report whether the first piece found belongs
        to player *color* (True) or the opponent (False).  Returns False on
        an empty board (the original fell off the end and returned None)."""
        my_color = self.turn_color[color]
        oppo_color = self.turn_color[self.opponent[color]]
        for row in range(self.row):
            for col in range(self.col):
                checker = board[row][col]
                if checker.color == my_color:
                    return True
                elif checker.color == oppo_color:
                    return False
        return False

    def king_distance(self, board, color):
        """Minimum Euclidean distance from any of *color*'s kings to any
        enemy piece; 100 when either set is empty."""
        k1 = []
        k2 = []
        min_distance = 100
        for row in range(board.row):
            for col in range(board.col):
                checker = board.board[row][col]
                if checker.color != ".":
                    if checker.is_king and checker.color == color:
                        k1.append([row, col])
                    elif checker.color != color:
                        k2.append([row, col])
        for i in k1:
            for j in k2:
                # Reuse the computed distance (the original called
                # cal_distance twice per pair).
                d = self.cal_distance(i, j)
                if d < min_distance:
                    min_distance = d
        return min_distance

    def cal_distance(self, p1, p2):
        """Euclidean distance between two (row, col) points."""
        return math.sqrt(math.pow(p1[0] - p2[0], 2) +
                         math.pow(p1[1] - p2[1], 2))
class StudentAI():
    """Agent for a placement game (k-in-a-row on a col x row grid).

    Searches with iterative-deepening negamax + alpha-beta under a global
    TIME_LIMIT, ordering root moves with a PriorityQueue of previously
    computed heuristics.
    """

    def __init__(self, col, row, k, g):
        self.g = g      # variant flag: non-zero restricts placement to row 0
        self.col = col
        self.row = row
        self.k = k      # presumably pieces-in-a-row needed to win -- confirm
        self.moves = 0  # total moves played so far (both sides)
        self.player_number = 2
        self.opponent_number = 1
        # BUG FIX: valid_moves, moves_generated and the counters above were
        # class attributes, so every StudentAI instance shared one
        # PriorityQueue and one set of flags; they are per-instance search
        # state and now live on the instance.
        self.valid_moves = PriorityQueue()
        self.moves_generated = False
        self.board = Board(col, row, k, g)

    def get_move(self, move):
        """Apply the opponent's move, search, play and return our move."""
        if move.col == -1 and move.row == -1:
            # Sentinel first message: we are player 1.
            self.player_number = 1
            self.opponent_number = 2
        else:
            self.board = self.board.make_move(move, self.opponent_number)
            self.moves += 1
        self.moves_generated = False
        my_move = self.iterative_deepening()
        self.board = self.board.make_move(my_move, self.player_number)
        self.moves += 1
        return my_move

    def iterative_deepening(self):
        """Deepen the negamax search one ply at a time until TIME_LIMIT.

        Keeps the deepest fully completed result; a None from the search
        means time expired mid-depth and that partial result is discarded.
        """
        best_state = None
        start_time = time.time()
        for max_depth in range((self.col * self.row) - self.moves):
            state = self.alpha_beta_negamax(self.board, 0, max_depth,
                                            -math.inf, math.inf, start_time)
            if state is None:
                break
            best_state = state
        self.valid_moves.queue.clear()
        return best_state

    def alpha_beta_negamax(self, board, depth, max_depth, alpha, beta,
                           start_time):
        """Negamax with alpha-beta; returns the best MoveWithAnalysis, or
        None when the time budget is exhausted.

        At depth 0 root moves are drained from (and re-inserted into)
        self.valid_moves so later, deeper iterations examine the most
        promising root moves first.

        NOTE(review): an alpha-beta cutoff at depth 0 returns before
        self.valid_moves is replaced by temp_queue, leaving the queue
        partially drained -- confirm that is acceptable.
        """
        if time.time() - start_time > TIME_LIMIT:
            return None
        if board.is_win() or depth > max_depth:
            # Leaf: evaluate from the side to move and negate on odd plies
            # so scores propagate in negamax form.
            if depth % 2 == 0:
                heuristic = self.evaluate_board(board, self.player_number)
                return MoveWithAnalysis(None, None, heuristic)
            else:
                heuristic = self.evaluate_board(board, self.opponent_number)
                return MoveWithAnalysis(None, None, -heuristic)
        best_move = None
        if self.valid_moves.empty() and not self.moves_generated:
            # Seed the root ordering queue with shallow evaluations.
            for child in self.expand_node(board):
                result_board = copy.deepcopy(board)
                result_board = result_board.make_move(child,
                                                      self.player_number)
                current_move = MoveWithAnalysis(child.col, child.row, 0)
                current_move.heuristic = self.evaluate_board(
                    result_board, self.player_number)
                self.valid_moves.put(current_move)
            self.moves_generated = True
        if depth == 0:
            temp_queue = PriorityQueue()
            while not self.valid_moves.empty():
                valid_move = self.valid_moves.get()
                result_board = copy.deepcopy(board)
                result_board = result_board.make_move(valid_move,
                                                      self.player_number)
                current_move = self.alpha_beta_negamax(
                    result_board, depth + 1, max_depth, -beta, -alpha,
                    start_time)
                if current_move is None:
                    return None
                current_move.col = valid_move.col
                current_move.row = valid_move.row
                current_move.heuristic = -current_move.heuristic
                valid_move.heuristic = current_move.heuristic
                temp_queue.put(valid_move)
                if best_move is None or \
                        current_move.heuristic > best_move.heuristic:
                    best_move = current_move
                if current_move.heuristic > alpha:
                    alpha = current_move.heuristic
                if alpha >= beta:
                    return best_move
            self.valid_moves = temp_queue
        else:
            for child in self.expand_node(board):
                result_board = copy.deepcopy(board)
                if depth % 2 == 0:
                    result_board = result_board.make_move(
                        child, self.player_number)
                else:
                    result_board = result_board.make_move(
                        child, self.opponent_number)
                current_move = self.alpha_beta_negamax(
                    result_board, depth + 1, max_depth, -beta, -alpha,
                    start_time)
                if current_move is None:
                    return None
                current_move.col = child.col
                current_move.row = child.row
                current_move.heuristic = -current_move.heuristic
                if best_move is None or \
                        current_move.heuristic > best_move.heuristic:
                    best_move = current_move
                if current_move.heuristic > alpha:
                    alpha = current_move.heuristic
                if alpha >= beta:
                    return best_move
        return best_move

    def evaluate_board(self, board, player_evaluated):
        """Heuristic score of *board* from *player_evaluated*'s view.

        Scans every cell and every direction, scoring partial k-in-a-row
        lines (5 per placed piece, +1 per open continuation); a completed
        line returns +/-inf immediately and a full board scores 50.

        NOTE(review): row_number is derived from the COLUMN index j while
        temp_row_number uses the row index -- that inconsistency is
        preserved from the original; confirm which was intended.
        """
        score = 0
        steps = [(0, 1), (1, 0), (0, -1), (-1, 0),
                 (1, 1), (-1, -1), (1, -1), (-1, 1)]
        tie = True
        for i in range(self.row):
            for j in range(self.col):
                if board.board[i][j] == 0:
                    tie = False
                    continue
                first_player = board.board[i][j]
                row_number = self.col - j
                for step in steps:
                    is_win = True
                    temp_row = i
                    temp_col = j
                    temp_score = 0
                    for pieces in range(1, self.k):
                        temp_row += step[0]
                        temp_col += step[1]
                        temp_row_number = self.col - temp_row
                        if not board.is_valid_move(temp_col, temp_row, False):
                            is_win = False
                            # pieces < self.k is always true inside this
                            # loop, so the line is discarded (kept as-is).
                            if pieces < self.k:
                                temp_score = 0
                            break
                        if board.board[temp_row][temp_col] != first_player \
                                and board.board[temp_row][temp_col] != 0:
                            # Blocked by the other player.
                            is_win = False
                            temp_score = 0
                            break
                        elif board.board[temp_row][temp_col] == 0:
                            is_win = False
                            temp_score += 1
                        else:
                            temp_score += pieces * 5
                            if self.g == 1:
                                # Parity bonus per variant rules.
                                if temp_row_number % 2 != 0 and \
                                        first_player == 1:
                                    temp_score += 40
                                elif temp_row_number % 2 == 0 and \
                                        first_player == 2:
                                    temp_score += 40
                    if player_evaluated == first_player:
                        score += temp_score
                    else:
                        score -= temp_score
                    # (Removed: the original mutated temp_score AFTER it was
                    # already added to score and before it was reset -- dead
                    # code with no effect on the result.)
                    if is_win:
                        # A completed line decides the position outright.
                        if first_player == self.player_number:
                            return math.inf
                        else:
                            return -math.inf
        if tie:
            return 50
        return score

    def expand_node(self, board):
        """List all legal placements: any empty cell when g == 0, otherwise
        only cells in row 0 (restricted variant -- presumably a drop rule;
        confirm)."""
        children = []
        if self.g == 0:
            for i in range(self.col):
                for j in range(self.row):
                    if board.board[j][i] == 0:
                        children.append(MoveWithAnalysis(i, j, 0))
        else:
            for i in range(self.col):
                if board.board[0][i] == 0:
                    children.append(MoveWithAnalysis(i, 0, 0))
        return children
class StudentAI():
    """Checkers AI choosing moves by depth-limited minimax with alpha-beta pruning.

    The evaluation (static_eval) is material plus advancement toward
    promotion; the search explores 3 plies beyond each of our candidate moves.
    """

    def __init__(self, col, row, p):
        """Set up an internal col x row Board with p rows of starting pieces."""
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}  # maps a color to the other color
        self.color = 2  # default: we are white (2) unless we open the game

    def get_move(self, move):
        """Apply the opponent's `move` (if any), then search and play our best reply.

        @param move: the opponent's last Move; empty when we open the game.
        @return: the Move we decided to play (also applied to self.board).
        """
        if len(move) != 0:
            self.board.make_move(move, self.opponent[self.color])
        else:
            # An empty move means we open the game, so we play black (1).
            self.color = 1
        moves = self.board.get_all_possible_moves(self.color)
        moveValues = []
        for checker_moves in moves:
            for candidate in checker_moves:
                self.board.make_move(candidate, self.color)
                # BUGFIX: the next ply belongs to the opponent, i.e. the
                # minimizing player.  The old code passed
                # self.opponent[self.color] (1 or 2, always truthy) as
                # max_player, which made the search maximize twice in a row.
                score = self.minimax(self.board, 3, False,
                                     float('-inf'), float('inf'))
                moveValues.append((candidate, score))
                self.board.undo()
        move = max(moveValues, key=lambda x: x[1])[0]
        self.board.make_move(move, self.color)
        return move

    def static_eval(self, boardState):
        """Score `boardState` from our point of view (higher is better for us).

        Men are worth 5 plus their advancement toward promotion; kings are
        worth 7 plus the board height.  Returns black total minus white total
        when we play black (color 1), and the reverse when we play white.
        """
        blackValue = 0
        whiteValue = 0
        for i in range(boardState.row):
            for j in range(boardState.col):
                checker = boardState.board[i][j]
                if checker.color == '.':
                    continue
                elif checker.color == 'B':
                    if checker.is_king:
                        blackValue += 7 + boardState.row
                    else:
                        # Black promotes downward: larger row index == closer.
                        blackValue += 5 + checker.row
                else:
                    if checker.is_king:
                        whiteValue += 7 + boardState.row
                    else:
                        # White promotes upward: smaller row index == closer.
                        whiteValue += 5 + (boardState.row - checker.row)
        if self.color == 1:
            return blackValue - whiteValue
        else:
            return whiteValue - blackValue

    def generate_children(self, player, board=None) -> "list[Board]":
        """Return copies of `board` with each of `player`'s legal moves applied.

        @param player: color (1 or 2) whose moves are expanded.
        @param board: position to expand; defaults to self.board for backward
            compatibility (the old code always used self.board).
        NOTE: the annotation is quoted — the old literal `[Board]` was invalid
        typing syntax evaluated at class-definition time.
        """
        if board is None:
            board = self.board
        children = []
        for moveList in board.get_all_possible_moves(player):
            for move in moveList:
                boardCopy = deepcopy(board)
                boardCopy.make_move(move, player)
                children.append(boardCopy)
        return children

    def minimax(self, boardState, depth, max_player, alpha, beta):
        """Alpha-beta minimax over `boardState`.

        @param boardState: position being evaluated.
        @param depth: remaining plies; 0 triggers static evaluation.
        @param max_player: True when it is our (maximizing) turn.
        @param alpha: best score the maximizer can already guarantee.
        @param beta: best score the minimizer can already guarantee.
        @return: backed-up static evaluation score.
        """
        # BUGFIX: the old code (a) called boardState.is_win(max_player) with a
        # bool where a color is expected, and (b) expanded children of
        # self.board instead of boardState, so deeper plies never actually
        # advanced the position.  boardState is now threaded through.
        # NOTE(review): is_win's expected argument (color int vs 'B'/'W')
        # varies across this file; the color int matches other usage here —
        # confirm against the Board API.
        mover = self.color if max_player else self.opponent[self.color]
        if depth == 0 or boardState.is_win(mover):
            return self.static_eval(boardState)
        if max_player:
            best = float('-inf')
            for child in self.generate_children(self.color, boardState):
                candidate = self.minimax(child, depth - 1, False, alpha, beta)
                best = max(best, candidate)
                alpha = max(alpha, candidate)
                if alpha >= beta:
                    break  # beta cutoff
            return best
        else:
            best = float('inf')
            for child in self.generate_children(self.opponent[self.color],
                                                boardState):
                candidate = self.minimax(child, depth - 1, True, alpha, beta)
                best = min(best, candidate)
                beta = min(beta, candidate)
                if alpha >= beta:
                    break  # alpha cutoff
            return best
class StudentAI():
    """Checkers AI using a fixed 3-ply alpha-beta search over a hand-tuned
    positional heuristic (get_heuristic_score2)."""

    def __init__(self, col, row, p):
        self.col = col
        self.row = row
        self.p = p
        self.board = Board(col, row, p)
        self.board.initialize_game()
        self.color = ''
        self.opponent = {1: 2, 2: 1}  # maps a color to the other color
        self.color = 2  # default: white; switched to 1 if we open the game

    def get_move(self, move):
        """Apply the opponent's `move` (if any) and return our best reply.

        @param move: opponent's last Move; empty when we open the game.
        @return: the Move we play (also applied to self.board).
        """
        # BUGFIX: the old +/-1000 window could falsely prune branches whose
        # heuristic magnitude exceeds 1000; use true infinities instead.
        alpha = float('-inf')
        value = float('-inf')
        beta = float('inf')
        bestMove = None
        if len(move) != 0:  # opponent moved first this turn
            self.board.make_move(move, self.opponent[self.color])
        else:
            self.color = 1  # empty move: we open the game, so we are black
        our_moves = self.board.get_all_possible_moves(self.color)
        for checker_moves in our_moves:
            for candidate in checker_moves:
                # Try the move on our board, score it, then undo.
                self.board.make_move(candidate, self.color)
                currentScore = self.alphaBetaMin(alpha, beta, 1)
                self.board.undo()
                if currentScore >= value:
                    value = currentScore
                    bestMove = candidate
                    alpha = currentScore
        self.board.make_move(bestMove, self.color)
        return bestMove

    def alphaBetaMin(self, alpha, beta, depth):
        """Minimizing (opponent's) level of the search.

        @param alpha: best score already guaranteed for the maximizer.
        @param beta: best score already guaranteed for the minimizer.
        @param depth: current ply; depth 3 is the evaluation horizon.
        @return: minimum heuristic score over the opponent's replies
            (+inf when the opponent has no reply, i.e. is stuck).
        """
        if depth == 3:
            return self.get_heuristic_score2()
        value = float('inf')
        opponent_moves = self.board.get_all_possible_moves(
            self.opponent[self.color])
        for checker_moves in opponent_moves:
            for mv in checker_moves:
                self.board.make_move(mv, self.opponent[self.color])
                value = min(value, self.alphaBetaMax(alpha, beta, depth + 1))
                self.board.undo()
                beta = min(beta, value)
                if alpha >= beta:
                    return value  # alpha cutoff
        return value

    def alphaBetaMax(self, alpha, beta, depth):
        """Maximizing (our) level of the search.

        @return: maximum heuristic score over our moves (-inf when we have
            no move, i.e. we are stuck).
        """
        if depth == 3:
            return self.get_heuristic_score2()
        value = float('-inf')
        our_moves = self.board.get_all_possible_moves(self.color)
        for checker_moves in our_moves:
            for mv in checker_moves:
                self.board.make_move(mv, self.color)
                value = max(value, self.alphaBetaMin(alpha, beta, depth + 1))
                self.board.undo()
                alpha = max(alpha, value)
                if alpha >= beta:
                    return value  # beta cutoff
        return value

    def closeToBecomingKing(self, color, row_position):
        """Advancement of a man of `color` sitting at `row_position`.

        Black (1) promotes at the bottom, so its advancement is the row index
        itself; white promotes at the top, so it is the distance from the
        last row.
        BUGFIX: the old version tested self.color and ignored the `color`
        argument, so white pieces were scored with black's orientation.
        """
        if color == 1:
            return row_position
        else:
            return (self.board.row - row_position - 1)

    def get_heuristic_score2(self):
        """Positional evaluation of self.board from our (self.color) side.

        Per side it combines: piece count (x10), kings (x5), "safe" pieces
        that cannot currently be jumped (x2), and back-row pieces (x2).
        Advancement toward promotion is tallied (closer_*) but is currently
        not folded into the final score.
        NOTE(review): the per-piece location counts below apply to kings and
        men alike; the original (whitespace-mangled) source is ambiguous on
        whether they were nested under the non-king branch — confirm.
        """
        num_black_kings = 0
        num_white_kings = 0
        num_safe_piece_black = 0
        num_safe_piece_white = 0
        num_back_black = 0
        num_back_white = 0
        closer_black = 0
        closer_white = 0
        for x in range(len(self.board.board)):
            for y in range(len(self.board.board[x])):
                if (self.board.board[x][y].get_color() == 'B'):
                    if (self.board.board[x][y].is_king == True):
                        num_black_kings += 1
                    else:
                        # How close this man is to promoting.
                        closer_black += self.closeToBecomingKing(self.color, x)
                    cp = self.board.board[x][y].get_location()
                    # Black's back row is row 0.
                    if (cp[0] == 0):
                        num_back_black += 1
                    # Edge pieces cannot be jumped: count each edge once, then
                    # undo the double count for the four corners.
                    if (cp[0] == 0 or cp[0] == self.board.row - 1):
                        num_safe_piece_black += 1
                    if (cp[1] == 0 or cp[1] == self.board.col - 1):
                        num_safe_piece_black += 1
                    if (cp[0] == 0 and cp[1] == 0):
                        num_safe_piece_black -= 1
                    if (cp[0] == 0 and cp[1] == self.board.col - 1):
                        num_safe_piece_black -= 1
                    if (cp[0] == self.board.row - 1 and cp[1] == 0):
                        num_safe_piece_black -= 1
                    if (cp[0] == self.board.row - 1
                            and cp[1] == self.board.col - 1):
                        num_safe_piece_black -= 1
                    # Interior piece: safe unless an adjacent white piece can
                    # jump it into an empty landing square (white men jump
                    # upward; white kings can also jump downward).
                    if (cp[0] != 0 and cp[0] != self.board.row - 1):
                        if (cp[1] != 0 and cp[1] != self.board.col - 1):
                            is_safe = True
                            if (self.board.board[x + 1][y - 1].get_color() == 'W'):
                                if (self.board.board[x - 1][y + 1].get_color() == '.'):
                                    is_safe = False
                            if (self.board.board[x + 1][y + 1].get_color() == 'W'):
                                if (self.board.board[x - 1][y - 1].get_color() == '.'):
                                    is_safe = False
                            if (self.board.board[x - 1][y + 1].get_color() == 'W'
                                    and self.board.board[x - 1][y + 1].is_king):
                                if (self.board.board[x + 1][y - 1].get_color() == '.'):
                                    is_safe = False
                            if (self.board.board[x - 1][y - 1].get_color() == 'W'
                                    and self.board.board[x - 1][y - 1].is_king):
                                if (self.board.board[x + 1][y + 1].get_color() == '.'):
                                    is_safe = False
                            if is_safe:
                                num_safe_piece_black += 1
                elif (self.board.board[x][y].get_color() == 'W'):
                    if (self.board.board[x][y].is_king == True):
                        num_white_kings += 1
                    else:
                        closer_white += self.closeToBecomingKing(2, x)
                    cp = self.board.board[x][y].get_location()
                    # White's back row is the last row.
                    if (cp[0] == self.board.row - 1):
                        num_back_white += 1
                    if (cp[0] == 0 or cp[0] == self.board.row - 1):
                        num_safe_piece_white += 1
                    if (cp[1] == 0 or cp[1] == self.board.col - 1):
                        num_safe_piece_white += 1
                    if (cp[0] == 0 and cp[1] == 0):
                        num_safe_piece_white -= 1
                    if (cp[0] == 0 and cp[1] == self.board.col - 1):
                        num_safe_piece_white -= 1
                    if (cp[0] == self.board.row - 1 and cp[1] == 0):
                        num_safe_piece_white -= 1
                    if (cp[0] == self.board.row - 1
                            and cp[1] == self.board.col - 1):
                        num_safe_piece_white -= 1
                    # Mirror of the black interior-safety test (black men jump
                    # downward; black kings can also jump upward).
                    if (cp[0] != 0 and cp[0] != self.board.row - 1):
                        if (cp[1] != 0 and cp[1] != self.board.col - 1):
                            is_safe = True
                            if (self.board.board[x - 1][y - 1].get_color() == 'B'):
                                if (self.board.board[x + 1][y + 1].get_color() == '.'):
                                    is_safe = False
                            if (self.board.board[x - 1][y + 1].get_color() == 'B'):
                                if (self.board.board[x + 1][y - 1].get_color() == '.'):
                                    is_safe = False
                            if (self.board.board[x + 1][y + 1].get_color() == 'B'
                                    and self.board.board[x + 1][y + 1].is_king):
                                if (self.board.board[x - 1][y - 1].get_color() == '.'):
                                    is_safe = False
                            if (self.board.board[x + 1][y - 1].get_color() == 'B'
                                    and self.board.board[x + 1][y - 1].is_king):
                                if (self.board.board[x - 1][y + 1].get_color() == '.'):
                                    is_safe = False
                            if is_safe:
                                num_safe_piece_white += 1
        # Weighted differences from our perspective.
        if self.color == 1:
            score = 10 * (self.board.black_count - self.board.white_count)
            score += 5 * (num_black_kings - num_white_kings)
            score += 2 * (num_safe_piece_black - num_safe_piece_white)
            score += 2 * (num_back_black - num_back_white)
        elif self.color == 2:
            score = 10 * (self.board.white_count - self.board.black_count)
            score += 5 * (num_white_kings - num_black_kings)
            score += 2 * (num_safe_piece_white - num_safe_piece_black)
            score += 2 * (num_back_white - num_back_black)
        return score