def __init__(self, n, path="input.txt"):
    """Load the current game position from the host's input file.

    The file layout (as read below) is: line 0 holds our player number
    (1 or 2), the next ``n`` lines are the previous board, and the ``n``
    lines after that are the current board, one digit per intersection.

    Args:
        n: board size (board is n x n).
        path: input file to read; defaults to the host's "input.txt".
    """
    self.size = n
    with open(path, 'r') as f:
        lines = f.readlines()
    self.our_player = int(lines[0])
    # Players are encoded 1/2, so the opponent is always 3 - ours.
    self.other_player = 3 - self.our_player
    self.cur_step_num = None
    # Opening book: the four diagonal points around the center.
    self.good_moves = [
        Move(Point(1, 1)),
        Move(Point(1, 3)),
        Move(Point(3, 1)),
        Move(Point(3, 3)),
    ]
    # Previous board: lines 1 .. size, one digit per cell.
    previous_board_arr = [
        [int(x) for x in line.rstrip('\n')]
        for line in lines[1:self.size + 1]
    ]
    # Current board: the next size lines.
    current_board_arr = [
        [int(x) for x in line.rstrip('\n')]
        for line in lines[self.size + 1:2 * self.size + 1]
    ]
    # Count stones on the current board; used to recover the step number.
    all_cells = [cell for row in current_board_arr for cell in row]
    black_stones = all_cells.count(BLACK)
    white_stones = all_cells.count(WHITE)
    previous_board = Board(self.size, previous_board_arr)
    current_board = Board(self.size, current_board_arr)
    self.board = current_board
    self.previous_state = State(previous_board, None, self.other_player, None)
    self.active_state = State(current_board, self.previous_state, self.our_player, None)
    self.set_step_num(black_stones, white_stones)
def max_value(self, state: State, depth):
    """Maximizer half of minimax: best score our player can force.

    Args:
        state: position to search from (our player to move).
        depth: remaining plies; 0 triggers the static evaluation.

    Returns:
        The highest score reachable from ``state``; MIN_SCORE when no
        legal, non-eye-filling move exists.
    """
    if depth == 0:
        return self.eval_function(state)
    if state.is_terminal():
        return self.utility_function(state)
    best_recorded_score = MIN_SCORE
    # A shallow copy is enough here: shuffle() only reorders the list.
    # NOTE(review): assumes apply_move does not mutate the Move objects
    # (the old deepcopy guarded against that) — confirm and keep shallow.
    moves = list(self.all_moves)
    shuffle(moves)
    for move in moves:
        new_state = state.apply_move(move)
        if new_state is not None:
            # Never consider moves that fill one of our own eyes.
            if not new_state.board.fills_own_eye(move):
                cur_move_score = self.min_value(new_state, depth - 1)
                if cur_move_score > best_recorded_score:
                    best_recorded_score = cur_move_score
    return best_recorded_score
# NOTE(review): dead code — this alpha-beta variant of `min_value` is
# shadowed by the later `min_value(self, state, depth)` definition in this
# class (Python keeps only the last binding of a name), so it can never be
# called.
# NOTE(review): even if it were live it is broken: it calls
# self.max_value(new_state, depth+1, alpha, beta), but `max_value` accepts
# only (state, depth) — a TypeError. It also counts depth upward toward
# self.depth, while `max_value` and the live `min_value` count downward to
# 0. This looks like a leftover from an earlier alpha-beta implementation;
# consider deleting it.
def min_value(self, state: State, depth, alpha, beta):
    if state.is_terminal() or self.remaining_steps - depth == 0:
        return self.utility_function(state)
    if depth == self.depth:
        return self.eval_function(state)
    least_recorded_score = MAX_SCORE
    moves = deepcopy(self.all_moves)
    shuffle(moves)
    for move in moves:
        new_state = state.apply_move(move)
        if new_state is not None:
            cur_move_score = self.max_value(new_state, depth+1, alpha, beta)
            if cur_move_score < least_recorded_score:
                least_recorded_score = cur_move_score
            # Standard beta update and alpha-beta cutoff.
            beta = min(beta, least_recorded_score)
            if beta <= alpha:
                return least_recorded_score
    return least_recorded_score
def min_value(self, state: State, depth):
    """Minimizer half of minimax: best score the opponent can force on us.

    Args:
        state: position to search from (opponent to move).
        depth: remaining plies; 0 triggers the static evaluation.

    Returns:
        The lowest score the opponent can drive us to; MAX_SCORE when no
        legal, non-eye-filling reply exists.
    """
    if depth == 0:
        return self.eval_function(state)
    if state.is_terminal():
        # Delegate to utility_function rather than duplicating its
        # winner/MAX_SCORE/MIN_SCORE logic, matching max_value.
        return self.utility_function(state)
    least_recorded_score = MAX_SCORE
    # A shallow copy is enough here: shuffle() only reorders the list.
    # NOTE(review): assumes apply_move does not mutate the Move objects
    # (the old deepcopy guarded against that) — confirm and keep shallow.
    moves = list(self.all_moves)
    shuffle(moves)
    for move in moves:
        new_state = state.apply_move(move)
        if new_state is not None:
            # Never consider replies that fill one of the mover's own eyes.
            if not new_state.board.fills_own_eye(move):
                cur_move_score = self.max_value(new_state, depth - 1)
                if cur_move_score < least_recorded_score:
                    least_recorded_score = cur_move_score
    return least_recorded_score
def utility_function(self, state: State):
    """Score a terminal state: MAX_SCORE if we won, else MIN_SCORE."""
    we_won = state.winner() == self.our_player
    return MAX_SCORE if we_won else MIN_SCORE
def eval_function(self, state: State):
    """Heuristic score of a non-terminal state from our perspective."""
    # stone_diff() is black-minus-white, so negate it when we play white.
    diff = state.stone_diff()
    return -diff if self.our_player == WHITE else diff