    def min_value(self, game_state: GameState, depth=0, ghost_index=0, alpha=-math.inf, beta=math.inf):

        # advance to the next ghost in the move order
        ghost_index += 1

        # once the last ghost is reached, the next Pacman turn starts a deeper ply
        if self.is_a_new_level_of_search(game_state, ghost_index):
            depth = depth + 1

        if game_state.isWin() or game_state.isLose():
            return self.evaluationFunction(game_state)

        value = math.inf

        legal_actions = game_state.getLegalActions(ghost_index)

        for action in legal_actions:
            successor = game_state.getNextState(ghost_index, action)

            if self.is_a_new_level_of_search(game_state, ghost_index):
                # this was the last ghost, so the next node is Pacman's (a new max node)
                value = min(value, self.max_value(successor, depth=depth, alpha=alpha, beta=beta))
            else:
                # the next node is another minimizer: continue with the next ghost
                value = min(value,
                            self.min_value(successor, depth=depth, ghost_index=ghost_index, alpha=alpha, beta=beta))

            # prune: the maximizer above already has an option at least as good as alpha
            if value < alpha:
                return value
            beta = min(beta, value)

        return value
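
    # `is_a_new_level_of_search` is referenced above but defined elsewhere in
    # the file; a minimal sketch of what it is assumed to check, given how it
    # gates both the depth increment and the max/min dispatch (hypothetical,
    # not necessarily the author's exact helper):
    #
    #     def is_a_new_level_of_search(self, game_state, ghost_index):
    #         # true once the last agent's index is reached, i.e. the next
    #         # mover is Pacman and a new search ply begins
    #         return ghost_index == game_state.getNumAgents() - 1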

    def exp_value(self, game_state: GameState, depth=0, ghost_index=0):

        # advance to the next ghost in the move order
        ghost_index += 1

        # once the last ghost is reached, the next Pacman turn starts a deeper ply
        if self.is_a_new_level_of_search(game_state, ghost_index):
            depth = depth + 1

        if game_state.isWin() or game_state.isLose():
            return self.evaluationFunction(game_state)

        value = 0

        legal_actions = game_state.getLegalActions(ghost_index)

        # ghosts are modeled as choosing uniformly at random among their legal moves
        probability = 1 / len(legal_actions)

        for action in legal_actions:
            successor = game_state.getNextState(ghost_index, action)

            if self.is_a_new_level_of_search(game_state, ghost_index):
                # this was the last ghost, so the next node is Pacman's (a new max node)
                value += probability * self.max_value(successor, depth=depth)
            else:
                # the next node is another chance node: continue with the next ghost
                value += probability * self.exp_value(successor, depth=depth, ghost_index=ghost_index)

        return value
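
    # At a chance node the value is the expectation over uniformly random ghost
    # moves: V = (1/|A|) * sum of V(successor) over the legal actions A. E.g.
    # with three legal moves scoring 10, 4 and 1, the node is worth
    # (10 + 4 + 1) / 3 = 5, not the pessimistic 1 a minimizer would return.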

    def getAction(self, game_state: GameState):
        """
        Returns the minimax action using self.depth and self.evaluationFunction
        """
        "*** YOUR CODE HERE ***"
        # Generate candidate actions
        legal_actions = game_state.getLegalActions(self.pacman_index)

        # if Directions.STOP in legal_actions:
        #    legal_actions.remove(Directions.STOP)

        alpha = -math.inf
        beta = math.inf
        scores = []

        for action in legal_actions:
            successor = game_state.getNextState(self.pacman_index, action)
            # the root is a max node, so each child is a min node: call min_value directly
            value = self.min_value(successor, depth=0, ghost_index=0, alpha=alpha, beta=beta)
            scores.append(value)

            # no pruning at the root: every action needs a score, but alpha can still tighten
            alpha = max(alpha, value)

        best_score = max(scores)
        best_indices = [index for index in range(len(scores)) if scores[index] == best_score]
        chosen_index = random.choice(best_indices)  # Pick randomly among the best

        return legal_actions[chosen_index]
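
    # Note on the root loop above: alpha pays off even without pruning at the
    # root itself. If the first root child returns 8, alpha becomes 8, and
    # every later min_value call can cut off as soon as one of its successors
    # scores below 8, since that branch can no longer beat the best move so far.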

    # Expectimax maximizer: Pacman's children are chance nodes, so successors
    # are scored with exp_value. (This variant and the alpha-beta max_value
    # below presumably belong to different agent classes, hence the shared name.)
    def max_value(self, game_state: GameState, depth):

        if self.is_terminal_state(game_state, depth):
            return self.evaluationFunction(game_state)

        value = -math.inf

        legal_actions = game_state.getLegalActions(self.pacman_index)

        for action in legal_actions:
            successor = game_state.getNextState(self.pacman_index, action)
            value = max(value, self.exp_value(successor, depth=depth, ghost_index=0))

        return value

    # Alpha-beta maximizer: Pacman's children are min nodes, so successors are
    # scored with min_value and alpha/beta are threaded through.
    def max_value(self, game_state: GameState, depth, alpha=-math.inf, beta=math.inf):

        if self.is_terminal_state(game_state, depth):
            return self.evaluationFunction(game_state)

        value = -math.inf

        legal_actions = game_state.getLegalActions(self.pacman_index)

        for action in legal_actions:
            successor = game_state.getNextState(self.pacman_index, action)
            value = max(value, self.min_value(successor, depth=depth, ghost_index=0, alpha=alpha, beta=beta))

            # prune: the minimizer above already has an option at least as good as beta
            if value > beta:
                return value
            alpha = max(alpha, value)

        return value
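
    # `is_terminal_state` is defined outside this section. Since min_value and
    # exp_value bump `depth` each time play returns to Pacman, it is assumed to
    # cut off at self.depth plies (hypothetical sketch, not the author's code):
    #
    #     def is_terminal_state(self, game_state, depth):
    #         return game_state.isWin() or game_state.isLose() or depth == self.depth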

    def getAction(self, game_state: GameState):
        """
        Returns the minimax action from the current gameState using self.depth
        and self.evaluationFunction.

        Here are some method calls that might be useful when implementing minimax.

        gameState.getLegalActions(agentIndex):
        Returns a list of legal actions for an agent
        agentIndex=0 means Pacman, ghosts are >= 1

        gameState.getNextState(agentIndex, action):
        Returns the child game state after an agent takes an action

        gameState.getNumAgents():
        Returns the total number of agents in the game

        gameState.isWin():
        Returns whether or not the game state is a winning state

        gameState.isLose():
        Returns whether or not the game state is a losing state
        """
        "*** YOUR CODE HERE ***"
        # Generate candidate actions
        legal_actions = game_state.getLegalActions(self.pacman_index)

        # if Directions.STOP in legal_actions:
        #    legal_actions.remove(Directions.STOP)

        # the root is a max node, so each child is a min node: call min_value directly
        scores = [self.min_value(game_state.getNextState(self.pacman_index, action), depth=0, ghost_index=0)
                  for action in legal_actions]
        best_score = max(scores)
        best_indices = [index for index in range(len(scores)) if scores[index] == best_score]
        chosen_index = random.choice(best_indices)  # Pick randomly among the best

        return legal_actions[chosen_index]
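
    # The agent can be exercised via the project's CLI, e.g. (assuming this
    # class is registered as MinimaxAgent in the standard CS188 harness):
    #
    #     python pacman.py -p MinimaxAgent -a depth=2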

    def getAction(self, game_state: GameState):
        """
        Returns the expectimax action using self.depth and self.evaluationFunction

        All ghosts should be modeled as choosing uniformly at random from their
        legal moves.
        """
        "*** YOUR CODE HERE ***"
        # Generate candidate actions
        legal_actions = game_state.getLegalActions(self.pacman_index)

        # if Directions.STOP in legal_actions:
        #    legal_actions.remove(Directions.STOP)

        # the root is a max node, so each child is a chance node: call exp_value directly
        scores = [self.exp_value(game_state.getNextState(self.pacman_index, action), depth=0, ghost_index=0)
                  for action in legal_actions]
        best_score = max(scores)
        best_indices = [index for index in range(len(scores)) if scores[index] == best_score]
        chosen_index = random.choice(best_indices)  # Pick randomly among the best

        return legal_actions[chosen_index]
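
    # Context assumed by the methods above but not shown in this section: the
    # surrounding module imports `math` and `random` and type-hints against
    # `pacman.GameState`; each agent presumably derives from the course's
    # MultiAgentSearchAgent base, which supplies self.depth and
    # self.evaluationFunction, and self.pacman_index is taken to be 0, since
    # agentIndex 0 is always Pacman.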