Example #1
 def __init__(self):
     self.state = State()
     self.state.initial_state()
     self.possible_action_keys = []
     self.two_players = False
     self.player_vs_ai_white = False
     self.ai_agent = MinimaxABAgent(player_color=0)
Example #2
    def __init__(self, root=None, master=None):
        super().__init__(master)
        self.master = master
        self.root = root

        # State
        self.state = State()

        # Views
        # Make the top-level window resizable and take up the entire available width
        top = self.winfo_toplevel()
        top.rowconfigure(0, weight=1)
        top.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        self.columnconfigure(0, weight=1)

        self.menu_bar = V_menubar.GameViewMenuBar(self, self)
        self.main_menu = V_menu.MainMenu(self, self)
        self.practice_view = V_practice.PracticeView(self, self, self.state)
        self.vsCpu_view = V_vsCpu.VsCpuView(self, self, self.state)
        self.stats_view = V_stats.StatsView(self, self)

        self.main_menu.draw()
        self.grid(sticky=tk.N + tk.S + tk.E + tk.W)

        # Music
        mx.init()
        mx.music.load('./resources/music.mp3')
        mx.music.play(loops=-1)
        mx.music.set_volume(0.07)
        self.key_sound = mx.Sound('./resources/Keyboard Sounds_Cherry Clear.mp3')

        # Initiate Playtime counting
        self.incrementPlayTime()
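A minimal launch sketch for a Frame subclass like the one above, assuming the class is named App (the real class name is not shown in this example):

import tkinter as tk

if __name__ == '__main__':
    root = tk.Tk()
    app = App(root=root, master=root)  # "App" is a placeholder name
    root.mainloop()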
Example #3
    def play_with_ai_white(self, ai_agent=ControllerConfig.AI_AGENT):
        """
        Returns
        -------
        dict
            Dict of possible action and state
        """
        self.state = State()
        self.state.initial_state()
        self.player_vs_ai_white = True
        state_dict = AIElements.get_state_dict(self.state)
        possible_action = AIElements.get_possible_action(self.state)
        self.possible_action_keys = possible_action.keys()

        if ai_agent == 'random':
            self.ai_agent = RandomAgent()
        elif ai_agent == 'minimaxab':
            self.ai_agent = MinimaxABAgent(player_color=0)
        elif ai_agent == 'azero':
            self.ai_agent = AlphaZeroAgent()

        self.old_state_reward = deepcopy(self.state)

        return {
            "state": state_dict,
            "possible_action": possible_action,
            "task": "CHANGE_PLAYER"
        }
Example #4
    def test_unsolvable(self):
        # given
        initial_state = State(self.board2)

        # when
        solvability = initial_state.is_solvable()
        # then
        self.assertEqual(solvability, False)
Example #5
    def test_solvable_3x3(self):
        # given
        initial_state = State(self.board3)

        # when
        solvability = initial_state.is_solvable()

        # then
        self.assertEqual(solvability, True)
Example #6
 def _retrieve_choices(self, policy):
     choices_with_usable = [[
         policy.hit_certainty_in(State(player_sum, opponent_sum, True))
         for opponent_sum in range(2, 12)
     ] for player_sum in range(12, 22)]
     choices_without_usable = [[
         policy.hit_certainty_in(State(player_sum, opponent_sum, False))
         for opponent_sum in range(2, 12)
     ] for player_sum in range(12, 22)]
     return choices_with_usable, choices_without_usable
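Each grid above indexes player sums 12-21 against opponent (dealer) sums 2-11. A minimal plotting sketch, assuming matplotlib is available and a policy object like the one passed in above (the visualizer name here is an assumption):

import matplotlib.pyplot as plt

with_usable, without_usable = visualizer._retrieve_choices(policy)  # names assumed
fig, axes = plt.subplots(1, 2, figsize=(10, 4))
for ax, grid, title in zip(axes, (with_usable, without_usable),
                           ('usable ace', 'no usable ace')):
    # rows are player sums 12-21, columns are opponent sums 2-11
    ax.imshow(grid, origin='lower', extent=(2, 11, 12, 21), aspect='auto')
    ax.set_xlabel('opponent sum')
    ax.set_ylabel('player sum')
    ax.set_title(title)
plt.show()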
Example #7
def warrior(game_map, game_params, game_teams):
    global start
    global ticks

    while True:
        state = State(input(), game_teams, game_params)

        my_buildings = state.my_buildings()
        my_squads = state.my_squads()
        # sort squads by remaining path, shortest first
        my_squads.sort(key=lambda c: c.way.left, reverse=False)

        enemy_buildings = state.enemy_buildings()
        enemy_squads = state.enemy_squads()

        neutral_buildings = state.neutral_buildings()

        forges_buildings = state.forges_buildings()

        if state.ability_ready(AbilityType.Area_damage):
            cast_aoe(enemy_squads, enemy_buildings, game_map, game_teams)
        move_units(enemy_buildings, my_buildings, enemy_squads, game_map,
                   game_teams)
        ticks += 1
        print("end")
Example #8
File: test_state.py  Project: vynaloze/15
 def test_single_iteration(self):
     # given
     initial_state = State(self.board1)
     expected_states = [
         State(self.board12, [MoveDown()], [self.board1]),
         State(self.board11, [MoveRight()], [self.board1]),
     ]
     # when
     states = self._iterate(initial_state)
     # then
     self.assertListEqual(states, expected_states)
Example #9
 def _retrieve_choices(self, policy):
     choices_with_usable = [[
         policy.make_decision_in(State(player_sum, opponent_sum,
                                       True)).value
         for opponent_sum in range(2, 12)
     ] for player_sum in range(12, 22)]
     choices_without_usable = [[
         policy.make_decision_in(State(player_sum, opponent_sum,
                                       False)).value
         for opponent_sum in range(2, 12)
     ] for player_sum in range(12, 22)]
     return choices_with_usable, choices_without_usable
Example #10
 def should_merge(self, state_1: State, state_2: State) -> bool:
     return (self.get_phash_from_state(state_1) - self.get_phash_from_state(state_2)
             < VisualSimilarityMerger.PHASH_EPSILON
             and state_1.get_state_id_config() == state_2.get_state_id_config())
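should_merge treats two states as duplicates when their id configs match and the perceptual-hash distance of their screenshots stays below PHASH_EPSILON. A sketch of what get_phash_from_state plausibly computes, assuming the imagehash package (subtracting two hashes yields their Hamming distance):

import imagehash
from PIL import Image

def phash_distance(path_1: str, path_2: str) -> int:
    # Hamming distance between the perceptual hashes of two screenshots;
    # a small value means the screenshots look alike.
    return imagehash.phash(Image.open(path_1)) - imagehash.phash(Image.open(path_2))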
Example #11
def get_state():
	state = State()
	first = True
	for file in os.listdir("examples"):
		if os.path.splitext(file)[1] != ".zip":
			continue
		zfile = ZipFile("examples/" + file)
		content = zfile.read("contents.xml")
		tree = ElementTree.fromstring(content)
		if first:
			state.fromXml(tree)
			first = False
		else:
			state.merge(tree)
	return state
Example #12
    def populate_board(self, seed):
        board = np.zeros((self.size, self.size), 'b')
        num_pieces = int((self.size * self.size) / 2)
        pieces = []
        if seed is not None:
            if seed != "random":
                self.rand_seed_used = seed
                np.random.seed(seed)
            # Generate random positions for pieces
            squares = np.arange(0, self.size * self.size)
            np.random.shuffle(squares)

            # Populate board with equal amount of white and black pieces
            for i in range(num_pieces):
                num = squares[i]
                Y = int(num / self.size)
                X = int(num % self.size)
                piece = 1 if i < num_pieces/2 else -1

                board[Y][X] = piece
                pieces.append((Y, X))
            pieces.sort()
        else:
            # Position pieces as a 'Chess formation'.
            board[0:2, :] = -1
            board[-2:, :] = 1
            pieces.extend([(0, x) for x in range(self.size)])
            pieces.extend([(1, x) for x in range(self.size)])
            pieces.extend([(self.size-2, x) for x in range(self.size)])
            pieces.extend([(self.size-1, x) for x in range(self.size)])

        self.init_state = State(board, True, pieces=pieces)
Example #13
 def updateEnvironment(self):
     # Update the environment here
     for chrom in self.environment:
         # For each chromosome in the environment, if it is not
         # already in the list, add it with an append
         if chrom not in self.un:
             self.un.append(chrom)
     self.minCosts = list()
     costs = list()
     newEnvironment = list()
     # Create a new environment
     for chrom in self.environment:
         state = State(chrom)
         costs.append(state.cost)
     if min(costs) == 0:
         self.solution = costs.index(min(costs))
         self.goal = self.environment[self.solution]
         return self.environment
     while len(newEnvironment) < self.d:
         minCost = min(costs)
         minIndex = costs.index(minCost)
         self.minCosts.append(costs[minIndex])
         newEnvironment.append(self.environment[minIndex])
         costs.remove(minCost)
         self.environment.remove(self.environment[minIndex])
     self.environment = newEnvironment
     # Store the new environment set above so we
     # can keep working with it
     print(self.minCosts, ">>", len(self.un))
Example #14
    def play_starting_in(self, initial_state: State) -> GameInfo:
        game_info = GameInfo()

        player_state = self._play_stage(initial_state=initial_state,
                                        policy=self._player_policy,
                                        log_action=game_info.log_player)

        if player_state == BUST:
            game_info.set_winner(Winner.DEALER)
            return game_info

        dealer_cards = (initial_state.opponent_points,
                        self._deck.get_next_card())
        dealer_state = self._play_stage(initial_state=State.from_deal(
            *dealer_cards, player_state.current_sum),
                                        policy=self._dealer_policy,
                                        log_action=game_info.log_dealer)

        if dealer_state == BUST:
            game_info.set_winner(Winner.PLAYER)
            return game_info

        if player_state.current_sum > dealer_state.current_sum:
            game_info.set_winner(Winner.PLAYER)
        elif player_state.current_sum == dealer_state.current_sum:
            game_info.set_winner(Winner.DRAW)
        else:
            game_info.set_winner(Winner.DEALER)

        return game_info
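A sketch of driving play_starting_in over many games, assuming a game object exposing this method, a deck with get_next_card(), and that GameInfo records the winner (the attribute name and the from_deal argument order are assumptions inferred from the code above):

from collections import Counter

tally = Counter()
for _ in range(10000):
    initial = State.from_deal(deck.get_next_card(), deck.get_next_card(),
                              deck.get_next_card())  # argument order assumed
    info = game.play_starting_in(initial)
    tally[info.winner] += 1  # `winner` attribute is an assumption
print(tally)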
Example #15
    def update(self):
        """
        Update the command line interface to the latest controller state.
        """

        # print configuration
        print(format_fans(fans=Configuration.fans))
        print(format_ports(ports=Configuration.ports))
        print(format_ambients(ambients=Configuration.ambients))
        print(format_limits(limits=Configuration.limits))

        # print runtime state
        print(format_pwms(pwms=State().pwms))
        print(format_rpms(rpms=State().rpms))
        print(format_temps(temps=State().temperatures))
        print(format_headrooms(headrooms=State().headrooms))
        print()
Example #16
 def epsilon_greedy_from_values(cls, values: dict, exploring_prob: Callable):
     mapping = dict()
     for s in State.get_all_states():
         if values[StateActionPair(s, Action.STICK)] > values[StateActionPair(s, Action.HIT)]:
             mapping[s] = [1. - exploring_prob(), exploring_prob()]
         else:
             mapping[s] = [exploring_prob(), 1. - exploring_prob()]
     return Policy.from_probabilistic_mapping(mapping)
Example #17
 def from_values(cls, values: dict):
     mapping = dict()
     for s in State.get_all_states():
         if s.current_sum < 12: mapping[s] = Action.HIT
         elif values[StateActionPair(s, Action.STICK)] > values[StateActionPair(s, Action.HIT)]:
             mapping[s] = Action.STICK
         else: mapping[s] = Action.HIT
     return Policy.from_deterministic_mapping(mapping)
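Both classmethods above turn an action-value table into a policy: from_values acts greedily (always hitting below a sum of 12), while epsilon_greedy_from_values keeps an exploring_prob chance of the non-greedy action. A minimal sketch with hypothetical zero-initialized values, assuming the surrounding Policy, State, Action and StateActionPair classes:

values = {StateActionPair(s, a): 0.0
          for s in State.get_all_states()
          for a in (Action.HIT, Action.STICK)}

greedy_policy = Policy.from_values(values)
exploring_policy = Policy.epsilon_greedy_from_values(values, lambda: 0.1)  # 10% exploration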
Example #18
def fight_agent(best_model: str,
                current_model: str,
                ae,
                round_fight=AlphaZeroConfig.ROUND_ARENA,
                max_turn=AlphaZeroConfig.MAX_TURN_ARENA,
                max_simulation=AlphaZeroConfig.MAX_SIMULATION_ARENA):
    """
    The pitted 2 agents. We will check who is the best here.
    :param best_model: The current best model file path
    :param current_model: The current model file path
    :param ae: The Action Encoder
    :param round_fight: number of round to determine the winner
    :param max_turn: The maximum turn of the game. If the current turn is higher than max turn.
        It will be cut and the outcome of the game is draw.
    :param max_simulation: The maximum of simulation
    :return: dict, The dictionary of the score
    """
    from ai_modules.reinforcement_algorithm import AlphaZeroAgent

    loss_win = {0: 0, 1: 0}
    for round_num in range(round_fight):
        print("ROUND {}".format(round_num + 1))
        terminal = False
        count_turn = 1
        state = State()
        state.initial_state()
        best_model_agent = AlphaZeroAgent(state, max_simulation,
                                          best_model)  # 1
        current_model_agent = None  # 0
        while not terminal and count_turn <= max_turn:
            print("=======TURN {} ========".format(count_turn))
            state.print_board()
            current_player_turn = state.get_player_turn()
            if current_player_turn == 1:
                key, dict_key = best_model_agent.choose_action(state)
                state = AIElements.result_function(state, dict_key)
                if current_model_agent is not None:
                    current_model_agent.enemy_turn_action(key, state)
            else:
                if current_model_agent is None:
                    current_model_agent = AlphaZeroAgent(
                        state, max_simulation, current_model)
                key, dict_key = current_model_agent.choose_action(state)
                state = AIElements.result_function(state, dict_key)
                best_model_agent.enemy_turn_action(key, state)
            print("Player %d choose action %s" % (current_player_turn, key))

            game_ended = state.is_terminal()
            if game_ended:
                print("Player {} Win".format(count_turn % 2))
                loss_win[(current_player_turn) % 2] += 1
                terminal = True
            count_turn += 1
            if count_turn > max_turn:
                print("ROUND {} DRAW".format(round + 1))
    return loss_win
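A usage sketch with hypothetical checkpoint paths; per the agent assignments above, slot 1 of the returned dict counts wins for best_model and slot 0 counts wins for current_model (action_encoder is a placeholder for the project's Action Encoder):

scores = fight_agent(best_model='checkpoints/best.h5',          # hypothetical path
                     current_model='checkpoints/candidate.h5',  # hypothetical path
                     ae=action_encoder)
print('candidate wins: {}, best wins: {}'.format(scores[0], scores[1]))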
Example #19
    def cpu_button_press(self, index):
        assert index in range(9)

        # Adds token to the tic tac toe board
        if not self.state.add_token(CONST.LOC_LOCAL_CPU, index): return
        self.vsCpu_view.update_tooltip(f'You have made your move at grid {index}!')
        self.play_sound()
        self.refresh_boards()

        # Checks for winning
        winner = State.checkWinningState(self.state.cpu_board)
        # Player wins, or tied
        if winner is not None:
            Controller.show_cpu_winner(winner, self.state.isPlayerFirst)
            self.state.game_set(CONST.LOC_LOCAL_CPU, winner)
            self.vsCpu_view.update_tooltip('A new game has begun')
            self.refresh_boards()

            # After refresh, check if CPU moves first
            if not self.state.isPlayerFirst:
                move = self.state.cpu_moves()
                self.play_sound()
                self.refresh_boards()
                self.vsCpu_view.update_tooltip(f'CPU has made its move at grid {move}!')
                return

        # Player has moved but the game is not decided yet. CPU moves; then check the state
        move = self.state.cpu_moves()
        self.play_sound()
        self.refresh_boards()
        self.vsCpu_view.update_tooltip(f'CPU has made its move at grid {move}!')
        winner = State.checkWinningState(self.state.cpu_board)
        # Tie or CPU wins
        if winner is not None:
            Controller.show_cpu_winner(winner, self.state.isPlayerFirst)
            self.state.game_set(CONST.LOC_LOCAL_CPU, winner)
            self.refresh_boards()
            self.vsCpu_view.update_tooltip('A new game has begun')

            # After refresh, check if CPU moves first
            if not self.state.isPlayerFirst:
                move = self.state.cpu_moves()
                self.play_sound()
                self.refresh_boards()
                self.vsCpu_view.update_tooltip(f'CPU has made its move at grid {move}!')
Example #20
File: test_state.py  Project: vynaloze/15
 def test_two_iterations(self):
     # given
     initial_state = State(self.board1)
     expected_states = [
         State(self.board121, [MoveDown(), MoveUp()],
               [self.board1, self.board12]),
         State(self.board122, [MoveDown(), MoveRight()],
               [self.board1, self.board12]),
         State(self.board112, [MoveRight(), MoveDown()],
               [self.board1, self.board11]),
         State(self.board111, [MoveRight(), MoveLeft()],
               [self.board1, self.board11]),
     ]
     # when
     states1 = self._iterate(initial_state)
     states2 = [s for states in states1 for s in self._iterate(states)]
     # then
     self.assertListEqual(states2, expected_states)
Example #21
 def __init__(self, num_of_states: int = 1, num_of_actions: int = 1, alfa: float = 0.1,
              lambd: float = 0.1, epsilon=0.1, g_init: float = 0, n_init: float = 0):
     """
     Actor-only approach model
     """
     self.states = []
     for _ in range(num_of_states):
         state = State(num_of_actions=num_of_actions, alfa=alfa, lambd=lambd, epsilon=epsilon,
                       g_init=g_init, n_init=n_init)
         self.states.append(state)
Example #22
    def test_whole_state(self):
        # given
        start_state = self.state2
        target_state = State(self.board1, [MoveLeft()], [self.board2])

        # when
        solved_state = IDFS().solve(start_state, h0)

        # then
        self.assertEqual(target_state, solved_state)
Example #23
 def start_state(self):
     """See the superclass docstring."""
     board = np.zeros((self.size, self.size), dtype="b")
     half = self.size // 2
     board[half][half - 1] = 1
     board[half - 1][half] = 1
     board[half][half] = -1
     board[half - 1][half - 1] = -1
     pieces = [(half, half - 1), (half - 1, half), (half, half),
               (half - 1, half - 1)]
     return State(board, True, pieces=pieces)
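This is the standard Othello opening: the four center squares hold two pieces per color in a diagonal pattern. A standalone check of the center layout:

import numpy as np

size = 8
board = np.zeros((size, size), dtype="b")
half = size // 2
board[half][half - 1] = board[half - 1][half] = 1
board[half][half] = board[half - 1][half - 1] = -1
print(board[half - 1:half + 1, half - 1:half + 1])
# [[-1  1]
#  [ 1 -1]]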
Example #24
    def play_with_two_players_start(self):
        """
        Return the initial state

        Returns
        -------
        dict
            Dict of the possible actions and the state
        """
        self.state = State()
        self.state.initial_state()
        self.two_players = True
        state_dict = AIElements.get_state_dict(self.state)
        possible_action = AIElements.get_possible_action(self.state)
        self.possible_action_keys = possible_action.keys()
        return {
            "state": state_dict,
            "possible_action": possible_action,
            "task": "CHANGE_PLAYER"
        }
Example #25
File: test_dfs.py  Project: vynaloze/15
class TestState(unittest.TestCase):
    rows = 2
    cols = 2
    board1 = Board([Node(0), Node(1), Node(2), Node(3)], rows, cols)
    board2 = Board([Node(1), Node(0), Node(2), Node(3)], rows, cols)
    board3 = Board([Node(1), Node(3), Node(0), Node(2)], rows, cols)
    state1 = State(board1)
    state2 = State(board2)
    state3 = State(board3)

    def test_board_only(self):
        # given
        start_state = self.state2

        # when
        solved_state = DFS().solve(start_state, h0)

        # then
        self.assertListEqual(self.board1.content,
                             solved_state.current_board.content)

    def test_whole_state(self):
        # given
        start_state = self.state2
        target_state = State(self.board1, [MoveLeft()], [self.board2])

        # when
        solved_state = DFS().solve(start_state, h0)

        # then
        self.assertEqual(target_state, solved_state)

    def test_longer(self):
        start_state = self.state3

        # when
        solved_state = DFS().solve(start_state, h0)

        # then
        self.assertEqual(sorted(start_state.current_board.content),
                         solved_state.current_board.content)
Example #26
 def make_temp_board_solution(self, i, j, BestCost):
     tempBoard = self.copyBoard()
     # create a temporary board variable
     tempBoard[i] = (tempBoard[i] + j + 1) % self.d
     tempState = State(tempBoard)
     if tempState.cost < BestCost:
         BestCost = tempState.cost
         self.solutions.clear()
         self.solutions.append([tempBoard[i], i, BestCost])
     elif tempState.cost == BestCost:
         self.solutions.append([tempBoard[i], i, BestCost])
     return BestCost
Example #27
 def result(self, state, action):
     """See the superclass docstring."""
     copy_arr = np.copy(state.board)
     new_state = State(copy_arr, not state.player,
                       [p for p in state.pieces])
     if not action:
         return new_state
     player_num = 1 if state.player else -1
     y, x = action.dest
     result(copy_arr, y, x, self.size, player_num)
     new_state.pieces.append((y, x))
     return new_state
Example #28
    def control(self, rpms):
        # TODO adjust pwms to rpms

        temperatures = get_temps()

        headrooms = get_headrooms(temperatures=temperatures)

        pwms = get_pwms(headrooms=headrooms)

        State().update(headrooms=headrooms,
                       pwms=pwms,
                       rpms=rpms,
                       temperatures=temperatures)
Example #29
    def __init__(self, screen_dimension):
        self.width, self.height = screen_dimension
        self._background = pygame.transform.scale(
            pygame.image.load(
                os.path.join('../', 'assets', 'img', 'background.jpg')),
            (self.width, self.height))

        self._level_creator = LevelCreator()
        self._level = 1
        self._state = State(
            self._level_creator.create(self._level, (self.width, self.height)))
        self._header = None
        self._buttons = []
        pygame.mixer.music.stop()
Example #30
def main(user_interface):
    controller = Controller()
    user_interface = _get_user_interface(ui=user_interface)
    ttys = ['/dev/ttyACM0', '/dev/ttyACM1', '/dev/ttyACM2', '/dev/ttyACM3']
    with Bridgehead(ttys=ttys, baudrate=9600) as bridgehead:
        rpms = [None] * len(Configuration.fans)
        while True:
            controller.control(rpms)

            user_interface.update()

            try:
                rpms = bridgehead(pwms=State().pwms)
            except KeyboardInterrupt:
                break
Example #31
 def __init__(self, maze_rows, maze_cols):
     self.__maze_rows = maze_rows
     self.__maze_cols = maze_cols
     self.__maze = self.generate_random_grid()
     self.__states = StateMap.get_instance()
     for i in range(0, maze_rows):
         for j in range(0, maze_cols):
             if self.__maze[i, j] == 1:
                 cell_type = CellType.block
             else:
                 cell_type = CellType.empty
             if (i, j) == (maze_rows - 1, maze_cols - 1):
                 cell_type = CellType.end
             s = State(i, j, self.__maze, cell_type)
             self.__states.add_state(i, j, s)