Code example #1
File: test_board.py Project: polokk/katrain
 def test_collide(self, new_game):
     b = Game(MockKaTrain(force_package_config=True), MockEngine(), move_tree=new_game)
     b.play(Move.from_gtp("B9", player="B"))
     with pytest.raises(IllegalMoveException):
         b.play(Move.from_gtp("B9", player="W"))
     assert 1 == len(self.nonempty_chains(b))
     assert 1 == len(b.stones)
     assert 0 == len(b.prisoners)
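The tests above rely on a self.nonempty_chains(b) helper that the snippet does not include. A minimal sketch of what such a test-class method plausibly looks like, assuming the Game object keeps a chains list in which captured groups become empty (the body here is an assumption, not the actual katrain test code):

 def nonempty_chains(self, b):
     # chains whose stones were all captured become empty; keep the rest
     return [c for c in b.chains if c]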
Code example #2
File: test_board.py Project: serpiente/katrain
 def test_merge(self):
     b = Game(MockKaTrain(), MockEngine())
     b.play(Move.from_gtp("B9", player="B"))
     b.play(Move.from_gtp("A3", player="B"))
     b.play(Move.from_gtp("A9", player="B"))
     assert 2 == len(self.nonempty_chains(b))
     assert 3 == len(b.stones)
     assert 0 == len(b.prisoners)
Code example #3
File: test_board.py Project: serpiente/katrain
 def test_collide(self):
     b = Game(MockKaTrain(), MockEngine())
     b.play(Move.from_gtp("B9", player="B"))
     with pytest.raises(IllegalMoveException):
         b.play(Move.from_gtp("B9", player="W"))
     assert 1 == len(self.nonempty_chains(b))
     assert 1 == len(b.stones)
     assert 0 == len(b.prisoners)
Code example #4
File: test_board.py Project: polokk/katrain
 def test_merge(self, new_game):
     b = Game(MockKaTrain(force_package_config=True), MockEngine(), move_tree=new_game)
     b.play(Move.from_gtp("B9", player="B"))
     b.play(Move.from_gtp("A3", player="B"))
     b.play(Move.from_gtp("A9", player="B"))
     assert 2 == len(self.nonempty_chains(b))
     assert 3 == len(b.stones)
     assert 0 == len(b.prisoners)
Code example #5
    def test_ko(self, new_game):
        b = Game(MockKaTrain(force_package_config=True),
                 MockEngine(),
                 move_tree=new_game)
        for move in ["A2", "B1"]:
            b.play(Move.from_gtp(move, player="B"))

        for move in ["B2", "C1"]:
            b.play(Move.from_gtp(move, player="W"))
        b.play(Move.from_gtp("A1", player="W"))
        assert 4 == len(self.nonempty_chains(b))
        assert 4 == len(b.stones)
        assert 1 == len(b.prisoners)
        with pytest.raises(IllegalMoveException) as exc:
            b.play(Move.from_gtp("B1", player="B"))
        assert "Ko" in str(exc.value)

        b.play(Move.from_gtp("B1", player="B"), ignore_ko=True)
        assert 2 == len(b.prisoners)

        with pytest.raises(IllegalMoveException) as exc:
            b.play(Move.from_gtp("A1", player="W"))

        b.play(Move.from_gtp("F1", player="W"))
        b.play(Move(coords=None, player="B"))
        b.play(Move.from_gtp("A1", player="W"))
        assert 3 == len(b.prisoners)
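Several of these tests take a new_game pytest fixture that is not shown. A plausible conftest.py sketch, assuming GameNode accepts SGF-style root properties as in code example #12 below; the 9x9 board size is inferred from the coordinates used and is an assumption:

    import pytest

    # GameNode comes from katrain's core package; the exact import path varies by version
    @pytest.fixture
    def new_game():
        return GameNode(properties={"SZ": 9})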
Code example #6
    def draw_pv(self, pv, node, up_to_move):
        katrain = self.katrain
        next_last_player = [node.next_player, node.player]
        stone_color = STONE_COLORS
        cn = katrain.game.current_node
        if node != cn and node.parent != cn:
            hide_node = cn
            while hide_node and hide_node.move and hide_node != node:
                if not hide_node.move.is_pass:
                    self.draw_stone(*hide_node.move.coords, [0.85, 0.68, 0.40, 0.8])  # board coloured dot
                hide_node = hide_node.parent
        for i, gtpmove in enumerate(pv):
            if i > up_to_move:
                return
            move_player = next_last_player[i % 2]
            opp_player = next_last_player[1 - i % 2]
            coords = Move.from_gtp(gtpmove).coords
            if coords is None:  # pass move: draw marker next to the pass button
                sizefac = katrain.board_controls.pass_btn.size[1] / 2 / self.stone_size
                board_coords = [
                    katrain.board_controls.pass_btn.pos[0]
                    + katrain.board_controls.pass_btn.size[0]
                    + self.stone_size * sizefac,
                    katrain.board_controls.pass_btn.pos[1] + katrain.board_controls.pass_btn.size[1] / 2,
                ]
            else:
                board_coords = (self.gridpos_x[coords[0]], self.gridpos_y[coords[1]])
                sizefac = 1

            draw_circle(board_coords, self.stone_size * sizefac, stone_color[move_player])
            Color(*stone_color[opp_player])
            draw_text(pos=board_coords, text=str(i + 1), font_size=self.grid_size * sizefac / 1.45, font_name="Roboto")
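The next_last_player[i % 2] indexing is what alternates the colors of the numbered PV stones: even indices belong to the player to move, odd indices to the opponent. The same pattern in isolation (plain Python, no Kivy required):

    next_last_player = ["B", "W"]
    for i, gtpmove in enumerate(["D4", "Q16", "C3"]):
        move_player = next_last_player[i % 2]     # B, W, B, ...
        opp_player = next_last_player[1 - i % 2]  # W, B, W, ...
        print(i + 1, gtpmove, move_player, opp_player)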
Code example #7
File: test_board.py Project: polokk/katrain
 def test_snapback(self, new_game):
     b = Game(MockKaTrain(force_package_config=True), MockEngine(), move_tree=new_game)
     for move in ["C1", "D1", "E1", "C2", "D3", "E4", "F2", "F3", "F4"]:
         b.play(Move.from_gtp(move, player="B"))
     for move in ["D2", "E2", "C3", "D4", "C4"]:
         b.play(Move.from_gtp(move, player="W"))
     assert 5 == len(self.nonempty_chains(b))
     assert 14 == len(b.stones)
     assert 0 == len(b.prisoners)
     b.play(Move.from_gtp("E3", player="W"))
     assert 4 == len(self.nonempty_chains(b))
     assert 14 == len(b.stones)
     assert 1 == len(b.prisoners)
     b.play(Move.from_gtp("D3", player="B"))
     assert 4 == len(self.nonempty_chains(b))
     assert 12 == len(b.stones)
     assert 4 == len(b.prisoners)
Code example #8
    def draw_pv(self, pv, node, up_to_move):
        katrain = self.katrain
        next_last_player = [
            node.next_player,
            Move.opponent_player(node.next_player)
        ]
        cn = katrain.game.current_node
        if node != cn and node.parent != cn:
            hide_node = cn
            while hide_node and hide_node.move and hide_node != node:
                if not hide_node.move.is_pass:
                    pos = (self.gridpos_x[hide_node.move.coords[0]],
                           self.gridpos_y[hide_node.move.coords[1]])
                    draw_circle(pos, self.stone_size, [0.85, 0.68, 0.40, 0.8])
                hide_node = hide_node.parent
        for i, gtpmove in enumerate(pv):
            if i > up_to_move:
                return
            move_player = next_last_player[i % 2]
            coords = Move.from_gtp(gtpmove).coords
            if coords is None:  # pass move: draw marker next to the pass button
                sizefac = katrain.board_controls.pass_btn.size[1] / 2 / self.stone_size
                board_coords = [
                    katrain.board_controls.pass_btn.pos[0] +
                    katrain.board_controls.pass_btn.size[0] +
                    self.stone_size * sizefac,
                    katrain.board_controls.pass_btn.pos[1] +
                    katrain.board_controls.pass_btn.size[1] / 2,
                ]
            else:
                board_coords = (self.gridpos_x[coords[0]],
                                self.gridpos_y[coords[1]])
                sizefac = 1

            stone_size = self.stone_size * sizefac
            Color(1, 1, 1, 1)
            Rectangle(  # not sure why the -1 here, but seems to center better
                pos=(board_coords[0] - stone_size - 1,
                     board_coords[1] - stone_size),
                size=(2 * stone_size + 1, 2 * stone_size + 1),
                texture=cached_texture(Theme.STONE_TEXTURE[move_player]),
            )
            Color(*Theme.PV_TEXT_COLORS[move_player])
            draw_text(pos=board_coords,
                      text=str(i + 1),
                      font_size=self.grid_size * sizefac / 1.45,
                      font_name="Roboto")
Code example #9
File: badukpan.py Project: Tony-Liou/katrain
    def on_touch_down(self, touch):
        animating_pv = self.animating_pv
        if "button" in touch.profile:
            if touch.button == "left":
                if self.selecting_region_of_interest:
                    self.update_box_selection(touch, second_point=False)
                else:
                    self.check_next_move_ghost(touch)
            elif touch.button == "middle" and animating_pv:
                pv, node, _, _ = animating_pv
                upto = self.animating_pv_index or 1e9
                for i, gtpmove in enumerate(pv):
                    if i <= upto:  # up to move when scrolling, or all
                        node = node.play(Move.from_gtp(gtpmove, node.next_player))
                        node.analyze(self.katrain.engine, analyze_fast=True)
                self.katrain.controls.move_tree.redraw_tree_trigger()

        if ("button" not in touch.profile) or (touch.button not in ["scrollup", "scrolldown", "middle"]):
            self.set_animating_pv(None, None)  # any click/touch kills PV from label/move
Code example #10
File: test_board.py Project: polokk/katrain
 def test_capture(self, new_game):
     b = Game(MockKaTrain(force_package_config=True), MockEngine(), move_tree=new_game)
     b.play(Move.from_gtp("A2", player="B"))
     b.play(Move.from_gtp("B1", player="W"))
     b.play(Move.from_gtp("A1", player="W"))
     b.play(Move.from_gtp("C1", player="B"))
     assert 3 == len(self.nonempty_chains(b))
     assert 4 == len(b.stones)
     assert 0 == len(b.prisoners)
     b.play(Move.from_gtp("B2", player="B"))
     assert 2 == len(self.nonempty_chains(b))
     assert 3 == len(b.stones)
     assert 2 == len(b.prisoners)
     b.play(Move.from_gtp("B1", player="B"))
     with pytest.raises(IllegalMoveException, match="Single stone suicide"):
         b.play(Move.from_gtp("A1", player="W"))
     assert 1 == len(self.nonempty_chains(b))
     assert 4 == len(b.stones)
     assert 2 == len(b.prisoners)
Code example #11
File: test_board.py Project: serpiente/katrain
 def test_capture(self):
     b = Game(MockKaTrain(), MockEngine())
     b.play(Move.from_gtp("A2", player="B"))
     b.play(Move.from_gtp("B1", player="W"))
     b.play(Move.from_gtp("A1", player="W"))
     b.play(Move.from_gtp("C1", player="B"))
     assert 3 == len(self.nonempty_chains(b))
     assert 4 == len(b.stones)
     assert 0 == len(b.prisoners)
     b.play(Move.from_gtp("B2", player="B"))
     assert 2 == len(self.nonempty_chains(b))
     assert 3 == len(b.stones)
     assert 2 == len(b.prisoners)
     b.play(Move.from_gtp("B1", player="B"))
     with pytest.raises(IllegalMoveException) as exc:
         b.play(Move.from_gtp("A1", player="W"))
     assert "Suicide" in str(exc.value)
     assert 1 == len(self.nonempty_chains(b))
     assert 4 == len(b.stones)
     assert 2 == len(b.prisoners)
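The stone and prisoner counts asserted in these capture tests follow from standard chain-and-liberty rules: a chain is a flood fill over same-colored neighbors, and it is captured when it has no liberties left. A self-contained sketch of that idea (illustrative only, not katrain's Game implementation; board-edge checks are omitted for brevity):

    def chain_and_liberties(stones, start):
        # stones maps (x, y) -> "B" or "W"; returns the chain containing
        # start and the set of empty points adjacent to it (its liberties)
        color, chain, frontier, liberties = stones[start], set(), [start], set()
        while frontier:
            p = frontier.pop()
            if p in chain:
                continue
            chain.add(p)
            x, y = p
            for n in [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)]:
                if n not in stones:
                    liberties.add(n)
                elif stones[n] == color:
                    frontier.append(n)
        return chain, liberties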
Code example #12
File: test_board.py Project: polokk/katrain
    def test_suicide(self):
        rulesets_to_test = BaseEngine.RULESETS_ABBR + [('{"suicide":true}', ""), ('{"suicide":false}', "")]
        for shortrule, _ in rulesets_to_test:
            new_game = GameNode(properties={"SZ": 19, "RU": shortrule})
            b = Game(MockKaTrain(force_package_config=True), MockEngine(), move_tree=new_game)
            b.play(Move.from_gtp("A18", player="B"))
            b.play(Move.from_gtp("B18", player="B"))
            b.play(Move.from_gtp("C19", player="B"))
            b.play(Move.from_gtp("A19", player="W"))
            assert 4 == len(b.stones)
            assert 0 == len(b.prisoners)

            if shortrule in ["tt", "nz", '{"suicide":true}']:
                b.play(Move.from_gtp("B19", player="W"))
                assert 3 == len(b.stones)
                assert 2 == len(b.prisoners)
            else:
                with pytest.raises(IllegalMoveException, match="Suicide"):
                    b.play(Move.from_gtp("B19", player="W"))
                assert 4 == len(b.stones)
                assert 0 == len(b.prisoners)
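The parametrization mixes abbreviated ruleset names with raw KataGo JSON rules: Tromp-Taylor ("tt") and New Zealand ("nz") rules allow multi-stone suicide, so the two white stones at A19 and B19 are removed together, while the remaining rulesets raise IllegalMoveException. A hypothetical shape for the abbreviation table (the real list lives in BaseEngine and may differ):

    RULESETS_ABBR = [
        ("jp", "japanese"), ("cn", "chinese"), ("ko", "korean"),
        ("aga", "aga"), ("tt", "tromp-taylor"), ("nz", "new zealand"),
    ]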
Code example #13
File: badukpan.py Project: serpiente/katrain
    def draw_hover_contents(self, *_args):
        ghost_alpha = GHOST_ALPHA
        katrain = self.katrain
        game_ended = katrain.game.ended
        current_node = katrain.game.current_node
        player, next_player = current_node.player, current_node.next_player
        stone_color = STONE_COLORS
        lock_ai = self.trainer_config["lock_ai"] and self.katrain.play_analyze_mode == MODE_PLAY

        self.canvas.after.clear()
        with self.canvas.after:
            self.active_pv_moves = []

            # children of current moves in undo / review
            alpha = GHOST_ALPHA
            if katrain.analysis_controls.show_children.active:
                for child_node in current_node.children:
                    points_lost = child_node.points_lost
                    move = child_node.move
                    if move and move.coords is not None:
                        if points_lost is None:
                            evalcol = None
                        else:
                            evalcol = copy.copy(self.eval_color(points_lost))
                            evalcol[3] = alpha
                        if child_node.analysis_ready:
                            self.active_pv_moves.append(
                                (move.coords, [move.gtp()] + child_node.candidate_moves[0]["pv"], current_node)
                            )
                        scale = CHILD_SCALE
                        self.draw_stone(
                            move.coords[0],
                            move.coords[1],
                            (*stone_color[move.player][:3], alpha),
                            None,
                            None,
                            evalcol,
                            evalscale=scale,
                            scale=scale,
                        )

            # hints or PV
            if katrain.analysis_controls.hints.active and not game_ended and not lock_ai:
                hint_moves = current_node.candidate_moves
                for i, move_dict in enumerate(hint_moves):
                    move = Move.from_gtp(move_dict["move"])
                    if move.coords is not None:
                        alpha, scale = GHOST_ALPHA, 1.0
                        if i == 0:
                            alpha += TOP_MOVE_ALPHA
                        elif move_dict["visits"] < VISITS_FRAC_SMALL * hint_moves[0]["visits"]:
                            scale = 0.8
                        if "pv" in move_dict:
                            self.active_pv_moves.append((move.coords, move_dict["pv"], current_node))
                        else:
                            katrain.log(f"PV missing for move_dict {move_dict}", OUTPUT_DEBUG)
                        self.draw_stone(
                            move.coords[0],
                            move.coords[1],
                            [
                                *self.eval_color(move_dict["pointsLost"])[:3],
                                alpha
                            ],
                            scale=scale,
                        )

            # hover next move ghost stone
            if self.ghost_stone:
                self.draw_stone(*self.ghost_stone,
                                (*stone_color[next_player], ghost_alpha))

            animating_pv = self.animating_pv
            if animating_pv:
                pv, node, start_time, _ = animating_pv
                delay = self.trainer_config.get("anim_pv_time", 0.5)
                up_to_move = (time.time() - start_time) / delay
                self.draw_pv(pv, node, up_to_move)
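self.eval_color(points_lost) maps a move's point loss onto a good-to-bad color scale. A minimal sketch of such a mapping; the RGBA stops and point-loss thresholds below are assumptions, not katrain's actual theme values:

    EVAL_COLORS = [  # assumed stops, best (green) to worst (red)
        [0.1, 0.8, 0.1, 1], [0.6, 0.8, 0.1, 1], [0.9, 0.7, 0.1, 1],
        [0.9, 0.4, 0.1, 1], [0.9, 0.1, 0.1, 1],
    ]
    THRESHOLDS = [0.5, 1.5, 3.0, 6.0]  # assumed point-loss boundaries

    def eval_color(points_lost):
        for color, bound in zip(EVAL_COLORS, THRESHOLDS):
            if points_lost < bound:
                return color
        return EVAL_COLORS[-1]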
Code example #14
File: badukpan.py Project: Tony-Liou/katrain
    def draw_hover_contents(self, *_args):
        ghost_alpha = Theme.GHOST_ALPHA
        katrain = self.katrain
        game_ended = katrain.game.end_result
        current_node = katrain.game.current_node
        next_player = current_node.next_player

        board_size_x, board_size_y = katrain.game.board_size
        if len(self.gridpos_x) < board_size_x or len(self.gridpos_y) < board_size_y:
            return  # race condition

        with self.canvas.after:
            self.canvas.after.clear()

            self.active_pv_moves = []
            # hints or PV
            hint_moves = []
            if (
                katrain.analysis_controls.hints.active
                and not katrain.analysis_controls.policy.active
                and not game_ended
            ):
                hint_moves = current_node.candidate_moves
            elif katrain.controls.status_state[1] == STATUS_TEACHING:  # show score hint for teaching undo
                hint_moves = [
                    m
                    for m in current_node.candidate_moves
                    for c in current_node.children
                    if c.move and c.auto_undo and c.move.gtp() == m["move"]
                ]

            top_move_coords = None
            child_moves = {c.move.gtp() for c in current_node.children if c.move}
            if hint_moves:
                low_visits_threshold = katrain.config("trainer/low_visits", 25)
                top_moves_show = [
                    opt
                    for opt in [
                        katrain.config("trainer/top_moves_show"),
                        katrain.config("trainer/top_moves_show_secondary"),
                    ]
                    if opt in TOP_MOVE_OPTIONS and opt != TOP_MOVE_NOTHING
                ]
                for move_dict in hint_moves:
                    move = Move.from_gtp(move_dict["move"])
                    if move.coords is not None:
                        engine_best_move = move_dict.get("order", 99) == 0
                        scale = Theme.HINT_SCALE
                        text_on = True
                        alpha = Theme.HINTS_ALPHA
                        if (
                            move_dict["visits"] < low_visits_threshold
                            and not engine_best_move
                            and move_dict["move"] not in child_moves
                        ):
                            scale = Theme.UNCERTAIN_HINT_SCALE
                            text_on = False
                            alpha = Theme.HINTS_LO_ALPHA

                        if "pv" in move_dict:
                            self.active_pv_moves.append((move.coords, move_dict["pv"], current_node))
                        else:
                            katrain.log(f"PV missing for move_dict {move_dict}", OUTPUT_DEBUG)
                        evalsize = self.stone_size * scale
                        evalcol = self.eval_color(move_dict["pointsLost"])
                        if text_on and top_moves_show:  # remove grid lines using a board colored circle
                            draw_circle(
                                (self.gridpos_x[move.coords[0]], self.gridpos_y[move.coords[1]]),
                                self.stone_size * scale * 0.98,
                                Theme.APPROX_BOARD_COLOR,
                            )

                        Color(*evalcol[:3], alpha)
                        Rectangle(
                            pos=(self.gridpos_x[move.coords[0]] - evalsize, self.gridpos_y[move.coords[1]] - evalsize),
                            size=(2 * evalsize, 2 * evalsize),
                            texture=cached_texture(Theme.TOP_MOVE_TEXTURE),
                        )
                        if text_on and top_moves_show:  # TODO: faster if not sized?
                            keys = {"size": self.grid_size / 3, "smallsize": self.grid_size / 3.33}
                            player_sign = current_node.player_sign(next_player)
                            if len(top_moves_show) == 1:
                                fmt = "[size={size:.0f}]{" + top_moves_show[0] + "}[/size]"
                            else:
                                fmt = (
                                    "[size={size:.0f}]{"
                                    + top_moves_show[0]
                                    + "}[/size]\n[size={smallsize:.0f}]{"
                                    + top_moves_show[1]
                                    + "}[/size]"
                                )

                            keys[TOP_MOVE_DELTA_SCORE] = (
                                "0.0" if -0.05 < move_dict["pointsLost"] < 0.05 else f"{-move_dict['pointsLost']:+.1f}"
                            )
                            #                           def fmt_maybe_missing(arg,sign,digits=1):
                            #                               return str(round(sign*arg,digits)) if arg is not None else "N/A"

                            keys[TOP_MOVE_SCORE] = f"{player_sign * move_dict['scoreLead']:.1f}"
                            winrate = move_dict["winrate"] if player_sign == 1 else 1 - move_dict["winrate"]
                            keys[TOP_MOVE_WINRATE] = f"{winrate*100:.1f}"
                            keys[TOP_MOVE_DELTA_WINRATE] = f"{-move_dict['winrateLost']:+.1%}"
                            keys[TOP_MOVE_VISITS] = format_visits(move_dict["visits"])
                            #                            keys[TOP_MOVE_UTILITY] = fmt_maybe_missing( move_dict.get('utility'),player_sign,2)
                            #                            keys[TOP_MOVE_UTILITYLCB] = fmt_maybe_missing(move_dict.get('utilityLcb'),player_sign,2)
                            #                            keys[TOP_MOVE_SCORE_STDDEV] =fmt_maybe_missing(move_dict.get('scoreStdev'),1)
                            Color(*Theme.HINT_TEXT_COLOR)
                            draw_text(
                                pos=(self.gridpos_x[move.coords[0]], self.gridpos_y[move.coords[1]]),
                                text=fmt.format(**keys),
                                font_name="Roboto",
                                markup=True,
                                line_height=0.85,
                                halign="center",
                            )

                        if engine_best_move:
                            top_move_coords = move.coords
                            Color(*Theme.TOP_MOVE_BORDER_COLOR)
                            Line(
                                circle=(
                                    self.gridpos_x[move.coords[0]],
                                    self.gridpos_y[move.coords[1]],
                                    self.stone_size - dp(1.2),
                                ),
                                width=dp(1.2),
                            )

            # children of current moves in undo / review
            if katrain.analysis_controls.show_children.active:
                for child_node in current_node.children:
                    move = child_node.move
                    if move and move.coords is not None:
                        if child_node.analysis_exists:
                            self.active_pv_moves.append(
                                (move.coords, [move.gtp()] + child_node.candidate_moves[0]["pv"], current_node)
                            )

                        if move.coords != top_move_coords:  # for contrast
                            dashed_width = 18
                            Color(*Theme.NEXT_MOVE_DASH_CONTRAST_COLORS[child_node.player])
                            Line(
                                circle=(
                                    self.gridpos_x[move.coords[0]],
                                    self.gridpos_y[move.coords[1]],
                                    self.stone_size - dp(1.2),
                                ),
                                width=dp(1.2),
                            )
                        else:
                            dashed_width = 10
                        Color(*Theme.STONE_COLORS[child_node.player])
                        for s in range(0, 360, 30):
                            Line(
                                circle=(
                                    self.gridpos_x[move.coords[0]],
                                    self.gridpos_y[move.coords[1]],
                                    self.stone_size - dp(1.2),
                                    s,
                                    s + dashed_width,
                                ),
                                width=dp(1.2),
                            )

            if self.selecting_region_of_interest and len(self.region_of_interest) == 4:
                x1, x2, y1, y2 = self.region_of_interest
                self.draw_roi_box([min(x1, x2), max(x1, x2), min(y1, y2), max(y1, y2)], width=dp(2))
            else:
                # hover next move ghost stone
                if self.ghost_stone:
                    self.draw_stone(*self.ghost_stone, next_player, alpha=ghost_alpha)

                animating_pv = self.animating_pv
                if animating_pv:
                    pv, node, start_time, _ = animating_pv
                    up_to_move = self.get_animate_pv_index()
                    self.draw_pv(pv, node, up_to_move)

                if getattr(self.katrain.game, "region_of_interest", None):
                    self.draw_roi_box(self.katrain.game.region_of_interest, width=dp(1.25))

            # pass circle
            if current_node.is_pass or game_ended:
                if game_ended:
                    text = game_ended
                    katrain.controls.timer.paused = True
                else:
                    text = i18n._("board-pass")
                Color(*Theme.PASS_CIRCLE_COLOR)
                center = (self.gridpos_x[int(board_size_x / 2)], self.gridpos_y[int(board_size_y / 2)])
                size = min(self.width, self.height) * 0.227
                Ellipse(pos=(center[0] - size / 2, center[1] - size / 2), size=(size, size))
                Color(*Theme.PASS_CIRCLE_TEXT_COLOR)
                draw_text(pos=center, text=text, font_size=size * 0.25, halign="center")
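format_visits(move_dict["visits"]) compacts raw visit counts for the hint labels. A plausible sketch under assumed formatting rules (katrain's exact rounding may differ):

    def format_visits(n):
        # assumed format: 980 -> '980', 12345 -> '12k', 3200000 -> '3.2M'
        if n < 1000:
            return str(n)
        if n < 1_000_000:
            return f"{n / 1000:.1f}k" if n < 10_000 else f"{n / 1000:.0f}k"
        return f"{n / 1_000_000:.1f}M"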
Code example #15
def generate_ai_move(game: Game, ai_mode: str,
                     ai_settings: Dict) -> Tuple[Move, GameNode]:
    cn = game.current_node

    if ai_mode == AI_HANDICAP:
        pda = ai_settings["pda"]
        if ai_settings["automatic"]:
            n_handicaps = len(game.root.get_list_property("AB", []))
            MOVE_VALUE = 14  # could be rules dependent
            b_stones_advantage = max(n_handicaps - 1, 0) - (cn.komi - MOVE_VALUE / 2) / MOVE_VALUE
            pda = min(3, max(-3, -b_stones_advantage * (3 / 8)))  # max PDA at 8 stone adv, normal 9 stone game is 8.46
        handicap_analysis = request_ai_analysis(
            game, cn, {"playoutDoublingAdvantage": pda, "playoutDoublingAdvantagePla": "BLACK"}
        )
        if not handicap_analysis:
            game.katrain.log("Error getting handicap-based move", OUTPUT_ERROR)
            ai_mode = AI_DEFAULT

    while not cn.analysis_complete:
        time.sleep(0.01)
        game.engines[cn.next_player].check_alive(exception_if_dead=True)

    ai_thoughts = ""
    if ai_mode in AI_STRATEGIES_POLICY and cn.policy:  # pure policy based move
        policy_moves = cn.policy_ranking
        pass_policy = cn.policy[-1]
        # don't make it jump around for the last few sensible non-pass moves
        top_5_pass = any([polmove[1].is_pass for polmove in policy_moves[:5]])

        size = game.board_size
        policy_grid = var_to_grid(cn.policy, size)  # type: List[List[float]]
        top_policy_move = policy_moves[0][1]
        ai_thoughts += f"Using policy based strategy, base top 5 moves are {fmt_moves(policy_moves[:5])}. "
        if (ai_mode == AI_POLICY and cn.depth <= ai_settings["opening_moves"]) or (
            ai_mode in [AI_LOCAL, AI_TENUKI] and not (cn.move and cn.move.coords)
        ):
            ai_mode = AI_WEIGHTED
            ai_thoughts += "Strategy override, using policy-weighted strategy instead. "
            ai_settings = {"pick_override": 0.9, "weaken_fac": 1, "lower_bound": 0.02}

        if top_5_pass:
            aimove = top_policy_move
            ai_thoughts += "Playing top one because one of them is pass."
        elif ai_mode == AI_POLICY:
            aimove = top_policy_move
            ai_thoughts += f"Playing top policy move {aimove.gtp()}."
        else:  # weighted or pick-based
            legal_policy_moves = [(pol, mv) for pol, mv in policy_moves if not mv.is_pass and pol > 0]
            board_squares = size[0] * size[1]
            if ai_mode == AI_RANK:  # calibrated, override from 0.8 at start to ~0.4 at full board
                override = 0.8 * (1 - 0.5 * (board_squares - len(legal_policy_moves)) / board_squares)
                overridetwo = 0.85 + max(0, 0.02 * (ai_settings["kyu_rank"] - 8))
            else:
                override = ai_settings["pick_override"]
                overridetwo = 1.0

            if policy_moves[0][0] > override:
                aimove = top_policy_move
                ai_thoughts += f"Top policy move has weight > {override:.1%}, so overriding other strategies."
            elif policy_moves[0][0] + policy_moves[1][0] > overridetwo:
                aimove = top_policy_move
                ai_thoughts += (
                    f"Top two policy moves have cumulative weight > {overridetwo:.1%}, so overriding other strategies."
                )
            elif ai_mode == AI_WEIGHTED:
                aimove, ai_thoughts = policy_weighted_move(
                    policy_moves, ai_settings["lower_bound"],
                    ai_settings["weaken_fac"])
            elif ai_mode in AI_STRATEGIES_PICK:

                if ai_mode != AI_RANK:
                    n_moves = max(1, int(ai_settings["pick_frac"] * len(legal_policy_moves) + ai_settings["pick_n"]))
                else:
                    orig_calib_avemodrank = 0.063015 + 0.7624 * board_squares / (
                        10 ** (-0.05737 * ai_settings["kyu_rank"] + 1.9482)
                    )
                    norm_leg_moves = len(legal_policy_moves) / board_squares
                    modified_calib_avemodrank = (
                        0.3931
                        + 0.6559
                        * norm_leg_moves
                        * math.exp(
                            -1 * (3.002 * norm_leg_moves * norm_leg_moves - norm_leg_moves - 0.034889 * ai_settings["kyu_rank"] - 0.5097) ** 2
                        )
                        - 0.01093 * ai_settings["kyu_rank"]
                    ) * orig_calib_avemodrank
                    n_moves = board_squares * norm_leg_moves / (1.31165 * (modified_calib_avemodrank + 1) - 0.082653)
                    n_moves = max(1, round(n_moves))

                if ai_mode in [AI_INFLUENCE, AI_TERRITORY, AI_LOCAL, AI_TENUKI]:
                    if cn.depth > ai_settings["endgame"] * board_squares:
                        weighted_coords = [(pol, 1, *mv.coords) for pol, mv in legal_policy_moves]
                        x_ai_thoughts = f"Generated equal weights as move number >= {ai_settings['endgame'] * size[0] * size[1]}. "
                        n_moves = int(max(n_moves, len(legal_policy_moves) // 2))
                    elif ai_mode in [AI_INFLUENCE, AI_TERRITORY]:
                        weighted_coords, x_ai_thoughts = generate_influence_territory_weights(
                            ai_mode, ai_settings, policy_grid, size)
                    else:  # ai_mode in [AI_LOCAL, AI_TENUKI]
                        weighted_coords, x_ai_thoughts = generate_local_tenuki_weights(
                            ai_mode, ai_settings, policy_grid, cn, size)
                    ai_thoughts += x_ai_thoughts
                else:  # ai_mode in [AI_PICK, AI_RANK]:
                    weighted_coords = [(policy_grid[y][x], 1, x, y)
                                       for x in range(size[0])
                                       for y in range(size[1])
                                       if policy_grid[y][x] > 0]

                pick_moves = weighted_selection_without_replacement(
                    weighted_coords, n_moves)
                ai_thoughts += f"Picked {min(n_moves,len(weighted_coords))} random moves according to weights. "

                if pick_moves:
                    new_top = [
                        (p, Move((x, y), player=cn.next_player))
                        for p, wt, x, y in heapq.nlargest(5, pick_moves)
                    ]
                    aimove = new_top[0][1]
                    ai_thoughts += f"Top 5 among these were {fmt_moves(new_top)} and picked top {aimove.gtp()}. "
                    if new_top[0][0] < pass_policy:
                        ai_thoughts += f"But found pass ({pass_policy:.2%}) to be higher rated than {aimove.gtp()} ({new_top[0][0]:.2%}) so will play top policy move instead."
                        aimove = top_policy_move
                else:
                    aimove = top_policy_move
                    ai_thoughts += f"Pick policy strategy {ai_mode} failed to find legal moves, so is playing top policy move {aimove.gtp()}."
            else:
                raise ValueError(f"Unknown Policy-based AI mode {ai_mode}")
    else:  # Engine based move
        candidate_ai_moves = cn.candidate_moves
        if ai_mode == AI_HANDICAP:
            candidate_ai_moves = handicap_analysis["moveInfos"]

        top_cand = Move.from_gtp(candidate_ai_moves[0]["move"],
                                 player=cn.next_player)
        if top_cand.is_pass and ai_mode not in [AI_DEFAULT, AI_HANDICAP]:  # don't play suicidal to balance score
            aimove = top_cand
            ai_thoughts += f"Top move is pass, so passing regardless of strategy. "
        else:
            if ai_mode == AI_JIGO:
                sign = cn.player_sign(cn.next_player)
                jigo_move = min(
                    candidate_ai_moves,
                    key=lambda move: abs(sign * move["scoreLead"] -
                                         ai_settings["target_score"]))
                aimove = Move.from_gtp(jigo_move["move"],
                                       player=cn.next_player)
                ai_thoughts += f"Jigo strategy found {len(candidate_ai_moves)} candidate moves (best {top_cand.gtp()}) and chose {aimove.gtp()} as closest to 0.5 point win"
            elif ai_mode == AI_SCORELOSS:
                c = ai_settings["strength"]
                moves = [(
                    d["pointsLost"],
                    math.exp(min(200, -c * max(0, d["pointsLost"]))),
                    Move.from_gtp(d["move"], player=cn.next_player),
                ) for d in candidate_ai_moves]
                topmove = weighted_selection_without_replacement(moves, 1)[0]
                aimove = topmove[2]
                ai_thoughts += f"ScoreLoss strategy found {len(candidate_ai_moves)} candidate moves (best {top_cand.gtp()}) and chose {aimove.gtp()} (weight {topmove[1]:.3f}, point loss {topmove[0]:.1f}) based on score weights."
            elif ai_mode in [AI_SIMPLE_OWNERSHIP, AI_SETTLE_STONES]:
                stones_with_player = {(*s.coords, s.player)
                                      for s in game.stones}
                next_player_sign = cn.player_sign(cn.next_player)
                if ai_mode == AI_SIMPLE_OWNERSHIP:

                    def settledness(d, player_sign, player):
                        return sum([
                            abs(o) for o in d["ownership"]
                            if player_sign * o > 0
                        ])

                else:
                    board_size_x, board_size_y = game.board_size

                    def settledness(d, player_sign, player):
                        ownership_grid = var_to_grid(
                            d["ownership"], (board_size_x, board_size_y))
                        return sum([
                            abs(ownership_grid[s.coords[0]][s.coords[1]])
                            for s in game.stones if s.player == player
                        ])

                def is_attachment(move):
                    if move.is_pass:
                        return False
                    attach_opponent_stones = sum(
                        (move.coords[0] + dx, move.coords[1] + dy, cn.player) in stones_with_player
                        for dx in [-1, 0, 1]
                        for dy in [-1, 0, 1]
                        if abs(dx) + abs(dy) == 1
                    )
                    nearby_own_stones = sum(
                        (move.coords[0] + dx, move.coords[1] + dy, cn.next_player) in stones_with_player
                        for dx in [-2, -1, 0, 1, 2]
                        for dy in [-2, -1, 0, 1, 2]
                        if abs(dx) + abs(dy) <= 2  # allows clamps/jumps
                    )
                    return attach_opponent_stones >= 1 and nearby_own_stones == 0

                def is_tenuki(d):
                    return not d.is_pass and not any(
                        not node or not node.move or node.move.is_pass
                        or max(abs(last_c - cand_c) for last_c, cand_c in zip(node.move.coords, d.coords)) < 5
                        for node in [cn, cn.parent]
                    )

                moves_with_settledness = sorted(
                    [
                        (
                            move,
                            settledness(d, next_player_sign, cn.next_player),
                            settledness(d, -next_player_sign, cn.player),
                            is_attachment(move),
                            is_tenuki(move),
                            d,
                        )
                        for d in candidate_ai_moves
                        if d["pointsLost"] < ai_settings["max_points_lost"]
                        and "ownership" in d
                        and (d["order"] <= 1 or d["visits"] >= ai_settings.get("min_visits", 1))
                        for move in [Move.from_gtp(d["move"], player=cn.next_player)]
                        if not (move.is_pass and d["pointsLost"] > 0.75)
                    ],
                    key=lambda t: t[5]["pointsLost"]
                    + ai_settings["attach_penalty"] * t[3]
                    + ai_settings["tenuki_penalty"] * t[4]
                    - ai_settings["settled_weight"] * (t[1] + ai_settings["opponent_fac"] * t[2]),
                )
                if moves_with_settledness:
                    cands = [
                        f"{move.gtp()} ({d['pointsLost']:.1f} pt lost, {d['visits']} visits, {settled:.1f} settledness, {oppsettled:.1f} opponent settledness{', attachment' if isattach else ''}{', tenuki' if istenuki else ''})"
                        for move, settled, oppsettled, isattach, istenuki, d in
                        moves_with_settledness[:5]
                    ]
                    ai_thoughts += f"{ai_mode} strategy. Top 5 Candidates {', '.join(cands)} "
                    aimove = moves_with_settledness[0][0]
                else:
                    raise Exception(
                        "No moves found - are you using an older KataGo with no per-move ownership info?"
                    )
            else:
                if ai_mode not in [AI_DEFAULT, AI_HANDICAP]:
                    game.katrain.log(
                        f"Unknown AI mode {ai_mode} or policy missing, using default.",
                        OUTPUT_INFO)
                    ai_thoughts += f"Strategy {ai_mode} not found or unexpected fallback."
                aimove = top_cand
                if ai_mode == AI_HANDICAP:
                    ai_thoughts += f"Handicap strategy found {len(candidate_ai_moves)} moves returned from the engine and chose {aimove.gtp()} as top move. PDA based score {cn.format_score(handicap_analysis['rootInfo']['scoreLead'])} and win rate {cn.format_winrate(handicap_analysis['rootInfo']['winrate'])}"
                else:
                    ai_thoughts += f"Default strategy found {len(candidate_ai_moves)} moves returned from the engine and chose {aimove.gtp()} as top move"
    game.katrain.log(f"AI thoughts: {ai_thoughts}", OUTPUT_DEBUG)
    played_node = game.play(aimove)
    played_node.ai_thoughts = ai_thoughts
    return aimove, played_node
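weighted_selection_without_replacement is called in several places above but never shown. A standard way to implement it is the Efraimidis-Spirakis key-sorting trick: give each item the key log(u)/w for u ~ U(0, 1) and keep the pick_n largest keys. A sketch under the assumption that the weight sits at index 1 of each tuple, which matches the call sites above (katrain's actual helper may differ in detail):

    import heapq
    import math
    import random

    def weighted_selection_without_replacement(items, pick_n):
        # log(u)/w is negative and closer to 0 for heavier weights, so the
        # pick_n largest keys form a weighted sample without replacement
        keyed = [(math.log(random.random()) / (item[1] + 1e-18), item) for item in items]
        return [item for _, item in heapq.nlargest(pick_n, keyed, key=lambda t: t[0])]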
Code example #16
File: ai.py Project: yibit/katrain
def generate_ai_move(game: Game, ai_mode: str,
                     ai_settings: Dict) -> Tuple[Move, GameNode]:
    cn = game.current_node

    if ai_mode == AI_HANDICAP:
        pda = ai_settings["pda"]
        if ai_settings["automatic"]:
            n_handicaps = len(game.root.get_list_property("AB", []))
            MOVE_VALUE = 14  # could be rules dependent
            b_stones_advantage = max(n_handicaps - 1, 0) - (cn.komi - MOVE_VALUE / 2) / MOVE_VALUE
            pda = min(3, max(-3, -b_stones_advantage * (3 / 8)))  # max PDA at 8 stone adv, normal 9 stone game is 8.46
        handicap_analysis = request_ai_analysis(
            game, cn, {"playoutDoublingAdvantage": pda, "playoutDoublingAdvantagePla": "BLACK"}
        )
        if not handicap_analysis:
            game.katrain.log("Error getting handicap-based move", OUTPUT_ERROR)
            ai_mode = AI_DEFAULT

    while not cn.analysis_ready:
        time.sleep(0.01)
        game.engines[cn.next_player].check_alive(exception_if_dead=True)

    ai_thoughts = ""
    if ai_mode in AI_STRATEGIES_POLICY and cn.policy:  # pure policy based move
        policy_moves = cn.policy_ranking
        pass_policy = cn.policy[-1]
        # don't make it jump around for the last few sensible non-pass moves
        top_5_pass = any([polmove[1].is_pass for polmove in policy_moves[:5]])

        size = game.board_size
        policy_grid = var_to_grid(cn.policy, size)  # type: List[List[float]]
        top_policy_move = policy_moves[0][1]
        ai_thoughts += f"Using policy based strategy, base top 5 moves are {fmt_moves(policy_moves[:5])}. "
        if (ai_mode == AI_POLICY and cn.depth <= ai_settings["opening_moves"]) or (
            ai_mode in [AI_LOCAL, AI_TENUKI] and not (cn.move and cn.move.coords)
        ):
            ai_mode = AI_WEIGHTED
            ai_thoughts += "Strategy override, using policy-weighted strategy instead. "
            ai_settings = {"pick_override": 0.9, "weaken_fac": 1, "lower_bound": 0.02}

        if top_5_pass:
            aimove = top_policy_move
            ai_thoughts += "Playing top one because one of them is pass."
        elif ai_mode == AI_POLICY:
            aimove = top_policy_move
            ai_thoughts += f"Playing top policy move {aimove.gtp()}."
        else:  # weighted or pick-based
            legal_policy_moves = [(pol, mv) for pol, mv in policy_moves if not mv.is_pass and pol > 0]
            board_squares = size[0] * size[1]
            if ai_mode == AI_RANK:  # calibrated, override from 0.8 at start to ~0.4 at full board
                override = 0.8 * (1 - 0.5 * (board_squares - len(legal_policy_moves)) / board_squares)
                overridetwo = 0.85 + max(0, 0.02 * (ai_settings["kyu_rank"] - 8))
            else:
                override = ai_settings["pick_override"]
                overridetwo = 1.0

            if policy_moves[0][0] > override:
                aimove = top_policy_move
                ai_thoughts += f"Top policy move has weight > {override:.1%}, so overriding other strategies."
            elif policy_moves[0][0] + policy_moves[1][0] > overridetwo:
                aimove = top_policy_move
                ai_thoughts += (
                    f"Top two policy moves have cumulative weight > {overridetwo:.1%}, so overriding other strategies."
                )
            elif ai_mode == AI_WEIGHTED:
                aimove, ai_thoughts = policy_weighted_move(
                    policy_moves, ai_settings["lower_bound"],
                    ai_settings["weaken_fac"])
            elif ai_mode in AI_STRATEGIES_PICK:

                if ai_mode != AI_RANK:
                    n_moves = max(1, int(ai_settings["pick_frac"] * len(legal_policy_moves) + ai_settings["pick_n"]))
                else:
                    orig_calib_avemodrank = 0.063015 + 0.7624 * board_squares / (
                        10 ** (-0.05737 * ai_settings["kyu_rank"] + 1.9482)
                    )
                    norm_leg_moves = len(legal_policy_moves) / board_squares
                    modified_calib_avemodrank = (
                        0.3931
                        + 0.6559
                        * norm_leg_moves
                        * math.exp(
                            -1 * (3.002 * norm_leg_moves * norm_leg_moves - norm_leg_moves - 0.034889 * ai_settings["kyu_rank"] - 0.5097) ** 2
                        )
                        - 0.01093 * ai_settings["kyu_rank"]
                    ) * orig_calib_avemodrank
                    n_moves = board_squares * norm_leg_moves / (1.31165 * (modified_calib_avemodrank + 1) - 0.082653)
                    n_moves = max(1, round(n_moves))

                if ai_mode in [AI_INFLUENCE, AI_TERRITORY, AI_LOCAL, AI_TENUKI]:
                    if cn.depth > ai_settings["endgame"] * board_squares:
                        weighted_coords = [(pol, 1, *mv.coords) for pol, mv in legal_policy_moves]
                        x_ai_thoughts = f"Generated equal weights as move number >= {ai_settings['endgame'] * size[0] * size[1]}. "
                        n_moves = int(max(n_moves, len(legal_policy_moves) // 2))
                    elif ai_mode in [AI_INFLUENCE, AI_TERRITORY]:
                        weighted_coords, x_ai_thoughts = generate_influence_territory_weights(
                            ai_mode, ai_settings, policy_grid, size)
                    else:  # ai_mode in [AI_LOCAL, AI_TENUKI]
                        weighted_coords, x_ai_thoughts = generate_local_tenuki_weights(
                            ai_mode, ai_settings, policy_grid, cn, size)
                    ai_thoughts += x_ai_thoughts
                else:  # ai_mode in [AI_PICK, AI_RANK]:
                    weighted_coords = [(policy_grid[y][x], 1, x, y)
                                       for x in range(size[0])
                                       for y in range(size[1])
                                       if policy_grid[y][x] > 0]

                pick_moves = weighted_selection_without_replacement(
                    weighted_coords, n_moves)
                ai_thoughts += f"Picked {min(n_moves,len(weighted_coords))} random moves according to weights. "

                if pick_moves:
                    new_top = [
                        (p, Move((x, y), player=cn.next_player))
                        for p, wt, x, y in heapq.nlargest(5, pick_moves)
                    ]
                    aimove = new_top[0][1]
                    ai_thoughts += f"Top 5 among these were {fmt_moves(new_top)} and picked top {aimove.gtp()}. "
                    if new_top[0][0] < pass_policy:
                        ai_thoughts += f"But found pass ({pass_policy:.2%}) to be higher rated than {aimove.gtp()} ({new_top[0][0]:.2%}) so will play top policy move instead."
                        aimove = top_policy_move
                else:
                    aimove = top_policy_move
                    ai_thoughts += f"Pick policy strategy {ai_mode} failed to find legal moves, so is playing top policy move {aimove.gtp()}."
            else:
                raise ValueError(f"Unknown Policy-based AI mode {ai_mode}")
    else:  # Engine based move
        candidate_ai_moves = cn.candidate_moves
        if ai_mode == AI_HANDICAP:
            candidate_ai_moves = handicap_analysis["moveInfos"]

        top_cand = Move.from_gtp(candidate_ai_moves[0]["move"],
                                 player=cn.next_player)
        if top_cand.is_pass and ai_mode not in [AI_DEFAULT, AI_HANDICAP]:  # don't play suicidal to balance score
            aimove = top_cand
            ai_thoughts += f"Top move is pass, so passing regardless of strategy. "
        else:
            if ai_mode == AI_JIGO:
                sign = cn.player_sign(cn.next_player)
                jigo_move = min(
                    candidate_ai_moves,
                    key=lambda move: abs(sign * move["scoreLead"] -
                                         ai_settings["target_score"]))
                aimove = Move.from_gtp(jigo_move["move"],
                                       player=cn.next_player)
                ai_thoughts += f"Jigo strategy found {len(candidate_ai_moves)} candidate moves (best {top_cand.gtp()}) and chose {aimove.gtp()} as closest to 0.5 point win"
            elif ai_mode == AI_SCORELOSS:
                c = ai_settings["strength"]
                moves = [(
                    d["pointsLost"],
                    math.exp(min(200, -c * max(0, d["pointsLost"]))),
                    Move.from_gtp(d["move"], player=cn.next_player),
                ) for d in candidate_ai_moves]
                topmove = weighted_selection_without_replacement(moves, 1)[0]
                aimove = topmove[2]
                ai_thoughts += f"ScoreLoss strategy found {len(candidate_ai_moves)} candidate moves (best {top_cand.gtp()}) and chose {aimove.gtp()} (weight {topmove[1]:.3f}, point loss {topmove[0]:.1f}) based on score weights."
            else:
                if ai_mode not in [AI_DEFAULT, AI_HANDICAP]:
                    game.katrain.log(
                        f"Unknown AI mode {ai_mode} or policy missing, using default.",
                        OUTPUT_INFO)
                    ai_thoughts += f"Strategy {ai_mode} not found or unexpected fallback."
                aimove = top_cand
                if ai_mode == AI_HANDICAP:
                    ai_thoughts += f"Handicap strategy found {len(candidate_ai_moves)} moves returned from the engine and chose {aimove.gtp()} as top move. PDA based score {cn.format_score(handicap_analysis['rootInfo']['scoreLead'])} and win rate {cn.format_winrate(handicap_analysis['rootInfo']['winrate'])}"
                else:
                    ai_thoughts += f"Default strategy found {len(candidate_ai_moves)} moves returned from the engine and chose {aimove.gtp()} as top move"
    game.katrain.log(f"AI thoughts: {ai_thoughts}", OUTPUT_DEBUG)
    played_node = game.play(aimove)
    played_node.ai_thoughts = ai_thoughts
    return aimove, played_node
Code example #17
    def draw_hover_contents(self, *_args):
        ghost_alpha = GHOST_ALPHA
        katrain = self.katrain
        game_ended = katrain.game.ended
        current_node = katrain.game.current_node
        player, next_player = current_node.player, current_node.next_player

        board_size_x, board_size_y = katrain.game.board_size
        if len(self.gridpos_x) < board_size_x or len(self.gridpos_y) < board_size_y:
            return  # race condition

        with self.canvas.after:
            self.canvas.after.clear()
            self.active_pv_moves = []

            # children of current moves in undo / review
            alpha = GHOST_ALPHA
            if katrain.analysis_controls.show_children.active:
                for child_node in current_node.children:
                    points_lost = child_node.points_lost
                    move = child_node.move
                    if move and move.coords is not None:
                        if points_lost is None:
                            evalcol = None
                        else:
                            evalcol = copy.copy(self.eval_color(points_lost))
                            evalcol[3] = alpha
                        if child_node.analysis_ready:
                            self.active_pv_moves.append(
                                (move.coords, [move.gtp()] + child_node.candidate_moves[0]["pv"], current_node)
                            )
                        scale = CHILD_SCALE
                        self.draw_stone(
                            move.coords[0],
                            move.coords[1],
                            move.player,
                            alpha=alpha,
                            evalcol=evalcol,
                            evalscale=scale,
                            scale=scale,
                        )

            # hints or PV
            if katrain.analysis_controls.hints.active and not game_ended:
                hint_moves = current_node.candidate_moves
                for i, move_dict in enumerate(hint_moves):
                    move = Move.from_gtp(move_dict["move"])
                    if move.coords is not None:
                        alpha, scale = GHOST_ALPHA, 1.0
                        if move_dict["visits"] < VISITS_FRAC_SMALL * hint_moves[0]["visits"]:
                            scale = 0.8
                        if "pv" in move_dict:
                            self.active_pv_moves.append((move.coords, move_dict["pv"], current_node))
                        else:
                            katrain.log(f"PV missing for move_dict {move_dict}", OUTPUT_DEBUG)
                        draw_circle(
                            (self.gridpos_x[move.coords[0]],
                             self.gridpos_y[move.coords[1]]),
                            col=[
                                *self.eval_color(move_dict["pointsLost"])[:3],
                                alpha
                            ],
                            r=self.stone_size * scale,
                        )
                        if i == 0:
                            Color(*TOP_MOVE_BORDER_COLOR)
                            Line(
                                circle=(
                                    self.gridpos_x[move.coords[0]],
                                    self.gridpos_y[move.coords[1]],
                                    self.stone_size * scale - 1.2,
                                ),
                                width=dp(1.2),
                            )

            # hover next move ghost stone
            if self.ghost_stone:
                self.draw_stone(*self.ghost_stone,
                                next_player,
                                alpha=ghost_alpha)

            animating_pv = self.animating_pv
            if animating_pv:
                pv, node, start_time, _ = animating_pv
                delay = self.trainer_config.get("anim_pv_time", 0.5)
                up_to_move = (time.time() - start_time) / delay
                self.draw_pv(pv, node, up_to_move)
Code example #18
def generate_ai_move(game: Game, ai_mode: str,
                     ai_settings: Dict) -> Tuple[Move, GameNode]:
    cn = game.current_node
    while not cn.analysis_ready:
        time.sleep(0.01)
        game.engines[cn.next_player].check_alive(exception_if_dead=True)

    ai_thoughts = ""
    if ai_mode in AI_STRATEGIES_POLICY and cn.policy:  # pure policy based move
        policy_moves = cn.policy_ranking
        pass_policy = cn.policy[-1]
        # don't make it jump around for the last few sensible non-pass moves
        top_5_pass = any([polmove[1].is_pass for polmove in policy_moves[:5]])

        size = game.board_size
        policy_grid = var_to_grid(cn.policy, size)  # type: List[List[float]]
        top_policy_move = policy_moves[0][1]
        ai_thoughts += f"Using policy based strategy, base top 5 moves are {fmt_moves(policy_moves[:5])}. "
        if (ai_mode == AI_POLICY and cn.depth <= ai_settings["opening_moves"]) or (
            ai_mode in [AI_LOCAL, AI_TENUKI] and not (cn.move and cn.move.coords)
        ):
            ai_mode = AI_WEIGHTED
            ai_thoughts += "Strategy override, using policy-weighted strategy instead. "
            ai_settings = {"pick_override": 0.9, "weaken_fac": 1, "lower_bound": 0.02}

        if top_5_pass:
            aimove = top_policy_move
            ai_thoughts += "Playing top one because one of them is pass."
        elif ai_mode == AI_POLICY:
            aimove = top_policy_move
            ai_thoughts += f"Playing top policy move {aimove.gtp()}."
        else:  # weighted or pick-based
            legal_policy_moves = [(pol, mv) for pol, mv in policy_moves
                                  if not mv.is_pass and pol > 0]
            board_squares = size[0] * size[1]
            if ai_mode == AI_RANK:  # calibrated, override from 0.8 at start to ~0.4 at full board
                override = 0.8 * (1 - 0.5 * (board_squares - len(legal_policy_moves)) / board_squares)
            else:
                override = ai_settings["pick_override"]

            if policy_moves[0][0] > override:
                aimove = top_policy_move
                ai_thoughts += f"Top policy move has weight > {override:.1%}, so overriding other strategies."
            elif ai_mode == AI_WEIGHTED:
                aimove, ai_thoughts = policy_weighted_move(
                    policy_moves, ai_settings["lower_bound"],
                    ai_settings["weaken_fac"])
            elif ai_mode in AI_STRATEGIES_PICK:
                if ai_mode != AI_RANK:
                    n_moves = int(ai_settings["pick_frac"] * len(legal_policy_moves) + ai_settings["pick_n"])
                else:
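                    # calibrated: the number of picked candidates scales with board area
                    # and shrinks exponentially for weaker (higher) kyu ranks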
                    n_moves = int(round(board_squares / 361 * 10 ** (-0.05737 * ai_settings["kyu_rank"] + 1.9482)))

                if ai_mode in [AI_INFLUENCE, AI_TERRITORY, AI_LOCAL, AI_TENUKI]:
                    if cn.depth > ai_settings["endgame"] * board_squares:
                        weighted_coords = [(pol, 1, *mv.coords) for pol, mv in legal_policy_moves]
                        x_ai_thoughts = f"Generated equal weights as move number >= {ai_settings['endgame'] * size[0] * size[1]}. "
                        n_moves = int(max(n_moves, 0.5 * len(legal_policy_moves)))
                    elif ai_mode in [AI_INFLUENCE, AI_TERRITORY]:
                        weighted_coords, x_ai_thoughts = generate_influence_territory_weights(
                            ai_mode, ai_settings, policy_grid, size)
                    else:  # ai_mode in [AI_LOCAL, AI_TENUKI]
                        weighted_coords, x_ai_thoughts = generate_local_tenuki_weights(
                            ai_mode, ai_settings, policy_grid, cn, size)
                    ai_thoughts += x_ai_thoughts
                else:  # ai_mode in [AI_PICK, AI_RANK]:
                    weighted_coords = [(policy_grid[y][x], 1, x, y)
                                       for x in range(size[0])
                                       for y in range(size[1])
                                       if policy_grid[y][x] > 0]

                pick_moves = weighted_selection_without_replacement(
                    weighted_coords, n_moves)
                ai_thoughts += f"Picked {min(n_moves,len(weighted_coords))} random moves according to weights. "

                if pick_moves:
                    new_top = [
                        (p, Move((x, y), player=cn.next_player))
                        for p, wt, x, y in heapq.nlargest(5, pick_moves)
                    ]
                    aimove = new_top[0][1]
                    ai_thoughts += f"Top 5 among these were {fmt_moves(new_top)} and picked top {aimove.gtp()}. "
                    if new_top[0][0] < pass_policy:
                        ai_thoughts += f"But found pass ({pass_policy:.2%} to be higher rated than {aimove.gtp()} ({new_top[0][0]:.2%}) so will play top policy move instead."
                        aimove = top_policy_move
                else:
                    aimove = top_policy_move
                    ai_thoughts += f"Pick policy strategy {ai_mode} failed to find legal moves, so is playing top policy move {aimove.gtp()}."
            else:
                raise ValueError(f"Unknown Policy-based AI mode {ai_mode}")
    else:  # Engine based move
        candidate_ai_moves = cn.candidate_moves
        top_cand = Move.from_gtp(candidate_ai_moves[0]["move"],
                                 player=cn.next_player)
        if top_cand.is_pass:  # don't play suicidal to balance score - pass when it's best
            aimove = top_cand
            ai_thoughts += f"Top move is pass, so passing regardless of strategy."
        else:
            if ai_mode == AI_JIGO:
                sign = cn.player_sign(cn.next_player)
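                # choose the candidate whose signed score lead is closest to the configured target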
                jigo_move = min(
                    candidate_ai_moves,
                    key=lambda move: abs(sign * move["scoreLead"] -
                                         ai_settings["target_score"]))
                aimove = Move.from_gtp(jigo_move["move"],
                                       player=cn.next_player)
                ai_thoughts += f"Jigo strategy found {len(candidate_ai_moves)} candidate moves (best {top_cand.gtp()}) and chose {aimove.gtp()} as closest to 0.5 point win"
            elif ai_mode == AI_SCORELOSS:
                c = ai_settings["strength"]
                moves = [(
                    d["pointsLost"],
                    math.exp(min(200, -c * max(0, d["pointsLost"]))),
                    Move.from_gtp(d["move"], player=cn.next_player),
                ) for d in candidate_ai_moves]
                topmove = weighted_selection_without_replacement(moves, 1)[0]
                aimove = topmove[2]
                ai_thoughts += f"ScoreLoss strategy found {len(candidate_ai_moves)} candidate moves (best {top_cand.gtp()}) and chose {aimove.gtp()} (weight {topmove[1]:.3f}, point loss {topmove[0]:.1f}) based on score weights."
            else:
                if ai_mode != AI_DEFAULT:
                    game.katrain.log(
                        f"Unknown AI mode {ai_mode} or policy missing, using default.",
                        OUTPUT_INFO)
                    ai_thoughts += f"Strategy {ai_mode} not found or unexpected fallback."
                aimove = top_cand
                ai_thoughts += f"Default strategy found {len(candidate_ai_moves)} moves returned from the engine and chose {aimove.gtp()} as top move"
    game.katrain.log(f"AI thoughts: {ai_thoughts}", OUTPUT_DEBUG)
    played_node = game.play(aimove)
    played_node.ai_thoughts = ai_thoughts
    return aimove, played_node
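
A quick usage sketch of the function above, borrowing the MockKaTrain/MockEngine test doubles from the board-test examples earlier on this page; the AI_POLICY constant and the "opening_moves" setting are assumptions inferred from the branches shown, not a documented API:

game = Game(MockKaTrain(force_package_config=True), MockEngine())
move, node = generate_ai_move(game, AI_POLICY, {"opening_moves": 19})
print(f"AI played {move.gtp()}: {node.ai_thoughts}")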
Code example #19
    def draw_hover_contents(self, *_args):
        ghost_alpha = POLICY_ALPHA
        katrain = self.katrain
        game_ended = katrain.game.end_result
        current_node = katrain.game.current_node
        player, next_player = current_node.player, current_node.next_player

        board_size_x, board_size_y = katrain.game.board_size
        if len(self.gridpos_x) < board_size_x or len(self.gridpos_y) < board_size_y:
            return  # race condition

        with self.canvas.after:
            self.canvas.after.clear()
            self.active_pv_moves = []

            # hints or PV
            hint_moves = []
            if (
                katrain.analysis_controls.hints.active
                and not katrain.analysis_controls.policy.active
                and not game_ended
            ):
                hint_moves = current_node.candidate_moves
            elif katrain.controls.status_state[1] == STATUS_TEACHING:  # show score hint for teaching undo
                hint_moves = [
                    m
                    for m in current_node.candidate_moves
                    for c in current_node.children
                    if c.move and c.auto_undo and c.move.gtp() == m["move"]
                ]

            top_move_coords = None
            if hint_moves:
                low_visits_threshold = katrain.config("trainer/low_visits", 25)
                for move_dict in hint_moves:
                    move = Move.from_gtp(move_dict["move"])
                    if move.coords is not None:
                        engine_best_move = move_dict.get("order", 99) == 0
                        scale = HINT_SCALE
                        text_on = True
                        alpha = HINTS_ALPHA
                        if move_dict["visits"] < low_visits_threshold and not engine_best_move:
                            scale = UNCERTAIN_HINT_SCALE
                            text_on = False
                            alpha = HINTS_MIN_ALPHA + (HINTS_ALPHA - HINTS_MIN_ALPHA) * (
                                move_dict["visits"] / low_visits_threshold
                            )
                        if "pv" in move_dict:
                            self.active_pv_moves.append((move.coords, move_dict["pv"], current_node))
                        else:
                            katrain.log(f"PV missing for move_dict {move_dict}", OUTPUT_DEBUG)
                        evalsize = self.stone_size * scale
                        evalcol = self.eval_color(move_dict["pointsLost"])
                        Color(*evalcol[:3], alpha)
                        Rectangle(
                            pos=(self.gridpos_x[move.coords[0]] - evalsize, self.gridpos_y[move.coords[1]] - evalsize),
                            size=(2 * evalsize, 2 * evalsize),
                            source="img/topmove.png",
                        )
                        if self.trainer_config["text_point_loss"] and text_on:
                            if move_dict["pointsLost"] < 0.05:
                                ptloss_text = "0.0"
                            else:
                                ptloss_text = f"{-move_dict['pointsLost']:+.1f}"
                            sizefac = 1
                            Color(*BLACK)
                            draw_text(
                                pos=(self.gridpos_x[move.coords[0]], self.gridpos_y[move.coords[1]]),
                                text=ptloss_text,
                                font_size=self.grid_size * sizefac / 2.5,
                                font_name="Roboto",
                            )

                        if engine_best_move:
                            top_move_coords = move.coords
                            Color(*TOP_MOVE_BORDER_COLOR)
                            Line(
                                circle=(
                                    self.gridpos_x[move.coords[0]],
                                    self.gridpos_y[move.coords[1]],
                                    self.stone_size - dp(1.2),
                                ),
                                width=dp(1.2),
                            )

            # children of current moves in undo / review
            if katrain.analysis_controls.show_children.active:
                for child_node in current_node.children:
                    move = child_node.move
                    if move and move.coords is not None:
                        if child_node.analysis_ready:
                            self.active_pv_moves.append(
                                (move.coords, [move.gtp()] + child_node.candidate_moves[0]["pv"], current_node)
                            )

                        if move.coords != top_move_coords:  # for contrast
                            dashed_width = 18
                            Color(*STONE_CONTRAST_COLORS[child_node.player])
                            Line(
                                circle=(
                                    self.gridpos_x[move.coords[0]],
                                    self.gridpos_y[move.coords[1]],
                                    self.stone_size - dp(1.2),
                                ),
                                width=dp(1.2),
                            )
                        else:
                            dashed_width = 10
                        Color(*STONE_COLORS[child_node.player])
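                        # dash the outline: draw short arcs every 30 degrees, leaving gaps between them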
                        for s in range(0, 360, 30):
                            Line(
                                circle=(
                                    self.gridpos_x[move.coords[0]],
                                    self.gridpos_y[move.coords[1]],
                                    self.stone_size - dp(1.2),
                                    s,
                                    s + dashed_width,
                                ),
                                width=dp(1.2),
                            )

            # hover next move ghost stone
            if self.ghost_stone:
                self.draw_stone(*self.ghost_stone, next_player, alpha=ghost_alpha)

            animating_pv = self.animating_pv
            if animating_pv:
                pv, node, start_time, _ = animating_pv
                delay = self.katrain.config("general/anim_pv_time", 0.5)
                up_to_move = (time.time() - start_time) / delay
                self.draw_pv(pv, node, up_to_move)
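
Both pick-based strategies above lean on weighted_selection_without_replacement, whose implementation is not shown on this page. The stand-in below samples up to pick_n tuples with probability proportional to their weight via the Efraimidis-Spirakis key trick; treating pol * wt as the effective weight of a (pol, wt, x, y) tuple is an assumption inferred from how weighted_coords is built, not confirmed katrain code:

import heapq
import random
from typing import List, Tuple

def weighted_selection_without_replacement(items: List[Tuple], pick_n: int) -> List[Tuple]:
    """Sample up to pick_n items without replacement, proportional to item[0] * item[1]."""
    # Efraimidis-Spirakis: key each item by -Exp(1) / weight and keep the largest keys,
    # which is equivalent to ranking by u ** (1 / weight) for u ~ U(0, 1).
    keyed = [(-random.expovariate(1.0) / (item[0] * item[1]), item) for item in items]
    return [item for _, item in heapq.nlargest(pick_n, keyed)]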