Example #1
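A test in which two NNPlayer instances (100 MCTS simulations each, backed by a fresh JanggiNetwork) play each other, with the match capped at 100 iterations.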
def test_fight2(self):
    player_blue = NNPlayer(Color.BLUE,
                           n_simulations=100,
                           janggi_net=JanggiNetwork(),
                           temperature_start=0.01,
                           temperature_threshold=30,
                           temperature_end=0.01)
    player_red = NNPlayer(Color.RED,
                          n_simulations=100,
                          janggi_net=JanggiNetwork(),
                          temperature_start=0.01,
                          temperature_threshold=30,
                          temperature_end=0.01)
    fight(player_blue, player_red, 100)
Example #2
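A test that plays a RandomPlayer (BLUE) against a RandomMCTSPlayer running 200 simulations (RED), capped at 200 iterations, and asserts that the MCTS player wins.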
def test_mcts_vs_random(self):
    player_blue = RandomPlayer(Color.BLUE)
    player_red = RandomMCTSPlayer(Color.RED,
                                  n_simulations=200,
                                  temperature_start=0.01,
                                  temperature_threshold=30,
                                  temperature_end=0.01)
    winner = fight(player_blue, player_red, 200)
    self.assertEqual(winner, Color.RED)
Example #3
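Pits an NNPlayer (using self.predictor and self.n_simulations) first against a RandomPlayer and then against a RandomMCTSPlayer with self.n_simulations_opponent simulations; each match is capped at self.iter_max iterations.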
def organize_fight(self):
    player_red = RandomPlayer(Color.RED)
    player_blue = NNPlayer(Color.BLUE,
                           n_simulations=self.n_simulations,
                           janggi_net=self.predictor,
                           temperature_start=0.01,
                           temperature_threshold=30,
                           temperature_end=0.01)
    fight(player_blue, player_red, self.iter_max)
    player_red = RandomMCTSPlayer(
        Color.RED,
        n_simulations=self.n_simulations_opponent,
        temperature_start=0.01,
        temperature_threshold=30,
        temperature_end=0.01)
    player_blue = NNPlayer(Color.BLUE,
                           n_simulations=self.n_simulations,
                           janggi_net=self.predictor,
                           temperature_start=0.01,
                           temperature_threshold=30,
                           temperature_end=0.01)
    fight(player_blue, player_red, self.iter_max)
Example #4
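An endless evaluation loop: it samples two players from the pool (the baseline "random_mcts" plus every saved weight index), lets them fight for up to 200 iterations, and accumulates per-pairing win counts. The excerpt relies on helpers defined elsewhere in the script (load, save, print_results, get_player, ModelSaver) and on import random.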
    result = load()

    while True:

        players = ["random_mcts"]
        model_saver = ModelSaver()
        for i in range(model_saver.get_last_weight_index() + 1):
            players.append(i)
        if len(players) == 1:
            print("Not enough players")
            continue
        player_one_name, player_two_name = random.sample(players, k=2)
        player_one = get_player(player_one_name, Color.BLUE, model_saver)
        player_two = get_player(player_two_name, Color.RED, model_saver)

        player_one_name = str(player_one_name)
        player_two_name = str(player_two_name)

        if player_one_name not in result:
            result[player_one_name] = dict()
        if player_two_name not in result[player_one_name]:
            result[player_one_name][player_two_name] = [0, 0]

        winner = fight(player_one, player_two, 200)
        if winner == Color.BLUE:
            result[player_one_name][player_two_name][0] += 1
        else:
            result[player_one_name][player_two_name][1] += 1
        print_results(result)
        save(result)
Example #5
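A script that shows a match between an NNPlayer backed by a FilePredictor and a RandomMCTSPlayer running 16000 simulations, printing the board as the game progresses; a commented-out block lets the RED side be an NNPlayer instead.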
from ia.random_mcts_player import NNPlayer, fight, RandomMCTSPlayer
from janggi.parameters import N_ITERATIONS, DEFAULT_N_SIMULATIONS
from janggi.utils import Color
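# NOTE: FilePredictor (used below) is not imported in this excerpt; its import is omitted here.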

# Example of command:
#    python3 show_match_nn.py --number_simulations 800 --n_iterations 200 --root_file_inference /tmp/showmatch --parallel_mcts True --n_threads_mcts 10

player_blue = NNPlayer(Color.BLUE,
                       n_simulations=DEFAULT_N_SIMULATIONS,
                       janggi_net=FilePredictor(),
                       temperature_start=0.01,
                       temperature_threshold=30,
                       temperature_end=0.01,
                       print_info=True)

player_red = RandomMCTSPlayer(Color.RED,
                              n_simulations=16000,
                              temperature_start=0.01,
                              temperature_threshold=30,
                              temperature_end=0.01,
                              print_info=True)

# player_red = NNPlayer(Color.RED, n_simulations=DEFAULT_N_SIMULATIONS,
#                       janggi_net=FilePredictor(),
#                       temperature_start=0.01,
#                       temperature_threshold=30,
#                       temperature_end=0.01,
#                       print_info=True)

fight(player_blue, player_red, N_ITERATIONS, print_board=True)
Example #6
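A minimal HumanPlayer that reads moves in UCI/USI notation from standard input, matched against a RandomMCTSPlayer that keeps searching during the human's turn (think_when_other=True).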
from ia.random_mcts_player import fight, RandomMCTSPlayer
from janggi.action import Action
from janggi.player import Player
from janggi.utils import Color


class HumanPlayer(Player):
    def play_action(self):
        while True:
            read_data = input("Enter your action:")
            try:
                action = Action.from_uci_usi(read_data.strip())
                break
            except IndexError:
                print("Invalid Action")
        return action


if __name__ == "__main__":
    player_blue = HumanPlayer(Color.BLUE)
    player_red = RandomMCTSPlayer(Color.RED,
                                  n_simulations=16000,
                                  temperature_start=0.01,
                                  temperature_threshold=30,
                                  temperature_end=0.01,
                                  think_when_other=True,
                                  print_info=True)
    winner = fight(player_blue, player_red, 200, print_board=True)
Example #7
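One iteration of continuous learning: train the predictor on stored episodes, then play N_FIGHTS evaluation games against the previously saved model (alternating colours) and save the new weights only if the victory percentage exceeds VICTORY_THRESHOLD.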
def continuous_learning_once(self):
    # First, train
    for _ in range(EPOCH_NUMBER_CONTINUOUS):
        training_set = []
        for example in _raw_to_examples(
                self.model_saver.all_episodes_raw_iterators(),
                PROP_POPULATION_FOR_LEARNING):
            training_set.append(example)
            if len(training_set) > N_LAST_GAME_TO_CONSIDER:
                if not TRAIN_ON_ALL:
                    break
                self.train(training_set)
                training_set = []
        self.train(training_set)
    # Then, fight!
    # old_model = copy.deepcopy(self.predictor)
    self.model_saver.load_latest_model(self.old_model, None)
    self.old_model.to(DEVICE)
    victories = 0
    print("Start the fights!")
    for i in range(N_FIGHTS):
        if i < N_FIGHTS / 2:
            print("I am BLUE")
            new_player = NNPlayer(Color.BLUE,
                                  n_simulations=self.n_simulations,
                                  janggi_net=self.predictor,
                                  temperature_start=0.01,
                                  temperature_threshold=30,
                                  temperature_end=0.01)
            old_player = NNPlayer(Color.RED,
                                  n_simulations=self.n_simulations,
                                  janggi_net=self.old_model,
                                  temperature_start=0.01,
                                  temperature_threshold=30,
                                  temperature_end=0.01)
            winner = fight(new_player, old_player, self.iter_max)
            if winner == Color.BLUE:
                victories += 1
        else:
            print("I am RED")
            new_player = NNPlayer(Color.RED,
                                  n_simulations=self.n_simulations,
                                  janggi_net=self.predictor,
                                  temperature_start=0.01,
                                  temperature_threshold=30,
                                  temperature_end=0.01)
            old_player = NNPlayer(Color.BLUE,
                                  n_simulations=self.n_simulations,
                                  janggi_net=self.old_model,
                                  temperature_start=0.01,
                                  temperature_threshold=30,
                                  temperature_end=0.01)
            winner = fight(old_player, new_player, self.iter_max)
            if winner == Color.RED:
                victories += 1
        if (victories + N_FIGHTS - i -
                1) / N_FIGHTS * 100 < VICTORY_THRESHOLD:
            # There is no more hope...
            break
    victory_percentage = victories / N_FIGHTS * 100
    if victory_percentage > VICTORY_THRESHOLD:
        # Replace model
        print("The model was good enough", victory_percentage)
        self.model_saver.save_weights(self.predictor,
                                      optimizer=self.optimizer)
    else:
        # We do not save the model
        print("The model was not good enough", victory_percentage)