Example #1
def muzero(config: MuZeroConfig):
    """
    MuZero training is split into two independent parts: Network training and
    self-play data generation.
    These two parts communicate only by transferring the latest network checkpoint
    from training to self-play, and the finished games from self-play to training.
    In contrast to the original MuZero algorithm, this version does not work with
    multiple threads, so training and self-play are run alternately.
    """
    storage = SharedStorage(config.new_network(), config.uniform_network(),
                            config.new_optimizer())
    replay_buffer = ReplayBuffer(config)

    for loop in range(config.nb_training_loop):
        print("Training loop", loop)
        score_train = run_selfplay(config, storage, replay_buffer,
                                   config.nb_episodes)
        train_network(config, storage, replay_buffer, config.nb_epochs)

        print("Train score:", score_train)
        print("Eval score:", run_eval(config, storage, NUM_EVAL_EPISODES))
        print(
            f"MuZero played {config.nb_episodes * (loop + 1)} "
            f"episodes and trained for {config.nb_epochs * (loop + 1)} epochs.\n"
        )

    return storage.latest_network()
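
A minimal usage sketch for the driver above, assuming a factory such as make_default_config() exists to build the MuZeroConfig (that helper is hypothetical; only the training loop itself comes from the example):

if __name__ == "__main__":
    # make_default_config() is a hypothetical factory standing in for however
    # the surrounding repository constructs its MuZeroConfig.
    config = make_default_config()
    trained_network = muzero(config)  # alternates self-play and network training
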
Example #2
import matplotlib.pyplot as plt  # needed for the reward/loss plots below

def muzero(config: MuZeroConfig):
    """
    MuZero training is split into two independent parts: Network training and
    self-play data generation.
    These two parts communicate only by transferring the latest network checkpoint
    from training to self-play, and the finished games from self-play to training.
    In contrast to the original MuZero algorithm, this version does not work with
    multiple threads, so training and self-play are run alternately.
    """
    network = config.new_network()
    storage = SharedStorage(network, config.uniform_network(),
                            config.new_optimizer(network))
    replay_buffer = ReplayBuffer(config)

    train_scores = []
    eval_scores = []
    train_losses = []
    for loop in range(config.nb_training_loop):
        print("Training loop", loop)
        score_train = run_selfplay(config, storage, replay_buffer,
                                   config.nb_episodes)
        train_losses += train_network(config, storage, replay_buffer,
                                      config.nb_epochs)
        print("Train score:", score_train)
        score_eval = run_eval(config, storage, 50)
        print("Eval score:", score_eval)
        print(
            f"MuZero played {config.nb_episodes * (loop + 1)} "
            f"episodes and trained for {config.nb_epochs * (loop + 1)} epochs.\n"
        )
        train_scores.append(score_train)
        eval_scores.append(score_eval)

    plt.figure(1)
    plt.plot(train_scores)
    plt.plot(eval_scores)
    plt.title('MuZero Average Rewards')
    plt.xlabel('MuZero Iterations (Train/Eval)')
    plt.ylabel('Reward Score')
    plt.legend(['Train score', 'Eval score'])

    plt.figure(2)
    plt.plot(train_losses, color='green')
    plt.title('MuZero Training Loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.show()

    return storage.latest_network()
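
Example #2 additionally assumes that train_network returns one loss value per epoch, so the train_losses += ... line concatenates losses across training loops for the second plot. A sketch of a conforming return shape, purely to illustrate the assumed interface:

def train_network(config, storage, replay_buffer, epochs):
    # Interface sketch only (not the repository's implementation): each epoch
    # trains the current network on data from the replay buffer and records a
    # scalar loss; the variant above relies on this list-of-losses return value.
    losses = []
    for _ in range(epochs):
        loss = 0.0  # placeholder: compute the loss on a batch sampled from replay_buffer
        losses.append(loss)
    return losses
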
Example #3
def muzero(config: MuZeroConfig):
    storage = SharedStorage(config.new_network(), config.uniform_network(), config.new_optimizer())
    replay_buffer = ReplayBuffer(config)

    for loop in range(config.nb_training_loop):
        print("Training loop", loop)
        score_train = run_selfplay(config, storage, replay_buffer, config.nb_episodes)
        train_network(config, storage, replay_buffer, config.nb_epochs)

        print("Train score:", score_train)
        print("Eval score:", run_eval(config, storage, 50))
        print(f"MuZero played {config.nb_episodes * (loop + 1)} "
              f"episodes and trained for {config.nb_epochs * (loop + 1)} epochs.\n")

    storage.save_network_dir(config.nb_training_loop)

    return storage.latest_network()
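
Across all three examples, SharedStorage is the hand-off point described in the docstrings: training writes the newest network into it and self-play reads it back. A minimal sketch of the interface those calls imply (an assumption; the real class may keep per-step checkpoints and more state, and save_network_dir is only ever shown being called):

class SharedStorage:
    # Sketch of the interface implied by the examples above, not the actual class.
    def __init__(self, network, uniform_network, optimizer):
        self.current_network = network          # network being trained
        self.uniform_network = uniform_network  # assumed fallback before training starts
        self.optimizer = optimizer

    def latest_network(self):
        # Self-play pulls the newest checkpoint from here (see the docstrings above).
        return self.current_network

    def save_network_dir(self, step):
        # Called in Example #3 after training; assumed to persist the current
        # network to disk. The path and file format are not shown in the examples.
        pass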