from dataclasses import astuple


def batch_info_worker_task(args):
    (
        environment_name,
        species_name,
        batch_num,
        worker_num,
        num_workers,
    ) = args

    # Go through every replay and sum up stats
    env_class = get_env_module(environment_name)
    replay_directory = find_batch_directory(environment_name, species_name,
                                            batch_num)
    ws = WorkerStats()
    for agent_replay in iter_replay_data(
            replay_directory,
            env_class.State,
            worker_num,
            num_workers,
    ):
        ws.total_mcts_considerations += agent_replay.total_mcts_considerations()
        ws.num_games += 1
        ws.num_positions += len(agent_replay.positions)
    return astuple(ws)
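
A minimal sketch of how this worker task might be driven, assuming a
multiprocessing.Pool fan-out and that WorkerStats is a dataclass with the
three fields used above (both the WorkerStats definition and the batch_info
wrapper here are assumptions for illustration, not the repo's actual code):

from dataclasses import dataclass
from multiprocessing import Pool


@dataclass
class WorkerStats:
    # Field names inferred from the attribute accesses above
    total_mcts_considerations: int = 0
    num_games: int = 0
    num_positions: int = 0


def batch_info(environment_name, species_name, batch_num, num_workers=4):
    # Each worker scans a disjoint slice of replays; sum the per-worker
    # tuples column-wise to get batch-level totals.
    args = [
        (environment_name, species_name, batch_num, worker_num, num_workers)
        for worker_num in range(num_workers)
    ]
    with Pool(num_workers) as pool:
        per_worker = pool.map(batch_info_worker_task, args)
    return WorkerStats(*(sum(column) for column in zip(*per_worker)))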
Example #2
import os


def find_batch_sample_files(environment, bot_species, batch_num, model_type):
    sample_file_paths = dict(
        meta=[],
        features=[],
        labels=[],
    )
    replay_directory = find_batch_directory(environment, bot_species,
                                            batch_num)
    for file_name in os.listdir(replay_directory):
        if not file_name.endswith(".npy"):
            continue

        # Determine which kind of data this file holds
        for data_type in sample_file_paths.keys():
            if file_name.startswith(f"{model_type}_{data_type}_samples"):
                file_path = os.path.join(replay_directory, file_name)
                sample_file_paths[data_type].append(file_path)
                break

    # Presort the paths to be safe.
    # - Downstream operations assume these files are paired up by index.
    for _, paths in sample_file_paths.items():
        paths.sort()

    assert len(sample_file_paths["meta"]) == len(sample_file_paths["features"])
    assert len(sample_file_paths["features"]) == len(
        sample_file_paths["labels"])

    return sample_file_paths
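
Usage sketch: because the shard lists come back sorted and length-checked,
they can be loaded and concatenated pairwise. load_batch_samples is a
hypothetical helper written for illustration; it assumes the shards were
written with numpy.save, as in Example #5.

import numpy


def load_batch_samples(environment, bot_species, batch_num, model_type):
    # Hypothetical helper: stack the per-worker shards back into single
    # feature/label arrays. Pairing by index is guaranteed by the sort
    # in find_batch_sample_files.
    paths = find_batch_sample_files(environment, bot_species, batch_num,
                                    model_type)
    features = numpy.concatenate([numpy.load(p) for p in paths["features"]])
    labels = numpy.concatenate([numpy.load(p) for p in paths["labels"]])
    return features, labels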
Example #3
import os
import random


def sample_batch_replay_files(
    environment,
    species,
    batch,
):
    replay_directory = find_batch_directory(environment, species, batch)

    # Collect paths to all replay JSON files in the batch directory
    print("Grabbing replays")
    all_replays = []
    for file_name in os.listdir(replay_directory):
        if not file_name.endswith(".json"):
            continue
        file_path = os.path.join(replay_directory, file_name)
        all_replays.append(file_path)

    print("Shuffling")
    random.shuffle(all_replays)

    return all_replays
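
Usage sketch (argument values are illustrative): since the returned list is
already shuffled, taking a uniform random sample of n replays is just a
slice.

replay_paths = sample_batch_replay_files("connect_four", "mcts_gen1", 3)
subset = replay_paths[:25]  # first 25 of a shuffled list = random sample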
Example #4
import os
import random


def sample_batch_replays(
    environment,
    species,
    batch,
    speed=.3,
    first_n_moves=1_000,
):
    replay_directory = find_batch_directory(environment, species, batch)

    # Collect paths to all replay JSON files in the batch directory
    print("Grabbing replays")
    all_replays = []
    for file_name in os.listdir(replay_directory):
        if not file_name.endswith(".json"):
            continue
        file_path = os.path.join(replay_directory, file_name)
        all_replays.append(file_path)

    print("Shuffling")
    random.shuffle(all_replays)

    for replay_path in all_replays:
        replay_video(replay_path, speed, first_n_moves)
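
Usage sketch (argument values are illustrative): play back a batch's replays
at half speed, cutting each one off after the first 20 moves.

sample_batch_replays("connect_four", "mcts_gen1", 3, speed=.5,
                     first_n_moves=20)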
Example #5
import numpy


def run_worker(args):
    # What is this, Perl??
    (
        environment,
        bot_species,
        batch_num,
        max_positions,
        worker_num,
        num_workers,
    ) = args

    env_class = get_env_module(environment)
    env = env_class.Environment()

    replay_directory = find_batch_directory(environment, bot_species,
                                            batch_num)

    print("Collecting Samples", replay_directory)
    value_meta = []
    value_features = []
    value_labels = []
    policy_meta = []
    policy_features = []
    policy_labels = []
    position_num = 0  # stays defined even if the generator yields nothing
    for position_num, sample in enumerate(
            generate_training_samples(
                replay_directory,
                env_class.State,
                env_class.generate_features,
                env,
                worker_num=worker_num,
                num_workers=num_workers,
            )):
        # Cap the number of samples collected at max_positions
        if position_num >= max_positions:
            break

        # game_bucket, features, labels (or just label for value)
        meta_info = sample[1]  # [game_bucket, generation, ...]
        if sample[0] == "value":
            value_meta.append(meta_info)
            value_features.append(sample[2])  # [[float, ...]]
            value_labels.append(sample[3])  # [int, ...]
        else:
            policy_meta.append(meta_info)
            policy_features.append(sample[2])
            policy_labels.append(sample[3])  # [[float, ...], ...]

    datasets = [
        ("value_meta", value_meta),
        ("value_features", value_features),
        ("value_labels", value_labels),
        ("policy_meta", policy_meta),
        ("policy_features", policy_features),
        ("policy_labels", policy_labels),
    ]
    for sample_type, data in datasets:
        basename = f"{sample_type}_samples_{worker_num + 1:04d}of{num_workers:04d}.npy"
        parsed_samples_path = f"{replay_directory}/{basename}"
        numpy.save(parsed_samples_path, data)
        print(f"Saved: {parsed_samples_path}")

    return position_num
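
Like Example #1, run_worker is shaped for a pool fan-out: each worker parses
a disjoint slice of replays and writes its own NNNNofNNNN shard, which
find_batch_sample_files (Example #2) can later discover. A minimal dispatch
sketch, assuming multiprocessing (the generate_batch_samples wrapper is an
assumption, not the repo's actual entry point):

from multiprocessing import Pool


def generate_batch_samples(environment, bot_species, batch_num,
                           max_positions=100_000, num_workers=4):
    # Hypothetical wrapper: fan the parsing out across workers, then
    # total the number of positions processed.
    args = [
        (environment, bot_species, batch_num, max_positions, worker_num,
         num_workers)
        for worker_num in range(num_workers)
    ]
    with Pool(num_workers) as pool:
        return sum(pool.map(run_worker, args))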