def test_create_manager():
    run_options = RunOptions.from_dict(yaml.safe_load(test_everything_config_yaml))
    param_manager = EnvironmentParameterManager(
        run_options.environment_parameters, 1337, False
    )
    assert param_manager.get_minimum_reward_buffer_size("fake_behavior") == 100
    assert param_manager.get_current_lesson_number() == {
        "param_1": 0,
        "param_2": 0,
        "param_3": 0,
    }
    assert param_manager.get_current_samplers() == {
        "param_1": ConstantSettings(seed=1337, value=1),
        "param_2": GaussianSettings(seed=1337 + 3, mean=4, st_dev=5),
        "param_3": ConstantSettings(seed=1337 + 3 + 1, value=20),
    }
    # Not enough episodes completed
    assert param_manager.update_lessons(
        trainer_steps={"fake_behavior": 500},
        trainer_max_steps={"fake_behavior": 1000},
        trainer_reward_buffer={"fake_behavior": [1000] * 99},
    ) == (False, False)
    # Not enough episodes reward
    assert param_manager.update_lessons(
        trainer_steps={"fake_behavior": 500},
        trainer_max_steps={"fake_behavior": 1000},
        trainer_reward_buffer={"fake_behavior": [1] * 101},
    ) == (False, False)
    assert param_manager.update_lessons(
        trainer_steps={"fake_behavior": 500},
        trainer_max_steps={"fake_behavior": 1000},
        trainer_reward_buffer={"fake_behavior": [1000] * 101},
    ) == (True, True)
    assert param_manager.get_current_lesson_number() == {
        "param_1": 1,
        "param_2": 0,
        "param_3": 0,
    }
    param_manager_2 = EnvironmentParameterManager(
        run_options.environment_parameters, 1337, restore=True
    )
    # The use of global status should make it so that the lesson numbers are maintained
    assert param_manager_2.get_current_lesson_number() == {
        "param_1": 1,
        "param_2": 0,
        "param_3": 0,
    }
    # No reset required
    assert param_manager.update_lessons(
        trainer_steps={"fake_behavior": 700},
        trainer_max_steps={"fake_behavior": 1000},
        trainer_reward_buffer={"fake_behavior": [0] * 101},
    ) == (True, False)
    assert param_manager.get_current_samplers() == {
        "param_1": UniformSettings(seed=1337 + 2, min_value=1, max_value=3),
        "param_2": GaussianSettings(seed=1337 + 3, mean=4, st_dev=5),
        "param_3": ConstantSettings(seed=1337 + 3 + 1, value=20),
    }
    def _initialize_trainer(
        trainer_settings: TrainerSettings,
        brain_name: str,
        output_path: str,
        train_model: bool,
        load_model: bool,
        ghost_controller: GhostController,
        seed: int,
        param_manager: EnvironmentParameterManager,
        init_path: str = None,
        multi_gpu: bool = False,
    ) -> Trainer:
        """
        Initializes a trainer given a provided trainer configuration and brain parameters, as well as
        some general training session options.

        :param trainer_settings: Original trainer configuration loaded from YAML
        :param brain_name: Name of the brain to be associated with trainer
        :param output_path: Path to save the model and summary statistics
        :param keep_checkpoints: How many model checkpoints to keep
        :param train_model: Whether to train the model (vs. run inference)
        :param load_model: Whether to load the model or randomly initialize
        :param ghost_controller: The object that coordinates ghost trainers
        :param seed: The random seed to use
        :param param_manager: EnvironmentParameterManager, used to determine a reward buffer length for PPOTrainer
        :param init_path: Path from which to load model, if different from model_path.
        :return:
        """
        trainer_artifact_path = os.path.join(output_path, brain_name)
        if init_path is not None:
            trainer_settings.init_path = os.path.join(init_path, brain_name)

        min_lesson_length = param_manager.get_minimum_reward_buffer_size(
            brain_name)

        trainer: Trainer = None  # type: ignore  # will be set to one of these, or raise
        trainer_type = trainer_settings.trainer_type

        if trainer_type == TrainerType.PPO:
            trainer = PPOTrainer(
                brain_name,
                min_lesson_length,
                trainer_settings,
                train_model,
                load_model,
                seed,
                trainer_artifact_path,
            )
        elif trainer_type == TrainerType.SAC:
            trainer = SACTrainer(
                brain_name,
                min_lesson_length,
                trainer_settings,
                train_model,
                load_model,
                seed,
                trainer_artifact_path,
            )
        else:
            raise TrainerConfigError(
                f'The trainer config contains an unknown trainer type "{trainer_type}" for brain {brain_name}'
            )

        if trainer_settings.self_play is not None:
            trainer = GhostTrainer(
                trainer,
                brain_name,
                ghost_controller,
                min_lesson_length,
                trainer_settings,
                train_model,
                trainer_artifact_path,
            )
        return trainer