Example #1
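Builds an NNPolicy and a PPOOptimizer for a dummy brain, then exercises get_trajectory_value_estimates on a fake trajectory: each reward signal gets one value estimate per step, the bootstrap values are zeroed when the trajectory ends with done=True, and they are left non-zero when the extrinsic reward signal is set to ignore terminal states.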
def test_ppo_get_value_estimates(mock_communicator, mock_launcher,
                                 dummy_config):
    tf.reset_default_graph()

    brain_params = BrainParameters(
        brain_name="test_brain",
        vector_observation_space_size=1,
        camera_resolutions=[],
        vector_action_space_size=[2],
        vector_action_descriptions=[],
        vector_action_space_type=0,
    )
    dummy_config["summary_path"] = "./summaries/test_trainer_summary"
    dummy_config["model_path"] = "./models/test_trainer_models/TestModel"
    policy = NNPolicy(0,
                      brain_params,
                      dummy_config,
                      False,
                      False,
                      create_tf_graph=False)
    optimizer = PPOOptimizer(policy, dummy_config)
    time_horizon = 15
    trajectory = make_fake_trajectory(
        length=time_horizon,
        max_step_complete=True,
        vec_obs_size=1,
        num_vis_obs=0,
        action_space=[2],
    )
    run_out, final_value_out = optimizer.get_trajectory_value_estimates(
        trajectory.to_agentbuffer(), trajectory.next_obs, done=False)
    for key, val in run_out.items():
        assert type(key) is str
        assert len(val) == 15

    run_out, final_value_out = optimizer.get_trajectory_value_estimates(
        trajectory.to_agentbuffer(), trajectory.next_obs, done=True)
    for key, val in final_value_out.items():
        assert type(key) is str
        assert val == 0.0

    # Check if we ignore terminal states properly
    optimizer.reward_signals["extrinsic"].use_terminal_states = False
    run_out, final_value_out = optimizer.get_trajectory_value_estimates(
        trajectory.to_agentbuffer(), trajectory.next_obs, done=False)
    for key, val in final_value_out.items():
        assert type(key) is str
        assert val != 0.0
Example #2
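Test helper that builds either a PPOOptimizer or a SACOptimizer from mock behavior specs, wiring the reward-signal settings and optional recurrent memory into the trainer settings before creating the policy.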
def create_optimizer_mock(trainer_config, reward_signal_config, use_rnn,
                          use_discrete, use_visual):
    mock_specs = mb.setup_test_behavior_specs(
        use_discrete,
        use_visual,
        vector_action_space=DISCRETE_ACTION_SPACE
        if use_discrete else VECTOR_ACTION_SPACE,
        vector_obs_space=VECTOR_OBS_SPACE if not use_visual else 0,
    )
    trainer_settings = trainer_config
    trainer_settings.reward_signals = reward_signal_config
    trainer_settings.network_settings.memory = (NetworkSettings.MemorySettings(
        sequence_length=16, memory_size=10) if use_rnn else None)
    policy = NNPolicy(0,
                      mock_specs,
                      trainer_settings,
                      False,
                      "test",
                      False,
                      create_tf_graph=False)
    if trainer_settings.trainer_type == TrainerType.SAC:
        optimizer = SACOptimizer(policy, trainer_settings)
    else:
        optimizer = PPOOptimizer(policy, trainer_settings)
    return optimizer
Example #3
def create_optimizer_mock(trainer_config, reward_signal_config, use_rnn,
                          use_discrete, use_visual):
    mock_brain = mb.setup_mock_brain(
        use_discrete,
        use_visual,
        vector_action_space=VECTOR_ACTION_SPACE,
        vector_obs_space=VECTOR_OBS_SPACE,
        discrete_action_space=DISCRETE_ACTION_SPACE,
    )

    trainer_parameters = trainer_config
    model_path = "testpath"
    trainer_parameters["model_path"] = model_path
    trainer_parameters["keep_checkpoints"] = 3
    trainer_parameters["reward_signals"].update(reward_signal_config)
    trainer_parameters["use_recurrent"] = use_rnn
    policy = NNPolicy(0,
                      mock_brain,
                      trainer_parameters,
                      False,
                      False,
                      create_tf_graph=False)
    if trainer_parameters["trainer"] == "sac":
        optimizer = SACOptimizer(policy, trainer_parameters)
    else:
        optimizer = PPOOptimizer(policy, trainer_parameters)
    return optimizer
Example #4
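Factory method that returns a TorchPPOOptimizer or a TF PPOOptimizer for the trainer's policy, depending on the configured framework.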
 def create_ppo_optimizer(self) -> PPOOptimizer:
     if self.framework == FrameworkType.PYTORCH:
         return TorchPPOOptimizer(  # type: ignore
             cast(TorchPolicy, self.policy), self.trainer_settings  # type: ignore
         )  # type: ignore
     else:
         return PPOOptimizer(  # type: ignore
             cast(TFPolicy, self.policy), self.trainer_settings  # type: ignore
         )  # type: ignore
Example #5
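add_policy implementation that warns when the environment contains multiple teams, registers the policy, creates the PPOOptimizer, initializes per-reward-signal reward counters, and restores the step count from the policy.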
 def add_policy(self, parsed_behavior_id: BehaviorIdentifiers,
                policy: TFPolicy) -> None:
     """
     Adds policy to trainer.
     :param parsed_behavior_id: Behavior identifiers that the policy should belong to.
     :param policy: Policy to associate with name_behavior_id.
     """
     if self.policy:
         logger.warning(
             "Your environment contains multiple teams, but {} doesn't support adversarial games. Enable self-play to \
                 train adversarial games.".format(self.__class__.__name__))
     self.policy = policy
     self.policies[parsed_behavior_id.behavior_id] = policy
     self.optimizer = PPOOptimizer(self.policy, self.trainer_settings)
     for _reward_signal in self.optimizer.reward_signals.keys():
         self.collected_rewards[_reward_signal] = defaultdict(lambda: 0)
     # Needed to resume loads properly
     self.step = policy.get_current_step()
Example #6
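Test helper that creates a PPOOptimizer from a mock brain using a dictionary-based dummy config.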
def _create_ppo_optimizer_ops_mock(dummy_config, use_rnn, use_discrete, use_visual):
    mock_brain = mb.setup_mock_brain(
        use_discrete,
        use_visual,
        vector_action_space=VECTOR_ACTION_SPACE,
        vector_obs_space=VECTOR_OBS_SPACE,
        discrete_action_space=DISCRETE_ACTION_SPACE,
    )

    trainer_parameters = dummy_config
    model_path = "testmodel"
    trainer_parameters["model_path"] = model_path
    trainer_parameters["keep_checkpoints"] = 3
    trainer_parameters["use_recurrent"] = use_rnn
    policy = NNPolicy(
        0, mock_brain, trainer_parameters, False, False, create_tf_graph=False
    )
    optimizer = PPOOptimizer(policy, trainer_parameters)
    return optimizer
Example #7
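add_policy variant that additionally rejects anything other than an NNPolicy and recomputes the next summary step after restoring the step count.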
 def add_policy(self, name_behavior_id: str, policy: TFPolicy) -> None:
     """
     Adds policy to trainer.
     :param name_behavior_id: Behavior ID that the policy should belong to.
     :param policy: Policy to associate with name_behavior_id.
     """
     if self.policy:
         logger.warning(
             "Your environment contains multiple teams, but {} doesn't support adversarial games. Enable self-play to \
                 train adversarial games.".format(self.__class__.__name__))
     if not isinstance(policy, NNPolicy):
         raise RuntimeError(
             "Non-NNPolicy passed to PPOTrainer.add_policy()")
     self.policy = policy
     self.optimizer = PPOOptimizer(self.policy, self.trainer_parameters)
     for _reward_signal in self.optimizer.reward_signals.keys():
         self.collected_rewards[_reward_signal] = defaultdict(lambda: 0)
     # Needed to resume loads properly
     self.step = policy.get_current_step()
     self.next_summary_step = self._get_next_summary_step()
Example #8
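Nearly the same as the previous example, except the warning reports that add_policy was called twice rather than mentioning multi-team environments.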
 def add_policy(self, name_behavior_id: str, policy: TFPolicy) -> None:
     """
     Adds policy to trainer.
     :param name_behavior_id: Behavior ID that the policy should belong to.
     :param policy: Policy to associate with name_behavior_id.
     """
     if self.policy:
         logger.warning(
             "add_policy has been called twice. {} is not a multi-agent trainer"
             .format(self.__class__.__name__))
     if not isinstance(policy, NNPolicy):
         raise RuntimeError(
             "Non-NNPolicy passed to PPOTrainer.add_policy()")
     self.policy = policy
     self.optimizer = PPOOptimizer(self.policy, self.trainer_parameters)
     for _reward_signal in self.optimizer.reward_signals.keys():
         self.collected_rewards[_reward_signal] = defaultdict(lambda: 0)
     # Needed to resume loads properly
     self.step = policy.get_current_step()
     self.next_summary_step = self._get_next_summary_step()
Example #9
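Settings-object version of the optimizer test helper: the dummy config is copied with attr.evolve, optional memory settings are applied, and a PPOOptimizer is built on top of the resulting policy.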
def _create_ppo_optimizer_ops_mock(dummy_config, use_rnn, use_discrete, use_visual):
    mock_brain = mb.setup_mock_brain(
        use_discrete,
        use_visual,
        vector_action_space=VECTOR_ACTION_SPACE,
        vector_obs_space=VECTOR_OBS_SPACE,
        discrete_action_space=DISCRETE_ACTION_SPACE,
    )

    trainer_settings = attr.evolve(dummy_config)
    trainer_settings.network_settings.memory = (
        NetworkSettings.MemorySettings(sequence_length=16, memory_size=10)
        if use_rnn
        else None
    )
    policy = NNPolicy(
        0, mock_brain, trainer_settings, False, "test", False, create_tf_graph=False
    )
    optimizer = PPOOptimizer(policy, trainer_settings)
    return optimizer
Example #10
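Minimal factory method that always returns a TF PPOOptimizer for the trainer's policy and settings.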
 def create_ppo_optimizer(self) -> PPOOptimizer:
     return PPOOptimizer(cast(TFPolicy, self.policy), self.trainer_settings)