Example #1
    def __init__(self, brain, trainer_parameters, training, load, seed, run_id):
        """
        Responsible for collecting experiences and training a behavioral cloning (BC) model.
        :param brain: The brain the trainer is associated with.
        :param trainer_parameters: The parameters for the trainer (dictionary).
        :param training: Whether the trainer is set for training.
        :param load: Whether the model should be loaded.
        :param seed: The seed the model will be initialized with.
        :param run_id: The identifier of the current run.
        """
        super(BCTrainer, self).__init__(brain, trainer_parameters, training, run_id)
        self.policy = BCPolicy(seed, brain, trainer_parameters, load)
        self.n_sequences = 1
        self.cumulative_rewards = {}
        self.episode_steps = {}
        self.stats = {'Losses/Cloning Loss': [], 'Environment/Episode Length': [],
                      'Environment/Cumulative Reward': []}

        self.summary_path = trainer_parameters['summary_path']
        self.batches_per_epoch = trainer_parameters['batches_per_epoch']
        if not os.path.exists(self.summary_path):
            os.makedirs(self.summary_path)

        self.demonstration_buffer = Buffer()
        self.evaluation_buffer = Buffer()
        self.summary_writer = tf.summary.FileWriter(self.summary_path)
Example #2
    def __init__(self, brain, trainer_parameters, training, load, seed,
                 run_id):
        """
        Responsible for collecting experiences and training a behavioral cloning (BC) model.
        :param brain: The brain the trainer is associated with.
        :param trainer_parameters: The parameters for the trainer (dictionary).
        :param training: Whether the trainer is set for training.
        :param load: Whether the model should be loaded.
        :param seed: The seed the model will be initialized with.
        :param run_id: The identifier of the current run.
        """
        super(BCTrainer, self).__init__(brain, trainer_parameters, training,
                                        run_id)
        self.policy = BCPolicy(seed, brain, trainer_parameters, load)
        self.n_sequences = 1
        self.cumulative_rewards = {}
        self.episode_steps = {}
        self.stats = {
            "Losses/Cloning Loss": [],
            "Environment/Episode Length": [],
            "Environment/Cumulative Reward": [],
        }

        self.batches_per_epoch = trainer_parameters["batches_per_epoch"]

        self.demonstration_buffer = Buffer()
        self.evaluation_buffer = Buffer()
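
Both constructors above pull specific keys out of trainer_parameters. The sketch below shows a minimal dictionary that would satisfy them; the key names come from the code above, while the values are illustrative assumptions only.

# Hypothetical minimal trainer_parameters for the BC trainers above.
# Key names are taken from the constructors; values are illustrative guesses.
trainer_parameters = {
    'summary_path': './summaries/bc_run',  # Example #1 writes TensorBoard summaries here
    'batches_per_epoch': 10,               # number of gradient batches per update pass
}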
Example #3
    def __init__(self, sess, brain, trainer_parameters, training, seed, run_id):
        """
        Responsible for collecting experiences and training a behavioral cloning (BC) model.
        :param sess: Tensorflow session.
        :param brain: The brain the trainer is associated with.
        :param trainer_parameters: The parameters for the trainer (dictionary).
        :param training: Whether the trainer is set for training.
        :param seed: The seed the model will be initialized with.
        :param run_id: The identifier of the current run.
        """
        super(BehavioralCloningTrainer, self).__init__(sess, brain, trainer_parameters, training, run_id)

        self.param_keys = ['brain_to_imitate', 'batch_size', 'time_horizon',
                           'graph_scope', 'summary_freq', 'max_steps',
                           'batches_per_epoch', 'use_recurrent',
                           'hidden_units', 'learning_rate', 'num_layers',
                           'sequence_length', 'memory_size']

        for k in self.param_keys:
            if k not in trainer_parameters:
                raise UnityTrainerException("The hyperparameter {0} could not be found for the Imitation trainer of "
                                            "brain {1}.".format(k, brain.brain_name))

        self.policy = BCPolicy(seed, brain, trainer_parameters, sess)
        self.brain_name = brain.brain_name
        self.brain_to_imitate = trainer_parameters['brain_to_imitate']
        self.batches_per_epoch = trainer_parameters['batches_per_epoch']
        self.n_sequences = max(int(trainer_parameters['batch_size'] / self.policy.sequence_length), 1)
        self.cumulative_rewards = {}
        self.episode_steps = {}
        self.stats = {'losses': [], 'episode_length': [], 'cumulative_reward': []}

        self.training_buffer = Buffer()
        self.summary_path = trainer_parameters['summary_path']
        if not os.path.exists(self.summary_path):
            os.makedirs(self.summary_path)

        self.summary_writer = tf.summary.FileWriter(self.summary_path)
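
The loop over self.param_keys above is a fail-fast configuration check. The sketch below isolates that pattern under assumed names (validate_config, a trimmed REQUIRED_KEYS list, and a plain KeyError standing in for UnityTrainerException):

# A minimal sketch of the fail-fast hyperparameter check used above.
REQUIRED_KEYS = ['brain_to_imitate', 'batch_size', 'batches_per_epoch']  # trimmed for brevity

def validate_config(trainer_parameters, brain_name):
    # Surface the first missing key before any training work begins.
    for k in REQUIRED_KEYS:
        if k not in trainer_parameters:
            raise KeyError('The hyperparameter {0} could not be found for the '
                           'Imitation trainer of brain {1}.'.format(k, brain_name))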
Example #4
def test_bc_policy_evaluate(mock_communicator, mock_launcher, dummy_config):
    tf.reset_default_graph()
    mock_communicator.return_value = MockCommunicator(
        discrete_action=False, visual_inputs=0)
    env = UnityEnvironment(' ')
    brain_infos = env.reset()
    brain_info = brain_infos[env.brain_names[0]]

    trainer_parameters = dummy_config
    model_path = env.brain_names[0]
    trainer_parameters['model_path'] = model_path
    trainer_parameters['keep_checkpoints'] = 3
    policy = BCPolicy(0, env.brains[env.brain_names[0]], trainer_parameters, False)
    run_out = policy.evaluate(brain_info)
    assert run_out['action'].shape == (3, 2)

    env.close()
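
The mock_communicator and mock_launcher arguments above imply the test is wrapped in stacked mock.patch decorators. A plausible sketch follows; the patch targets are assumptions based on how such tests are typically set up, not verified import paths.

from unittest import mock

# Assumed patch targets; the bottom decorator binds to the first mock argument.
@mock.patch('mlagents.envs.UnityEnvironment.executable_launcher')
@mock.patch('mlagents.envs.UnityEnvironment.get_communicator')
def test_bc_policy_evaluate(mock_communicator, mock_launcher, dummy_config):
    ...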
Example #5
def test_bc_policy_evaluate(mock_communicator, mock_launcher):
    tf.reset_default_graph()
    with tf.Session() as sess:
        mock_communicator.return_value = MockCommunicator(
            discrete_action=False, visual_inputs=0)
        env = UnityEnvironment(' ')
        brain_infos = env.reset()
        brain_info = brain_infos[env.brain_names[0]]

        trainer_parameters = dummy_config()
        graph_scope = env.brain_names[0]
        trainer_parameters['graph_scope'] = graph_scope
        policy = BCPolicy(0, env.brains[env.brain_names[0]],
                          trainer_parameters, sess)
        init = tf.global_variables_initializer()
        sess.run(init)
        run_out = policy.evaluate(brain_info)
        assert run_out['action'].shape == (3, 2)

    env.close()
Example #6
def test_bc_policy_evaluate(mock_communicator, mock_launcher, dummy_config):
    tf.reset_default_graph()
    mock_communicator.return_value = MockCommunicator(discrete_action=False,
                                                      visual_inputs=0)
    env = UnityEnvironment(" ")
    env.reset()
    brain_name = env.get_agent_groups()[0]
    brain_info = step_result_to_brain_info(
        env.get_step_result(brain_name), env.get_agent_group_spec(brain_name))
    brain_params = group_spec_to_brain_parameters(
        brain_name, env.get_agent_group_spec(brain_name))

    trainer_parameters = dummy_config
    model_path = brain_name
    trainer_parameters["model_path"] = model_path
    trainer_parameters["keep_checkpoints"] = 3
    policy = BCPolicy(0, brain_params, trainer_parameters, False)
    run_out = policy.evaluate(brain_info)
    assert run_out["action"].shape == (3, 2)

    env.close()
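
Examples #4 and #6 receive dummy_config as a pytest fixture, while Example #5 calls a module-level dummy_config(). A minimal sketch of such a fixture is given below; the keys are taken from the param_keys list in Example #3, and the values are illustrative only.

import pytest
import yaml

@pytest.fixture
def dummy_config():
    # Illustrative defaults; the real test suite supplies its own values.
    return yaml.safe_load('''
        batch_size: 32
        batches_per_epoch: 1
        hidden_units: 32
        learning_rate: 3.0e-4
        num_layers: 1
        use_recurrent: false
        sequence_length: 16
        memory_size: 32
        summary_freq: 1000
        max_steps: 100
        time_horizon: 64
        ''')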