Example #1
import pytest
import tensorflow as tf

# Import paths assume the TF-era ml-agents layout; they may differ by release.
from mlagents.envs.brain import CameraResolution
from mlagents.trainers.models import LearningModel, EncoderType
from mlagents.trainers.exception import UnityTrainerException


def test_min_visual_size():
    # Make sure each EncoderType has an entry in MIN_RESOLUTION_FOR_ENCODER.
    assert set(LearningModel.MIN_RESOLUTION_FOR_ENCODER.keys()) == set(EncoderType)

    for encoder_type in EncoderType:
        with tf.Graph().as_default():
            good_size = LearningModel.MIN_RESOLUTION_FOR_ENCODER[encoder_type]
            good_res = CameraResolution(width=good_size,
                                        height=good_size,
                                        num_channels=3)
            LearningModel._check_resolution_for_encoder(good_res, encoder_type)
            vis_input = LearningModel.create_visual_input(
                good_res, "test_min_visual_size")
            enc_func = LearningModel.get_encoder_for_type(encoder_type)
            # Encoder signature: (input, h_size, activation, num_layers, scope, reuse)
            enc_func(vis_input, 32, LearningModel.swish, 1, "test", False)

        # Anything under the min size should raise an exception. If not, decrease the min size!
        with pytest.raises(Exception):
            with tf.Graph().as_default():
                bad_size = LearningModel.MIN_RESOLUTION_FOR_ENCODER[encoder_type] - 1
                bad_res = CameraResolution(width=bad_size,
                                           height=bad_size,
                                           num_channels=3)

                with pytest.raises(UnityTrainerException):
                    # Make sure we'd hit a friendly error during model setup time.
                    LearningModel._check_resolution_for_encoder(
                        bad_res, encoder_type)

                vis_input = LearningModel.create_visual_input(
                    bad_res, "test_min_visual_size")
                enc_func = LearningModel.get_encoder_for_type(encoder_type)
                enc_func(vis_input, 32, LearningModel.swish, 1, "test", False)
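
The nested pytest.raises contexts above are the subtle part of this test: the inner context pins the friendly UnityTrainerException to the explicit resolution check, while the outer context still passes as long as building the too-small encoder fails with any raw TensorFlow error. A minimal, self-contained sketch of that pattern (check_size and build_encoder are illustrative stand-ins, not ml-agents functions):

import pytest

def check_size(size):
    # Stand-in for _check_resolution_for_encoder: a friendly, early error.
    if size < 1:
        raise ValueError("resolution too small")

def build_encoder(size):
    # Stand-in for enc_func: fails later, with a less helpful error.
    if size < 1:
        raise RuntimeError("negative dimension in conv layer")

def test_nested_raises():
    with pytest.raises(Exception):       # passes if anything below raises
        with pytest.raises(ValueError):  # pins the friendly error to the check
            check_size(0)
        build_encoder(0)                 # the raw failure satisfies the outer context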
Example #2
    # Requires "from typing import Tuple"; tf here is the TensorFlow 1.x module.
    def create_curiosity_encoders(self) -> Tuple[tf.Tensor, tf.Tensor]:
        """
        Creates state encoders for current and future observations.
        Used for implementation of Curiosity-driven Exploration by Self-supervised Prediction
        See https://arxiv.org/abs/1705.05363 for more details.
        :return: current and future state encoder tensors.
        """
        encoded_state_list = []
        encoded_next_state_list = []

        if self.policy_model.vis_obs_size > 0:
            self.next_visual_in = []
            visual_encoders = []
            next_visual_encoders = []
            for i in range(self.policy_model.vis_obs_size):
                # Create input ops for next (t+1) visual observations.
                next_visual_input = LearningModel.create_visual_input(
                    self.policy_model.brain.camera_resolutions[i],
                    name="curiosity_next_visual_observation_" + str(i),
                )
                self.next_visual_in.append(next_visual_input)

                # Create the encoder ops for current and next visual input.
                # Note that these encoders are siamese.
                encoded_visual = self.policy_model.create_visual_observation_encoder(
                    self.policy_model.visual_in[i],
                    self.encoding_size,
                    LearningModel.swish,
                    1,
                    "curiosity_stream_{}_visual_obs_encoder".format(i),
                    False,
                )

                encoded_next_visual = self.policy_model.create_visual_observation_encoder(
                    self.next_visual_in[i],
                    self.encoding_size,
                    LearningModel.swish,
                    1,
                    "curiosity_stream_{}_visual_obs_encoder".format(i),
                    True,
                )
                visual_encoders.append(encoded_visual)
                next_visual_encoders.append(encoded_next_visual)

            hidden_visual = tf.concat(visual_encoders, axis=1)
            hidden_next_visual = tf.concat(next_visual_encoders, axis=1)
            encoded_state_list.append(hidden_visual)
            encoded_next_state_list.append(hidden_next_visual)

        if self.policy_model.vec_obs_size > 0:
            # Create the encoder ops for current and next vector input.
            # Note that these encoders are siamese.
            # Create input op for next (t+1) vector observation.
            self.next_vector_in = tf.placeholder(
                shape=[None, self.policy_model.vec_obs_size],
                dtype=tf.float32,
                name="curiosity_next_vector_observation",
            )

            encoded_vector_obs = self.policy_model.create_vector_observation_encoder(
                self.policy_model.vector_in,
                self.encoding_size,
                LearningModel.swish,
                2,
                "curiosity_vector_obs_encoder",
                False,
            )
            encoded_next_vector_obs = self.policy_model.create_vector_observation_encoder(
                self.next_vector_in,
                self.encoding_size,
                LearningModel.swish,
                2,
                "curiosity_vector_obs_encoder",
                True,
            )
            encoded_state_list.append(encoded_vector_obs)
            encoded_next_state_list.append(encoded_next_vector_obs)

        encoded_state = tf.concat(encoded_state_list, axis=1)
        encoded_next_state = tf.concat(encoded_next_state_list, axis=1)
        return encoded_state, encoded_next_state
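
The "siamese" comments above are the key design point: each current/next pair is encoded by two calls that share a variable scope name, with reuse=False on the first call and reuse=True on the second, so both observations flow through a single set of weights. A minimal TF 1.x sketch of that sharing pattern (the encode helper, placeholder shapes, and scope name are illustrative, not part of ml-agents):

import tensorflow as tf  # TF 1.x graph-mode API, as in the code above

def encode(x, h_size, scope, reuse):
    # Same scope name + reuse=True on the second call => shared weights.
    with tf.variable_scope(scope, reuse=reuse):
        return tf.layers.dense(x, h_size, activation=tf.nn.relu)

obs = tf.placeholder(tf.float32, [None, 8], name="vector_observation")
next_obs = tf.placeholder(tf.float32, [None, 8], name="next_vector_observation")

encoded = encode(obs, 32, "vector_obs_encoder", reuse=False)
encoded_next = encode(next_obs, 32, "vector_obs_encoder", reuse=True)

# Only one kernel and one bias exist; both tensors use the same dense layer.
assert len(tf.trainable_variables()) == 2

Because the weights are shared, the curiosity module compares the two observations in a common embedding space rather than learning a separate encoder per time step.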