Example #1
    def get_encoder_for_obs(
        shape: Tuple[int, ...],
        normalize: bool,
        h_size: int,
        vis_encode_type: EncoderType,
    ) -> Tuple[nn.Module, int]:
        """
        Returns the appropriate encoder along with its output size.
        :param shape: Tuple that represents the observation dimensions.
        :param normalize: Normalize all vector inputs.
        :param h_size: Number of hidden units per layer.
        :param vis_encode_type: Type of visual encoder to use.
        """
        if len(shape) == 1:
            # Rank-1 tensor: a plain vector observation
            return (VectorInput(shape[0], normalize), shape[0])
        if len(shape) == 3:
            # Rank-3 tensor: a visual observation (height, width, channels)
            ModelUtils._check_resolution_for_encoder(shape[0], shape[1], vis_encode_type)
            visual_encoder_class = ModelUtils.get_encoder_for_type(vis_encode_type)
            return (visual_encoder_class(shape[0], shape[1], shape[2], h_size), h_size)
        raise UnityTrainerException(f"Unsupported shape of {shape} for observation")
Example #2
    def create_input_processors(
        observation_shapes: List[Tuple[int, ...]],
        h_size: int,
        vis_encode_type: EncoderType,
        normalize: bool = False,
    ) -> Tuple[nn.ModuleList, nn.ModuleList, int]:
        """
        Creates visual and vector encoders, along with their normalizers.
        :param observation_shapes: List of Tuples that represent the action dimensions.
        :param action_size: Number of additional un-normalized inputs to each vector encoder. Used for
            conditioining network on other values (e.g. actions for a Q function)
        :param h_size: Number of hidden units per layer.
        :param vis_encode_type: Type of visual encoder to use.
        :param unnormalized_inputs: Vector inputs that should not be normalized, and added to the vector
            obs.
        :param normalize: Normalize all vector inputs.
        :return: Tuple of visual encoders and vector encoders each as a list.
        """
        visual_encoders: List[nn.Module] = []
        vector_encoders: List[nn.Module] = []

        visual_encoder_class = ModelUtils.get_encoder_for_type(vis_encode_type)
        vector_size = 0
        visual_output_size = 0
        for i, dimension in enumerate(observation_shapes):
            if len(dimension) == 3:
                ModelUtils._check_resolution_for_encoder(
                    dimension[0], dimension[1], vis_encode_type
                )
                visual_encoders.append(
                    visual_encoder_class(
                        dimension[0], dimension[1], dimension[2], h_size
                    )
                )
                visual_output_size += h_size
            elif len(dimension) == 1:
                vector_size += dimension[0]
            else:
                raise UnityTrainerException(
                    f"Unsupported shape of {dimension} for observation {i}"
                )
        if vector_size > 0:
            vector_encoders.append(VectorInput(vector_size, normalize))
        # Total output size for all inputs + CNNs
        total_processed_size = vector_size + visual_output_size
        return (
            nn.ModuleList(visual_encoders),
            nn.ModuleList(vector_encoders),
            total_processed_size,
        )
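A minimal usage sketch, assuming the method is reachable as a static method (ModelUtils is a plausible host, though the snippet omits its class) and that EncoderType is imported; the observation shapes are illustrative:

# Usage sketch (hypothetical shapes): the two rank-1 inputs are merged into a
# single VectorInput of size 8 + 4 = 12; the rank-3 input gets its own CNN.
visual_encs, vector_encs, total_size = ModelUtils.create_input_processors(
    observation_shapes=[(84, 84, 3), (8,), (4,)],
    h_size=128,
    vis_encode_type=EncoderType.SIMPLE,
    normalize=True,
)
assert total_size == 128 + 12  # h_size per visual encoder + summed vector sizes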
Example #3
# NOTE: the import and patch-target paths below are assumed from the
# mlagents package layout; the test body requires Normalizer to be patched.
from unittest import mock

import torch

from mlagents.trainers.torch.encoders import VectorInput


@mock.patch("mlagents.trainers.torch.encoders.Normalizer")
def test_vector_encoder(mock_normalizer):
    mock_normalizer_inst = mock.Mock()
    mock_normalizer.return_value = mock_normalizer_inst
    input_size = 64
    normalize = False
    vector_encoder = VectorInput(input_size, normalize)
    output = vector_encoder(torch.ones((1, input_size)))
    assert output.shape == (1, input_size)

    normalize = True
    vector_encoder = VectorInput(input_size, normalize)
    new_vec = torch.ones((1, input_size))
    vector_encoder.update_normalization(new_vec)

    mock_normalizer.assert_called_with(input_size)
    mock_normalizer_inst.update.assert_called_with(new_vec)

    # Both encoders share the same mocked Normalizer instance, so copying
    # normalization stats is observed as a copy_from call on that instance.
    vector_encoder2 = VectorInput(input_size, normalize)
    vector_encoder.copy_normalization(vector_encoder2)
    mock_normalizer_inst.copy_from.assert_called_with(mock_normalizer_inst)
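Outside of tests, these hooks are what keeps running normalization statistics in sync; a hypothetical training-side sketch (observation_batches and the loop context are illustrative, not from the source):

# Hypothetical sketch of the hooks exercised by the test above.
encoder = VectorInput(64, True)            # input_size=64, normalize=True
for batch in observation_batches:          # e.g. tensors of shape (B, 64)
    encoder.update_normalization(batch)    # accumulate running statistics
target = VectorInput(64, True)
target.copy_normalization(encoder)         # pull encoder's stats into target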
Example #4
    def get_encoder_for_obs(
        obs_spec: ObservationSpec,
        normalize: bool,
        h_size: int,
        attention_embedding_size: int,
        vis_encode_type: EncoderType,
    ) -> Tuple[nn.Module, int]:
        """
        Returns the appropriate encoder along with its output size.
        :param obs_spec: ObservationSpec describing the observation's shape and
            dimension properties.
        :param normalize: Normalize all vector inputs.
        :param h_size: Number of hidden units per layer excluding attention layers.
        :param attention_embedding_size: Number of hidden units per attention layer.
        :param vis_encode_type: Type of visual encoder to use.
        """
        shape = obs_spec.shape
        dim_prop = obs_spec.dimension_property

        # VISUAL
        if dim_prop in ModelUtils.VALID_VISUAL_PROP:
            visual_encoder_class = ModelUtils.get_encoder_for_type(vis_encode_type)
            return (visual_encoder_class(shape[0], shape[1], shape[2], h_size), h_size)
        # VECTOR
        if dim_prop in ModelUtils.VALID_VECTOR_PROP:
            return (VectorInput(shape[0], normalize), shape[0])
        # VARIABLE LENGTH
        if dim_prop in ModelUtils.VALID_VAR_LEN_PROP:
            return (
                EntityEmbedding(
                    entity_size=shape[1],
                    entity_num_max_elements=shape[0],
                    embedding_size=attention_embedding_size,
                ),
                0,  # contributes no width here; aggregated by attention downstream
            )
        # OTHER
        raise UnityTrainerException(f"Unsupported Sensor with specs {obs_spec}")