def test_min_visual_size():
    # Make sure each EncoderType has an entry in MIN_RESOLUTION_FOR_ENCODER
    assert set(
        ModelUtils.MIN_RESOLUTION_FOR_ENCODER.keys()) == set(EncoderType)

    for encoder_type in EncoderType:
        with tf.Graph().as_default():
            good_size = ModelUtils.MIN_RESOLUTION_FOR_ENCODER[encoder_type]
            good_res = CameraResolution(width=good_size,
                                        height=good_size,
                                        num_channels=3)
            vis_input = ModelUtils.create_visual_input(good_res,
                                                       "test_min_visual_size")
            ModelUtils._check_resolution_for_encoder(vis_input, encoder_type)
            enc_func = ModelUtils.get_encoder_for_type(encoder_type)
            enc_func(vis_input, 32, ModelUtils.swish, 1, "test", False)

        # Anything under the min size should raise an exception. If not, decrease the min size!
        with pytest.raises(Exception):
            with tf.Graph().as_default():
                bad_size = ModelUtils.MIN_RESOLUTION_FOR_ENCODER[
                    encoder_type] - 1
                bad_res = CameraResolution(width=bad_size,
                                           height=bad_size,
                                           num_channels=3)
                vis_input = ModelUtils.create_visual_input(
                    bad_res, "test_min_visual_size")

                with pytest.raises(UnityTrainerException):
                    # Make sure we'd hit a friendly error during model setup time.
                    ModelUtils._check_resolution_for_encoder(
                        vis_input, encoder_type)

                enc_func = ModelUtils.get_encoder_for_type(encoder_type)
                enc_func(vis_input, 32, ModelUtils.swish, 1, "test", False)
def group_spec_to_brain_parameters(
    name: str, group_spec: AgentGroupSpec
) -> BrainParameters:
    vec_size = np.sum(
        [shape[0] for shape in group_spec.observation_shapes if len(shape) == 1]
    )
    vis_sizes = [shape for shape in group_spec.observation_shapes if len(shape) == 3]
    cam_res = [CameraResolution(s[0], s[1], s[2]) for s in vis_sizes]
    a_size: List[int] = []
    if group_spec.is_action_discrete():
        a_size += list(group_spec.discrete_action_branches)
        vector_action_space_type = 0
    else:
        a_size += [group_spec.action_size]
        vector_action_space_type = 1
    return BrainParameters(
        name, int(vec_size), cam_res, a_size, [], vector_action_space_type
    )
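
A usage sketch for the converter above, assuming AgentGroupSpec is the named tuple from mlagents_envs.base_env with fields (observation_shapes, action_type, action_shape); the spec values here are illustrative:

from mlagents_envs.base_env import ActionType, AgentGroupSpec

# One 6-dim vector observation and one 84x84x3 visual observation,
# with a 2-dim continuous action space.
spec = AgentGroupSpec(
    observation_shapes=[(6,), (84, 84, 3)],
    action_type=ActionType.CONTINUOUS,
    action_shape=2,
)
params = group_spec_to_brain_parameters("TestBrain", spec)
# Expected: vector_observation_space_size == 6, one CameraResolution entry,
# vector_action_space_size == [2], vector_action_space_type == 1 (continuous).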
Example #3
def make_brain_parameters(
    discrete_action: bool = False,
    visual_inputs: int = 0,
    brain_name: str = "RealFakeBrain",
    vec_obs_size: int = 6,
) -> BrainParameters:
    resolutions = [
        CameraResolution(width=30, height=40, num_channels=3)
        for _ in range(visual_inputs)
    ]

    return BrainParameters(
        vector_observation_space_size=vec_obs_size,
        camera_resolutions=resolutions,
        vector_action_space_size=[2],
        vector_action_descriptions=["", ""],
        vector_action_space_type=int(not discrete_action),
        brain_name=brain_name,
    )
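
A quick usage sketch for the helper above; the expected values follow directly from the defaults in its signature:

# Continuous-control brain with two 30x40 RGB cameras.
brain = make_brain_parameters(discrete_action=False, visual_inputs=2)
# Expected: len(brain.camera_resolutions) == 2, vector_action_space_type == 1,
# vector_observation_space_size == 6 (the default vec_obs_size).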
Example #4
def group_spec_to_brain_parameters(
        name: str, group_spec
) -> BrainParameters:
    vec_size = np.sum(
        [shape[0] for shape in group_spec['observation_shapes'] if len(shape) == 1]
    )
    vis_sizes = [shape for shape in group_spec['observation_shapes'] if len(shape) == 3]
    cam_res = [CameraResolution(s[0], s[1], s[2]) for s in vis_sizes]
    a_size: List[int] = []
    if group_spec['action_type'] == 'DISCRETE':
        a_size += list(group_spec['action_shape'])
        vector_action_space_type = 0
    else:
        # For continuous actions, 'action_shape' is an int giving the action size.
        a_size += [group_spec['action_shape']]
        vector_action_space_type = 1
    return BrainParameters(
        brain_name=name, vector_observation_space_size=int(vec_size), camera_resolutions=cam_res,
        vector_action_space_size=a_size, vector_action_descriptions=[],
        vector_action_space_type=vector_action_space_type
    )
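
A usage sketch for the dict-based variant above; the dictionary layout mirrors the keys the function reads ('observation_shapes', 'action_type', 'action_shape') and the values are illustrative:

spec_dict = {
    "observation_shapes": [(8,), (84, 84, 3)],
    "action_type": "DISCRETE",
    "action_shape": (3, 2),  # two discrete branches with 3 and 2 choices
}
params = group_spec_to_brain_parameters("DictBrain", spec_dict)
# Expected: vector_observation_space_size == 8, one CameraResolution entry,
# vector_action_space_size == [3, 2], vector_action_space_type == 0 (discrete).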
Example #5
def create_mock_brainparams(
    number_visual_observations=0,
    vector_action_space_type="continuous",
    vector_observation_space_size=3,
    vector_action_space_size=None,
):
    """
    Creates a mock BrainParameters object with parameters.
    """
    # Avoid using mutable object as default param
    if vector_action_space_size is None:
        vector_action_space_size = [2]
    mock_brain = mock.Mock()
    mock_brain.return_value.number_visual_observations = number_visual_observations
    mock_brain.return_value.vector_action_space_type = vector_action_space_type
    mock_brain.return_value.vector_observation_space_size = (
        vector_observation_space_size
    )
    camrez = CameraResolution(height=84, width=84, num_channels=3)
    mock_brain.return_value.camera_resolutions = [camrez] * number_visual_observations
    mock_brain.return_value.vector_action_space_size = vector_action_space_size
    mock_brain.return_value.brain_name = "MockBrain"
    return mock_brain()
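
A usage sketch for the mock factory above; since the attributes are set on a mock.Mock's return_value, the returned object answers the same attribute lookups a real BrainParameters would:

mock_params = create_mock_brainparams(
    number_visual_observations=1,
    vector_action_space_type="discrete",
    vector_action_space_size=[3, 3],
)
assert mock_params.brain_name == "MockBrain"
assert len(mock_params.camera_resolutions) == 1
assert mock_params.vector_action_space_size == [3, 3]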