def test_create_inputs(encoder_type, normalize, num_vector, num_visual):
    """Check that create_input_processors builds one encoder per observation,
    of the right class, and that the summed embedding sizes match expectations.
    """
    vec_obs_shape = (5,)
    vis_obs_shape = (84, 84, 3)
    # Vector observations first, then visual ones.
    obs_shapes = [vec_obs_shape] * num_vector + [vis_obs_shape] * num_visual
    h_size = 128
    obs_spec = create_observation_specs_with_shapes(obs_shapes)
    # NOTE(review): h_size is passed twice — presumably the second is an
    # attention/embedding size that happens to equal h_size here.
    encoders, embedding_sizes = ModelUtils.create_input_processors(
        obs_spec, h_size, encoder_type, h_size, normalize
    )
    total_output = sum(embedding_sizes)
    # Partition encoders by the rank of their corresponding observation shape:
    # rank-1 shapes are vector observations, everything else is visual.
    vec_enc = [enc for shape, enc in zip(obs_shapes, encoders) if len(shape) == 1]
    vis_enc = [enc for shape, enc in zip(obs_shapes, encoders) if len(shape) != 1]
    assert len(vec_enc) == num_vector
    assert len(vis_enc) == num_visual
    # Visual encoders each emit h_size; vector encoders pass through their width.
    assert total_output == int(num_visual * h_size + vec_obs_shape[0] * num_vector)
    if num_vector > 0:
        assert isinstance(vec_enc[0], VectorInput)
    expected_visual_cls = ModelUtils.get_encoder_for_type(encoder_type)
    for enc in vis_enc:
        assert isinstance(enc, expected_visual_cls)
def test_create_encoders(encoder_type, normalize, num_vector, num_visual, unnormalized_inputs):
    """Check that create_encoders returns the expected number and classes of
    visual/vector encoders for each combination of observation inputs.

    BUG FIX: the parametrized `unnormalized_inputs` argument was previously
    overwritten with a hard-coded `unnormalized_inputs = 1`, which made the
    parametrization meaningless and left the `VectorEncoder` assertion branch
    unreachable. The shadowing assignment has been removed.
    """
    vec_obs_shape = (5,)
    vis_obs_shape = (84, 84, 3)
    obs_shapes = []
    for _ in range(num_vector):
        obs_shapes.append(vec_obs_shape)
    for _ in range(num_visual):
        obs_shapes.append(vis_obs_shape)
    h_size = 128
    num_layers = 3
    vis_enc, vec_enc = ModelUtils.create_encoders(
        obs_shapes, h_size, num_layers, encoder_type, unnormalized_inputs, normalize
    )
    vec_enc = list(vec_enc)
    vis_enc = list(vis_enc)
    # There's always at most one vector encoder: all vector inputs are merged.
    assert len(vec_enc) == (1 if unnormalized_inputs + num_vector > 0 else 0)
    assert len(vis_enc) == num_visual
    if unnormalized_inputs > 0:
        assert isinstance(vec_enc[0], VectorAndUnnormalizedInputEncoder)
    elif num_vector > 0:
        assert isinstance(vec_enc[0], VectorEncoder)
    for enc in vis_enc:
        assert isinstance(enc, ModelUtils.get_encoder_for_type(encoder_type))
def test_min_visual_size():
    """Validate MIN_RESOLUTION_FOR_ENCODER: every EncoderType has an entry, the
    declared minimum works end-to-end, and anything smaller fails.
    """
    # Make sure each EncoderType has an entry in MIN_RESOLUTION_FOR_ENCODER.
    assert set(ModelUtils.MIN_RESOLUTION_FOR_ENCODER.keys()) == set(EncoderType)
    for encoder_type in EncoderType:
        min_res = ModelUtils.MIN_RESOLUTION_FOR_ENCODER[encoder_type]
        # The declared minimum must pass the pre-flight check and a real forward pass.
        ModelUtils._check_resolution_for_encoder(min_res, min_res, encoder_type)
        enc_func = ModelUtils.get_encoder_for_type(encoder_type)
        encoder = enc_func(min_res, min_res, 3, 1)
        encoder.forward(torch.ones((1, min_res, min_res, 3)))
        # One pixel below the minimum should raise somewhere; if it doesn't,
        # the declared minimum is too large and should be decreased.
        with pytest.raises(Exception):
            too_small = min_res - 1
            bad_input = torch.ones((1, too_small, too_small, 3))
            with pytest.raises(UnityTrainerException):
                # The friendly, explicit error should fire at model-setup time.
                ModelUtils._check_resolution_for_encoder(
                    too_small, too_small, encoder_type
                )
            encoder = enc_func(too_small, too_small, 3, 1)
            encoder.forward(bad_input)
def test_create_inputs(encoder_type, normalize, num_vector, num_visual):
    """Check create_input_processors (tuple-shape API): encoder counts, total
    embedding size, and encoder classes for mixed vector/visual observations.

    NOTE(review): this duplicates the name of an earlier test_create_inputs in
    this chunk — likely two revisions of the same test; verify against the file.
    """
    vec_obs_shape = (5,)
    vis_obs_shape = (84, 84, 3)
    # Vector observations first, then visual ones.
    obs_shapes = [vec_obs_shape] * num_vector + [vis_obs_shape] * num_visual
    h_size = 128
    vis_enc, vec_enc, total_output = ModelUtils.create_input_processors(
        obs_shapes, h_size, encoder_type, normalize
    )
    vec_enc, vis_enc = list(vec_enc), list(vis_enc)
    # All vector observations are folded into at most one encoder.
    assert len(vec_enc) == (1 if num_vector >= 1 else 0)
    assert len(vis_enc) == num_visual
    # Visual encoders each emit h_size; the vector encoder passes through its width.
    assert total_output == int(num_visual * h_size + vec_obs_shape[0] * num_vector)
    if num_vector > 0:
        assert isinstance(vec_enc[0], VectorInput)
    expected_visual_cls = ModelUtils.get_encoder_for_type(encoder_type)
    for enc in vis_enc:
        assert isinstance(enc, expected_visual_cls)