Example #1
    def test_get_actions(self, batch_size, hidden_sizes):
        """Test get_actions function."""
        env_spec = GymEnv(DummyBoxEnv())
        obs_dim = env_spec.observation_space.flat_dim
        act_dim = env_spec.action_space.flat_dim
        obs = torch.ones([batch_size, obs_dim], dtype=torch.float32)
        init_std = 2.

        policy = GaussianMLPPolicy(env_spec=env_spec,
                                   hidden_sizes=hidden_sizes,
                                   init_std=init_std,
                                   hidden_nonlinearity=None,
                                   std_parameterization='exp',
                                   hidden_w_init=nn.init.ones_,
                                   output_w_init=nn.init.ones_)

        # policy(obs) returns (distribution, info); keep the distribution.
        dist = policy(obs)[0]

        # With all-ones weights and no hidden nonlinearity, every unit just
        # sums its inputs, so the mean is obs_dim * prod(hidden_sizes).
        expected_mean = torch.full([batch_size, act_dim],
                                   obs_dim *
                                   (torch.Tensor(hidden_sizes).prod().item()),
                                   dtype=torch.float)
        expected_variance = init_std**2
        action, prob = policy.get_actions(obs)

        assert np.array_equal(prob['mean'], expected_mean.numpy())
        assert dist.variance.equal(
            torch.full((batch_size, act_dim),
                       expected_variance,
                       dtype=torch.float))
        assert action.shape == (batch_size, act_dim)
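
The `batch_size` and `hidden_sizes` arguments above imply the original suite drives this method through pytest parametrization; the decorators were lost when the example was extracted. A minimal sketch of such a setup, with illustrative value sets that are not from the source:

import pytest

@pytest.mark.parametrize('batch_size, hidden_sizes', [
    (1, (1, )),
    (5, (3, )),
    (8, (4, 4)),
])
def test_get_actions(batch_size, hidden_sizes):
    ...  # body as in Example #1, minus `self`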
Example #2
    def test_get_action_dict_space(self):
        """Test if observations from dict obs spaces are properly flattened."""
        env = GymEnv(DummyDictEnv(obs_space_type='box', act_space_type='box'))
        policy = GaussianMLPPolicy(env_spec=env.spec,
                                   hidden_nonlinearity=None,
                                   hidden_sizes=(1, ),
                                   hidden_w_init=nn.init.ones_,
                                   output_w_init=nn.init.ones_)
        # reset() returns (first_observation, episode_info).
        obs = env.reset()[0]

        action, _ = policy.get_action(obs)
        assert env.action_space.shape == action.shape

        actions, _ = policy.get_actions(np.array([obs, obs]))
        for action in actions:
            assert env.action_space.shape == action.shape
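
For context, the flattening this test relies on happens inside the policy, so callers can pass the raw dict observation directly. A minimal sketch of the underlying operation, assuming akro (garage's space library) and its `Dict.flatten`/`flat_dim` API:

import akro
import numpy as np

space = akro.Dict({
    'position': akro.Box(low=-1., high=1., shape=(2, )),
    'velocity': akro.Box(low=-1., high=1., shape=(2, )),
})
obs = {'position': np.zeros(2), 'velocity': np.ones(2)}
flat = space.flatten(obs)  # the 1-D vector the MLP actually consumes
assert flat.shape == (space.flat_dim, )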
Example #3
def test_policy_get_actions(mock_model, input_dim, output_dim, hidden_sizes):
    """Test that get_actions returns the sample drawn from the model's distribution."""
    action = torch.randn((output_dim, ))

    mock_dist = mock.MagicMock()
    mock_dist.rsample.return_value = action

    mock_model.return_value = mock_dist

    env_spec = mock.MagicMock()
    env_spec.observation_space.flat_dim = input_dim
    env_spec.action_space.flat_dim = output_dim

    policy = GaussianMLPPolicy(env_spec, mock_model)

    obs = torch.ones(input_dim)
    sample = policy.get_actions(obs)

    assert np.array_equal(sample, action.detach().numpy())
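
Note that because the model is replaced by a `MagicMock` whose distribution returns a fixed tensor from `rsample`, this test exercises only the `get_actions` plumbing (forwarding the observation and converting the sample to NumPy); the network weights never matter.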
Example #4
    def test_is_pickleable(self, batch_size, hidden_sizes):
        """Test that a pickled and restored policy produces the same outputs."""
        env_spec = TfEnv(DummyBoxEnv())
        obs_dim = env_spec.observation_space.flat_dim
        obs = torch.ones([batch_size, obs_dim], dtype=torch.float32)
        init_std = 2.

        policy = GaussianMLPPolicy(env_spec=env_spec,
                                   hidden_sizes=hidden_sizes,
                                   init_std=init_std,
                                   hidden_nonlinearity=None,
                                   std_parameterization='exp',
                                   hidden_w_init=nn.init.ones_,
                                   output_w_init=nn.init.ones_)

        output1_action, output1_prob = policy.get_actions(obs)

        p = pickle.dumps(policy)
        policy_pickled = pickle.loads(p)
        output2_action, output2_prob = policy_pickled.get_actions(obs)

        assert output1_prob['mean'].equal(output2_prob['mean'])
        assert output1_action.shape == output2_action.shape
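
As a usage note, the same round trip against a file is how a policy checkpoint is typically written and restored. A minimal sketch, reusing `policy` and `obs` from the example above (the file name is illustrative):

import pickle

with open('policy.pkl', 'wb') as f:
    pickle.dump(policy, f)
with open('policy.pkl', 'rb') as f:
    restored = pickle.load(f)

assert policy.get_actions(obs)[1]['mean'].equal(restored.get_actions(obs)[1]['mean'])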