def test_update_policy_and_value_function(self):
        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1, ))
        sut = ActorCritic('', observation_space, action_space)

        # Set up.
        self._setup_trajectory(sut)
        sut._process_accumulated_trajectory(True)
        sut._trainer = MagicMock()
        sut._adjust_learning_rate = MagicMock()

        # Call test method.
        sut._update_networks()

        # Verify value network behavior.
        self.assertEqual(sut._trainer.train_minibatch.call_count, 1)
        call_args = sut._trainer.train_minibatch.call_args
        np.testing.assert_array_equal(
            call_args[0][0][sut._input_variables],
            [np.array([0.1], np.float32),
             np.array([0.2], np.float32)])
        np.testing.assert_array_almost_equal(
            call_args[0][0][sut._value_network_output_variables],
            [[2.9975], [3.05]])
        np.testing.assert_array_equal(
            call_args[0][0][sut._policy_network_output_variables],
            [np.array([1, 0], np.float32),
             np.array([0, 1], np.float32)])
        np.testing.assert_array_almost_equal(
            call_args[0][0][sut._policy_network_weight_variables],
            [[0.9975], [2.05]])

        # Verify data buffer size.
        self.assertEqual(len(sut._input_buffer), 0)
    def test_init_dqn_preprocessing(self,
                                    mock_parameters,
                                    mock_replay_memory):
        self._setup_parameters(mock_parameters.return_value)
        mock_parameters.return_value.preprocessing = \
            'cntk.contrib.deeprl.agent.shared.preprocessing.AtariPreprocessing'
        mock_parameters.return_value.preprocessing_args = '()'

        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1,))
        sut = QLearning('', observation_space, action_space)

        # Preprocessor with default arguments.
        self.assertIsNotNone(sut._preprocessor)
        self.assertEqual(sut._preprocessor.output_shape(), (4, 84, 84))

        # Preprocessor with arguments passed as a tuple.
        mock_parameters.return_value.preprocessing_args = '(3,)'
        sut = QLearning('', observation_space, action_space)
        self.assertEqual(sut._preprocessor.output_shape(), (3, 84, 84))

        # Preprocessor with inappropriate arguments.
        mock_parameters.return_value.preprocessing_args = '(3, 4)'
        self.assertRaises(
            TypeError, QLearning, '', observation_space, action_space)

        # Undefined preprocessor.
        mock_parameters.return_value.preprocessing = 'undefined'
        self.assertRaises(
            ValueError, QLearning, '', observation_space, action_space)
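    # A minimal sketch (not part of the test suite) of how a preprocessor can
    # be built from the two string parameters exercised above, assuming the
    # agent resolves `preprocessing` as a fully qualified class name and
    # evaluates `preprocessing_args` as a literal tuple. The helper name and
    # the argument order are hypothetical.
    def _sketch_make_preprocessor(class_path, args_string, input_shape):
        import importlib
        from ast import literal_eval
        module_name, class_name = class_path.rsplit('.', 1)
        cls = getattr(importlib.import_module(module_name), class_name)
        extra_args = literal_eval(args_string) if args_string else ()
        # A mismatched argument count (e.g. '(3, 4)') surfaces as TypeError.
        return cls(input_shape, *extra_args)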
    def test_init(self):
        # Discrete observation space.
        action_space = spaces.Discrete(2)
        observation_space = spaces.Discrete(3)
        sut = TabularQLearning('', observation_space, action_space)
        self.assertEqual(sut._num_actions, 2)
        self.assertEqual(sut._num_states, 3)
        self.assertEqual(sut._shape_of_inputs, (3, ))
        self.assertTrue(sut._discrete_observation_space)
        self.assertIsNone(sut._space_discretizer)
        self.assertIsNone(sut._preprocessor)

        # Discretize observation space to default resolution.
        observation_space = spaces.Box(0, 1, (2, ))
        sut = TabularQLearning('', observation_space, action_space)
        self.assertEqual(sut._num_states, 100)
        self.assertEqual(sut._shape_of_inputs, (100, ))
        self.assertTrue(sut._discrete_observation_space)
        self.assertIsNotNone(sut._space_discretizer)
        # Verify encoding of state.
        self.assertEqual(sut._discretize_state_if_necessary([0, 0]), 0)
        self.assertEqual(sut._discretize_state_if_necessary([0.05, 0]), 0)
        self.assertEqual(sut._discretize_state_if_necessary([0.95, 0]), 90)
        self.assertEqual(sut._discretize_state_if_necessary([0, 0.05]), 0)
        self.assertEqual(sut._discretize_state_if_necessary([0, 0.95]), 9)
        self.assertEqual(sut._discretize_state_if_necessary([0.1, 0.2]), 12)
        self.assertEqual(sut._discretize_state_if_necessary([1, 1]), 99)

        # Unsupported observation space for tabular representation.
        observation_space = spaces.MultiBinary(10)
        self.assertRaises(ValueError, TabularQLearning, '', observation_space,
                          action_space)
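    # A minimal sketch (not part of the test suite) of the state encoding
    # verified above, assuming each dimension of the unit box is split into
    # `resolution` equal buckets and the per-dimension digits are combined in
    # row-major order.
    def _sketch_discretize(state, low=0.0, high=1.0, resolution=10):
        index = 0
        for x in state:
            bucket = min(int((x - low) / (high - low) * resolution),
                         resolution - 1)
            index = index * resolution + bucket
        return index  # e.g. [0.1, 0.2] -> 1 * 10 + 2 = 12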
    def test_init_shared_representation(self, mock_parameters):
        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1, ))
        self._setup_parameters(mock_parameters.return_value)
        mock_parameters.return_value.shared_representation = True

        sut = ActorCritic('', observation_space, action_space)

        self.assertEqual(sut._num_actions, 2)
        self.assertIsNone(sut._num_states)
        self.assertEqual(sut._shape_of_inputs, (1, ))
        self.assertFalse(sut._discrete_observation_space)
        self.assertIsNone(sut._space_discretizer)
        self.assertIsNone(sut._preprocessor)

        self.assertTrue(
            set(sut._policy_network.parameters).issubset(
                set(sut._value_network.parameters)))
        diff = set(sut._value_network.parameters).difference(
            set(sut._policy_network.parameters))
        # One for the extra W and one for the extra b.
        self.assertEqual(len(diff), 2)

        shapes = [item.shape for item in diff]
        self.assertEqual(set(shapes), {(2, 1), (1, )})
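    # The sharing verified above implies the value network is built on top of
    # the policy network and adds exactly one linear layer: the two extra
    # parameters are that layer's weight matrix (shape (2, 1)) and its bias
    # (shape (1,)), mapping a width-2 representation to the scalar value.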
    def test_update_q_dqn(self,
                          mock_parameters,
                          mock_replay_memory):
        self._setup_parameters(mock_parameters.return_value)
        self._setup_replay_memory(mock_replay_memory.return_value)

        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1,))
        sut = QLearning('', observation_space, action_space)

        sut._q.eval = \
            MagicMock(return_value=np.array([[[0.2, 0.1]]], np.float32))
        sut._target_q.eval = \
            MagicMock(return_value=np.array([[[0.3, 0.4]]], np.float32))
        sut._trainer = MagicMock()

        sut._update_q_periodically()

        np.testing.assert_array_equal(
            sut._trainer.train_minibatch.call_args[0][0][sut._input_variables],
            [np.array([0.1], np.float32)])
        # 10 (reward) + 0.9 (gamma) * 0.4 (max q_target) = 10.36 -> update
        # the entry for action 0; action 1 keeps its current estimate 0.1.
        np.testing.assert_array_equal(
            sut._trainer.train_minibatch.call_args[0][0][sut._output_variables],
            [np.array([10.36, 0.1], np.float32)])
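    # A minimal sketch (not part of the test suite) reproducing the expected
    # target above: only the taken action's entry is replaced by the one-step
    # TD target, the other entries keep the current Q estimates. Shapes are
    # flattened here for clarity; the mocked eval() outputs are (1, 1, 2).
    def _sketch_dqn_target():
        q = np.array([0.2, 0.1], np.float32)              # Q(s, .)
        q_target_next = np.array([0.3, 0.4], np.float32)  # target net Q(s', .)
        reward, gamma, action = 10, 0.9, 0
        target = q.copy()
        target[action] = reward + gamma * q_target_next.max()
        return target  # -> [10.36, 0.1]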
    def test_rollout_preprocess(self, mock_parameters):
        self._setup_parameters(mock_parameters.return_value)
        mock_parameters.return_value.preprocessing = \
            'cntk.contrib.deeprl.agent.shared.preprocessing.SlidingWindow'
        mock_parameters.return_value.preprocessing_args = '(2, "float32")'

        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1, ))
        sut = ActorCritic('', observation_space, action_space)

        sut._choose_action = Mock(side_effect=[(0, ''), (1, ''), (1, '')])

        sut.start(np.array([0.1], np.float32))
        sut.step(0.1, np.array([0.2], np.float32))
        sut.step(0.2, np.array([0.3], np.float32))

        self.assertEqual(sut._trajectory_rewards, [0.1, 0.2])
        self.assertEqual(sut._trajectory_actions, [0, 1, 1])
        np.testing.assert_array_equal(sut._trajectory_states, [
            np.array([[0], [0.1]], np.float32),
            np.array([[0.1], [0.2]], np.float32),
            np.array([[0.2], [0.3]], np.float32)
        ])

        sut.end(0.3, np.array([0.4], np.float32))

        self.assertEqual(sut._trajectory_rewards, [0.1, 0.2, 0.3])
        self.assertEqual(sut._trajectory_actions, [0, 1, 1])
        np.testing.assert_array_equal(sut._trajectory_states, [
            np.array([[0], [0.1]], np.float32),
            np.array([[0.1], [0.2]], np.float32),
            np.array([[0.2], [0.3]], np.float32)
        ])
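    # A minimal sketch (not part of the test suite) of the stacking the
    # assertions above rely on: a window of 2 observations, zero-padded at the
    # start of an episode. The deque-based implementation is illustrative
    # only, not the SlidingWindow preprocessor's actual code.
    def _sketch_sliding_window(observations, window=2):
        from collections import deque
        buf = deque([np.zeros_like(observations[0])] * window, maxlen=window)
        stacked = []
        for obs in observations:
            buf.append(obs)
            stacked.append(np.array(buf, np.float32))
        return stacked  # [[[0], [0.1]], [[0.1], [0.2]], [[0.2], [0.3]]]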
    def test_init_unsupported_q(self, mock_parameters):
        instance = mock_parameters.return_value
        instance.q_representation = 'undefined'
        instance.preprocessing = ''

        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1,))
        self.assertRaises(
            ValueError, QLearning, '', observation_space, action_space)
    def test_update_q(self, mock_parameters, mock_replay_memory):
        """Test if _update_q_periodically() can finish successfully."""
        self._setup_parameters(mock_parameters.return_value)
        self._setup_replay_memory(mock_replay_memory.return_value)

        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1, ))
        sut = QLearning('', observation_space, action_space)
        sut._trainer.train_minibatch = MagicMock()
        sut._choose_action = MagicMock(side_effect=[
            (1, 'GREEDY'),
            (0, 'GREEDY'),
            (1, 'RANDOM'),
        ])

        action, debug_info = sut.start(np.array([0.1], np.float32))
        self.assertEqual(action, 1)
        self.assertEqual(debug_info['action_behavior'], 'GREEDY')
        self.assertEqual(sut.episode_count, 1)
        self.assertEqual(sut.step_count, 0)
        self.assertEqual(sut._epsilon, 0.1)
        self.assertEqual(sut._trainer.parameter_learners[0].learning_rate(),
                         0.1)
        self.assertEqual(sut._last_state, np.array([0.1], np.float32))
        self.assertEqual(sut._last_action, 1)

        action, debug_info = sut.step(1, np.array([0.2], np.float32))
        self.assertEqual(action, 0)
        self.assertEqual(debug_info['action_behavior'], 'GREEDY')
        self.assertEqual(sut.episode_count, 1)
        self.assertEqual(sut.step_count, 1)
        self.assertEqual(sut._epsilon, 0.09)
        # learning rate remains 0.1 as Q is not updated during this time step.
        self.assertEqual(sut._trainer.parameter_learners[0].learning_rate(),
                         0.1)
        self.assertEqual(sut._last_state, np.array([0.2], np.float32))
        self.assertEqual(sut._last_action, 0)

        action, debug_info = sut.step(2, np.array([0.3], np.float32))
        self.assertEqual(action, 1)
        self.assertEqual(debug_info['action_behavior'], 'RANDOM')
        self.assertEqual(sut.episode_count, 1)
        self.assertEqual(sut.step_count, 2)
        self.assertEqual(sut._epsilon, 0.08)
        self.assertEqual(sut._trainer.parameter_learners[0].learning_rate(),
                         0.08)
        self.assertEqual(sut._last_state, np.array([0.3], np.float32))
        self.assertEqual(sut._last_action, 1)

        sut.end(3, np.array([0.4], np.float32))
        self.assertEqual(sut.episode_count, 1)
        self.assertEqual(sut.step_count, 3)
        self.assertEqual(sut._epsilon, 0.08)
        # learning rate remains 0.08 as Q is not updated during this time step.
        self.assertEqual(sut._trainer.parameter_learners[0].learning_rate(),
                         0.08)
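    # A minimal sketch (not part of the test suite) of the epsilon schedule
    # observed above, assuming linear decay; the initial value 0.1 and the
    # 10-step decay horizon are inferred from the asserted values, not read
    # from _setup_parameters.
    def _sketch_epsilon(step, initial=0.1, minimum=0.0, decay_steps=10):
        return max(minimum, initial - (initial - minimum) * step / decay_steps)
        # steps 0, 1, 2 -> 0.1, 0.09, 0.08, matching the assertions above.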
    def test_init_dqn_huber_loss(self, mock_parameters, mock_model):
        self._setup_parameters(mock_parameters.return_value)
        mock_parameters.return_value.use_error_clipping = True
        mock_model.return_value = self._setup_test_model()

        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1,))
        sut = QLearning('', observation_space, action_space)

        mock_model.assert_called_with((1,), 2, '[2]', huber_loss)
    def test_scalar(self):
        s = spaces.Box(0, 1, (2, ))
        sut = BoxSpaceDiscretizer(s, 10)

        self.assertEqual(sut.discretize([0, 0]), 0)
        self.assertEqual(sut.discretize([0.05, 0]), 0)
        self.assertEqual(sut.discretize([0.95, 0]), 90)
        self.assertEqual(sut.discretize([0, 0.05]), 0)
        self.assertEqual(sut.discretize([0, 0.95]), 9)
        self.assertEqual(sut.discretize([0.1, 0.2]), 12)
        self.assertEqual(sut.discretize([1, 1]), 99)
    def test_init_dqn_prioritized_replay(self, mock_parameters,
                                         mock_replay_memory):
        self._setup_parameters(mock_parameters.return_value)
        mock_parameters.return_value.use_prioritized_replay = True

        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1, ))
        sut = QLearning('', observation_space, action_space)

        self.assertIsNotNone(sut._weight_variables)
        mock_replay_memory.assert_called_with(100, True)
    def test_init_box_observation_space(self):
        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1, ))
        sut = FakeAgentBaseClass(observation_space, action_space)

        self.assertEqual(sut._num_actions, 2)
        self.assertIsNone(sut._num_states)
        self.assertEqual(sut._shape_of_inputs, (1, ))
        self.assertFalse(sut._discrete_observation_space)
        self.assertIsNone(sut._space_discretizer)
        self.assertIsNone(sut._preprocessor)
    def test_update_q_dqn_prioritized_replay(self,
                                             mock_parameters,
                                             mock_replay_memory):
        self._setup_parameters(mock_parameters.return_value)
        mock_parameters.return_value.use_prioritized_replay = True
        self._setup_prioritized_replay_memory(mock_replay_memory.return_value)

        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1,))
        sut = QLearning('', observation_space, action_space)

        # Use side_effect so every eval() call returns a fresh array.
        def new_q_value(*args, **kwargs):
            return np.array([[[0.2, 0.1]]], np.float32)
        sut._q.eval = MagicMock(side_effect=new_q_value)
        sut._target_q.eval = MagicMock(
            return_value=np.array([[[0.3, 0.4]]], np.float32))
        sut._trainer = MagicMock()

        sut._update_q_periodically()

        self.assertEqual(sut._trainer.train_minibatch.call_count, 1)
        np.testing.assert_array_equal(
            sut._trainer.train_minibatch.call_args[0][0][sut._input_variables],
            [
                np.array([0.1], np.float32),
                np.array([0.3], np.float32),
                np.array([0.1], np.float32)
            ])
        np.testing.assert_array_equal(
            sut._trainer.train_minibatch.call_args[0][0][sut._output_variables],
            [
                # 10 (reward) + 0.9 (gamma) * 0.4 (max q_target) = 10.36
                np.array([10.36, 0.1], np.float32),
                # 11 (reward) + 0.9 (gamma) * 0.4 (max q_target) = 11.36
                np.array([0.2, 11.36], np.float32),
                np.array([10.36, 0.1], np.float32)
            ])
        np.testing.assert_almost_equal(
            sut._trainer.train_minibatch.call_args[0][0][sut._weight_variables],
            [
                [0.16666667],
                [0.66666667],
                [0.16666667]
            ])
        self.assertAlmostEqual(
            sut._replay_memory.update_priority.call_args[0][0][3],
            105.2676)  # (10.16 + 0.1)^2, with td_error 10.16 = 10.36 - 0.2
        self.assertAlmostEqual(
            sut._replay_memory.update_priority.call_args[0][0][4],
            129.0496,
            places=6)  # (11.26 + 0.1)^2, with td_error 11.26 = 11.36 - 0.1
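    # A minimal sketch (not part of the test suite) of the prioritized-replay
    # quantities asserted above. The priority rule (|td_error| + 0.1) ** 2 and
    # the sampled priorities [4, 1, 4] are inferred from the expected numbers,
    # not read from the implementation.
    def _sketch_priority(td_error, epsilon=0.1):
        return (abs(td_error) + epsilon) ** 2  # (10.16 + 0.1)**2 = 105.2676

    def _sketch_is_weights(sampled_priorities):
        inv = 1.0 / np.asarray(sampled_priorities, np.float64)
        return inv / inv.sum()  # [4, 1, 4] -> [1/6, 2/3, 1/6]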
    def test_array(self):
        s = spaces.Box(0, 1, (2, 2))
        sut = BoxSpaceDiscretizer(s, np.array([[2, 2], [2, 2]]))

        self.assertEqual(sut.discretize([[0, 0], [0, 0]]), 0)
        self.assertEqual(sut.discretize([[0.05, 0], [0, 0]]), 0)
        self.assertEqual(sut.discretize([[0.95, 0], [0, 0]]), 8)
        self.assertEqual(sut.discretize([[0, 0.05], [0, 0]]), 0)
        self.assertEqual(sut.discretize([[0, 0.95], [0, 0]]), 4)
        self.assertEqual(sut.discretize([[0, 0], [0.05, 0]]), 0)
        self.assertEqual(sut.discretize([[0, 0], [0.95, 0]]), 2)
        self.assertEqual(sut.discretize([[0, 0], [0, 0.05]]), 0)
        self.assertEqual(sut.discretize([[0, 0], [0, 0.95]]), 1)
        self.assertEqual(sut.discretize([[0.1, 0.6], [0.5, 0.2]]), 6)
        self.assertEqual(sut.discretize([[1, 1], [1, 1]]), 15)
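    # A minimal sketch (not part of the test suite) of the mixed-radix
    # indexing checked above: every entry gets its own resolution, digits are
    # combined in row-major (C) order, and the unit box [0, 1] is assumed.
    def _sketch_discretize_multi(state, resolutions):
        index = 0
        for x, r in zip(np.ravel(state), np.ravel(resolutions)):
            bucket = min(int(x * r), r - 1)
            index = index * r + bucket
        return index  # [[0.1, 0.6], [0.5, 0.2]] with all-2 resolutions -> 6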
    def test_process_accumulated_trajectory_keep_last(self):
        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1, ))
        sut = ActorCritic('', observation_space, action_space)

        # Set up.
        self._setup_trajectory(sut)

        # Call test method.
        sut._process_accumulated_trajectory(True)

        # Verify results.
        self.assertEqual(len(sut._trajectory_rewards), 0)
        self.assertEqual(len(sut._trajectory_actions), 0)
        self.assertEqual(sut._trajectory_states, [np.array([0.3], np.float32)])
    def test_init_from_existing_model(self, mock_parameters):
        action_space = spaces.Discrete(3)
        observation_space = spaces.Box(np.array([-1.2, -0.07]),
                                       np.array([0.6, 0.07]))
        mock_parameters.return_value.policy_representation = 'nn'
        mock_parameters.return_value.policy_network_hidden_layers = '[2]'
        mock_parameters.return_value.initial_policy_network = \
            'tests/data/initial_policy_network.dnn'
        mock_parameters.return_value.preprocessing = ''

        sut = ActorCritic('', observation_space, action_space)

        self.assertEqual(sut._num_actions, 3)
        self.assertIsNone(sut._num_states)
        self.assertEqual(sut._shape_of_inputs, (2, ))
        self.assertFalse(sut._discrete_observation_space)
        self.assertIsNone(sut._space_discretizer)
        self.assertIsNone(sut._preprocessor)

        # Incompatible network structure.
        mock_parameters.return_value.policy_network_hidden_layers = '[]'
        self.assertRaises(Exception, ActorCritic, '', observation_space,
                          action_space)

        # Incompatible action space.
        mock_parameters.return_value.policy_network_hidden_layers = '[2]'
        action_space = spaces.Discrete(2)
        self.assertRaises(ValueError, ActorCritic, '', observation_space,
                          action_space)

        # Incompatible observation space.
        action_space = spaces.Discrete(3)
        observation_space = spaces.Box(np.array([-1.2, -0.07, -1.0]),
                                       np.array([0.6, 0.07, 1.0]))
        self.assertRaises(ValueError, ActorCritic, '', observation_space,
                          action_space)
    def test_rollout_with_update(self, mock_parameters):
        self._setup_parameters(mock_parameters.return_value)
        mock_parameters.return_value.update_frequency = 2

        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1, ))
        sut = ActorCritic('', observation_space, action_space)
        sut._update_networks = MagicMock()

        sut._choose_action = Mock(
            side_effect=[(0, ''), (1, ''), (1, ''), (0, ''), (1, ''), (0, '')])

        sut.start(np.array([0.1], np.float32))
        sut.step(0.1, np.array([0.2], np.float32))
        self.assertEqual(sut._trajectory_rewards, [0.1])
        self.assertEqual(sut._trajectory_actions, [0, 1])
        self.assertEqual(sut._trajectory_states, [0.1, 0.2])
        self.assertEqual(sut._update_networks.call_count, 0)

        sut.step(0.2, np.array([0.3], np.float32))
        self.assertEqual(sut._trajectory_rewards, [])
        self.assertEqual(sut._trajectory_actions, [1])
        self.assertEqual(sut._trajectory_states, [0.3])
        self.assertEqual(sut._update_networks.call_count, 1)

        sut.step(0.3, np.array([0.4], np.float32))
        self.assertEqual(sut._trajectory_rewards, [0.3])
        self.assertEqual(sut._trajectory_actions, [1, 0])
        self.assertEqual(sut._trajectory_states, [0.3, 0.4])
        self.assertEqual(sut._update_networks.call_count, 1)

        sut.start(np.array([0.5], np.float32))
        self.assertEqual(sut._trajectory_rewards, [])
        self.assertEqual(sut._trajectory_actions, [1])
        self.assertEqual(sut._trajectory_states, [0.5])
        self.assertEqual(sut._update_networks.call_count, 1)

        sut.step(0.4, np.array([0.6], np.float32))
        self.assertEqual(sut._trajectory_rewards, [])
        self.assertEqual(sut._trajectory_actions, [0])
        self.assertEqual(sut._trajectory_states, [0.6])
        self.assertEqual(sut._update_networks.call_count, 2)

        sut.end(0.5, np.array([0.7], np.float32))
        self.assertEqual(sut._trajectory_rewards, [0.5])
        self.assertEqual(sut._trajectory_actions, [0])
        self.assertEqual(sut._trajectory_states, [0.6])
        self.assertEqual(sut._update_networks.call_count, 2)
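    # Note the flush pattern verified above: every `update_frequency` steps
    # the accumulated trajectory is consumed by _update_networks(), and only
    # the most recent state and action are kept so the next partial
    # trajectory can bootstrap from them.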
    def test_init_unsupported_model(self, mock_parameters):
        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1, ))
        self._setup_parameters(mock_parameters.return_value)

        # Verify sut can be constructed.
        sut = ActorCritic('', observation_space, action_space)

        mock_parameters.return_value.policy_representation = 'undefined'
        self.assertRaises(ValueError, ActorCritic, '', observation_space,
                          action_space)

        mock_parameters.return_value.policy_representation = 'nn'
        mock_parameters.return_value.value_function_representation = 'undefined'
        self.assertRaises(ValueError, ActorCritic, '', observation_space,
                          action_space)
    def test_replay_start_size(self, mock_parameters):
        self._setup_parameters(mock_parameters.return_value)
        # Set exploration rate to 0 so actions become greedy once the replay
        # start size is reached.
        mock_parameters.return_value.initial_epsilon = 0
        mock_parameters.return_value.epsilon_decay_step_count = 100
        mock_parameters.return_value.epsilon_minimum = 0
        mock_parameters.return_value.replay_start_size = 3

        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1,))
        sut = QLearning('', observation_space, action_space)
        sut._trainer = MagicMock()
        sut._replay_memory = MagicMock()

        _, debug = sut.start(np.array([0.1], np.float32))
        self.assertEqual(sut.step_count, 0)
        self.assertEqual(sut._trainer.train_minibatch.call_count, 0)
        self.assertEqual(debug['action_behavior'], 'RANDOM')

        _, debug = sut.step(0.1, np.array([0.2], np.float32))
        self.assertEqual(sut.step_count, 1)
        self.assertEqual(sut._trainer.train_minibatch.call_count, 0)
        self.assertEqual(debug['action_behavior'], 'RANDOM')

        sut.end(0.2, np.array([0.3], np.float32))
        self.assertEqual(sut.step_count, 2)
        self.assertEqual(sut._trainer.train_minibatch.call_count, 0)

        _, debug = sut.start(np.array([0.4], np.float32))
        self.assertEqual(sut.step_count, 2)
        self.assertEqual(sut._trainer.train_minibatch.call_count, 0)
        self.assertEqual(debug['action_behavior'], 'RANDOM')

        a, debug = sut.step(0.3, np.array([0.5], np.float32))
        self.assertEqual(sut.step_count, 3)
        self.assertEqual(sut._trainer.train_minibatch.call_count, 0)
        self.assertEqual(debug['action_behavior'], 'GREEDY')

        a, debug = sut.start(np.array([0.6], np.float32))
        self.assertEqual(sut.step_count, 3)
        self.assertEqual(sut._trainer.train_minibatch.call_count, 0)
        self.assertEqual(debug['action_behavior'], 'GREEDY')

        a, debug = sut.step(0.4, np.array([0.7], np.float32))
        self.assertEqual(sut.step_count, 4)
        self.assertEqual(sut._trainer.train_minibatch.call_count, 1)
        self.assertEqual(debug['action_behavior'], 'GREEDY')
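    # A minimal sketch (not part of the test suite) of the gating verified
    # above: until replay_start_size transitions have been collected the agent
    # acts randomly and never trains; with epsilon forced to 0 it then
    # switches to greedy actions, and training starts at the next Q update.
    def _sketch_behavior(step_count, epsilon, replay_start_size=3):
        import random
        if step_count < replay_start_size:
            return 'RANDOM'
        return 'RANDOM' if random.random() < epsilon else 'GREEDY'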
    def test_init_customized_q(self, mock_parameters, mock_model):
        self._setup_parameters(mock_parameters.return_value)
        mock_parameters.return_value.q_representation = \
            'cntk.contrib.deeprl.agent.shared.customized_models.conv_dqn'
        mock_model.return_value = self._setup_test_model()

        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1,))
        sut = QLearning('', observation_space, action_space)

        self.assertEqual(sut._num_actions, 2)
        self.assertIsNone(sut._num_states)
        self.assertEqual(sut._shape_of_inputs, (1,))
        self.assertFalse(sut._discrete_observation_space)
        self.assertIsNone(sut._space_discretizer)
        self.assertIsNone(sut._preprocessor)
        mock_model.assert_called_with((1,), 2, None)
    def test_init_dueling_dqn(self, mock_parameters, mock_model):
        self._setup_parameters(mock_parameters.return_value)
        mock_parameters.return_value.q_representation = 'dueling-dqn'
        mock_parameters.return_value.hidden_layers = '[2, [2], [2]]'
        mock_model.return_value = self._setup_test_model()

        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1,))
        sut = QLearning('', observation_space, action_space)

        self.assertEqual(sut._num_actions, 2)
        self.assertIsNone(sut._num_states)
        self.assertEqual(sut._shape_of_inputs, (1,))
        self.assertFalse(sut._discrete_observation_space)
        self.assertIsNone(sut._space_discretizer)
        self.assertIsNone(sut._preprocessor)
        mock_model.assert_called_with((1,), 2, '[2, [2], [2]]', None)
    def test_init_dqn(self, mock_parameters, mock_model, mock_replay_memory):
        self._setup_parameters(mock_parameters.return_value)
        mock_model.return_value = self._setup_test_model()

        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1, ))
        sut = QLearning('', observation_space, action_space)

        self.assertEqual(sut._num_actions, 2)
        self.assertIsNone(sut._num_states)
        self.assertEqual(sut._shape_of_inputs, (1, ))
        self.assertFalse(sut._discrete_observation_space)
        self.assertIsNone(sut._space_discretizer)
        self.assertIsNone(sut._preprocessor)
        self.assertFalse(hasattr(sut, 'weight_variables'))
        self.assertIsNotNone(sut._trainer)
        mock_model.assert_called_with((1, ), 2, '[2]', None)
        mock_replay_memory.assert_called_with(100, False)
    def test_populate_replay_memory(self, mock_parameters):
        self._setup_parameters(mock_parameters.return_value)
        mock_parameters.return_value.preprocessing = \
            'cntk.contrib.deeprl.agent.shared.preprocessing.SlidingWindow'
        mock_parameters.return_value.preprocessing_args = '(2, )'

        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1,))
        sut = QLearning('', observation_space, action_space)

        sut._compute_priority = Mock(side_effect=[1, 2, 3])
        sut._choose_action = Mock(
            side_effect=[(0, ''), (0, ''), (1, ''), (1, '')])
        sut._replay_memory = MagicMock()
        sut._update_q_periodically = MagicMock()

        sut.start(np.array([0.1], np.float32))
        sut.step(0.1, np.array([0.2], np.float32))
        sut.step(0.2, np.array([0.3], np.float32))
        sut.end(0.3, np.array([0.4], np.float32))

        self.assertEqual(sut._replay_memory.store.call_count, 3)

        call_args = sut._replay_memory.store.call_args_list[0]
        np.testing.assert_array_equal(
            call_args[0][0],
            np.array([[0], [0.1]], np.float32))
        self.assertEqual(call_args[0][1], 0)
        self.assertEqual(call_args[0][2], 0.1)
        np.testing.assert_array_equal(
            call_args[0][3],
            np.array([[0.1], [0.2]], np.float32))
        self.assertEqual(call_args[0][4], 1)

        call_args = sut._replay_memory.store.call_args_list[2]
        np.testing.assert_array_equal(
            call_args[0][0],
            np.array([[0.2], [0.3]], np.float32))
        self.assertEqual(call_args[0][1], 1)
        self.assertEqual(call_args[0][2], 0.3)
        self.assertIsNone(call_args[0][3])
        self.assertEqual(call_args[0][4], 3)
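    # The layout asserted above: store() receives (state, action, reward,
    # next_state, priority), where states are the stacked sliding-window
    # observations, the terminal transition's next_state is None, and the
    # priority comes from _compute_priority().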
    def test_rollout(self):
        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1, ))
        sut = ActorCritic('', observation_space, action_space)

        sut._choose_action = Mock(side_effect=[(0, ''), (1, ''), (1, '')])

        sut.start(np.array([0.1], np.float32))
        sut.step(0.1, np.array([0.2], np.float32))
        sut.step(0.2, np.array([0.3], np.float32))

        self.assertEqual(sut._trajectory_rewards, [0.1, 0.2])
        self.assertEqual(sut._trajectory_actions, [0, 1, 1])
        self.assertEqual(sut._trajectory_states, [0.1, 0.2, 0.3])

        sut.end(0.3, np.array([0.4], np.float32))

        self.assertEqual(sut._trajectory_rewards, [0.1, 0.2, 0.3])
        self.assertEqual(sut._trajectory_actions, [0, 1, 1])
        self.assertEqual(sut._trajectory_states, [0.1, 0.2, 0.3])
    def test_init_customized_model(self, mock_parameters, mock_model):
        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1,))
        self._setup_parameters(mock_parameters.return_value)
        mock_parameters.return_value.policy_representation = \
            'cntk.contrib.deeprl.agent.shared.customized_models.conv_dqn'
        mock_parameters.return_value.value_function_representation = \
            'cntk.contrib.deeprl.agent.shared.customized_models.conv_dqn'
        mock_model.side_effect = self._setup_test_model

        sut = ActorCritic('', observation_space, action_space)

        self.assertEqual(mock_model.call_count, 2)
        mock_model.assert_has_calls(
            [
                mock.call((1,), 2, cross_entropy_with_softmax,
                    use_placeholder_for_input=True),
                mock.call((1,), 1, use_placeholder_for_input=True)
            ],
            any_order=True)
    def test_list(self):
        s = spaces.Box(0, 1, (2, ))
        sut = BoxSpaceDiscretizer(s, np.array([10, 2]))

        self.assertEqual(sut.discretize([0, 0]), 0)
        self.assertEqual(sut.discretize([0.05, 0]), 0)
        self.assertEqual(sut.discretize([0.95, 0]), 18)
        self.assertEqual(sut.discretize([0, 0.05]), 0)
        self.assertEqual(sut.discretize([0, 0.95]), 1)
        self.assertEqual(sut.discretize([0.1, 0.2]), 2)
        self.assertEqual(sut.discretize([1, 1]), 19)

        sut = BoxSpaceDiscretizer(s, np.array([10, 1]))

        self.assertEqual(sut.discretize([0, 0]), 0)
        self.assertEqual(sut.discretize([0.05, 0]), 0)
        self.assertEqual(sut.discretize([0.95, 0]), 9)
        self.assertEqual(sut.discretize([0, 0.05]), 0)
        self.assertEqual(sut.discretize([0, 0.95]), 0)
        self.assertEqual(sut.discretize([0.1, 0.2]), 1)
        self.assertEqual(sut.discretize([1, 1]), 9)
    def test_init_preprocess(self, mock_parameters, mock_model):
        self._setup_parameters(mock_parameters.return_value)
        mock_parameters.return_value.preprocessing = \
            'cntk.contrib.deeprl.agent.shared.preprocessing.SlidingWindow'
        mock_parameters.return_value.preprocessing_args = '(2, )'
        mock_model.side_effect = self._setup_test_model

        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1,))
        sut = ActorCritic('', observation_space, action_space)

        self.assertIsNotNone(sut._preprocessor)
        self.assertEqual(sut._preprocessor.output_shape(), (2, 1))
        self.assertEqual(mock_model.call_count, 2)
        mock_model.assert_has_calls(
            [
                mock.call((2, 1), 2, '[2]', cross_entropy_with_softmax,
                    use_placeholder_for_input=True),
                mock.call((2, 1), 1, '[2]', use_placeholder_for_input=True)
            ],
            any_order=True)
    def test_init(self, mock_model):
        mock_model.side_effect = self._setup_test_model

        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1,))
        sut = ActorCritic('', observation_space, action_space)

        self.assertEqual(sut._num_actions, 2)
        self.assertIsNone(sut._num_states)
        self.assertEqual(sut._shape_of_inputs, (1,))
        self.assertFalse(sut._discrete_observation_space)
        self.assertIsNone(sut._space_discretizer)
        self.assertIsNone(sut._preprocessor)
        self.assertEqual(mock_model.call_count, 2)
        mock_model.assert_has_calls(
            [
                mock.call((1,), 2, '[10]', cross_entropy_with_softmax,
                    use_placeholder_for_input=True),
                mock.call((1,), 1, '[10]', use_placeholder_for_input=True)
            ],
            any_order=True)
    def test_process_accumulated_trajectory(self):
        action_space = spaces.Discrete(2)
        observation_space = spaces.Box(0, 1, (1,))
        sut = ActorCritic('', observation_space, action_space)

        # Set up.
        self._setup_trajectory(sut)

        # Call test method.
        sut._process_accumulated_trajectory(False)

        # Verify results.
        self.assertEqual(len(sut._trajectory_rewards), 0)
        self.assertEqual(len(sut._trajectory_actions), 0)
        self.assertEqual(len(sut._trajectory_states), 0)

        np.testing.assert_array_equal(
            sut._input_buffer,
            [np.array([0.1], np.float32), np.array([0.2], np.float32)])
        # Floating-point rounding yields [2.9974999999999996] instead of
        # exactly [2.9975] (0.95 * 3.05 + 0.1 is not exactly representable),
        # so use assert_array_almost_equal for this check.
        np.testing.assert_array_almost_equal(
            sut._value_network_output_buffer,
            [
                [2.9975],    # 3.05 * 0.95 + 0.1
                [3.05]       # 3 (initial_r) * 0.95 + 0.2
            ])
        np.testing.assert_array_equal(
            sut._policy_network_output_buffer,
            [
                np.array([1, 0], np.float32),
                np.array([0, 1], np.float32)
            ]
        )
        np.testing.assert_array_almost_equal(
            sut._policy_network_weight_buffer,
            [
                [0.9975],    # 2.9975 - 2
                [2.05]       # 3.05 - 1
            ])
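    # A minimal sketch (not part of the test suite) of the buffers asserted
    # above: discounted returns R_t = r_t + gamma * R_{t+1}, bootstrapped with
    # the value of the last state (initial_r = 3), and policy weights equal
    # to the advantages R_t - V(s_t) with mocked values V = [2, 1].
    def _sketch_targets(rewards, bootstrap, values, gamma=0.95):
        returns, running = [], bootstrap
        for reward in reversed(rewards):
            running = reward + gamma * running
            returns.insert(0, running)
        advantages = [r - v for r, v in zip(returns, values)]
        return returns, advantages
        # _sketch_targets([0.1, 0.2], 3, [2, 1])
        # -> ([2.9975, 3.05], [0.9975, 2.05])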
    def test_init_unsupported_action_space(self):
        action_space = spaces.Box(0, 1, (1, ))
        observation_space = spaces.Discrete(3)
        self.assertRaises(ValueError, FakeAgentBaseClass, observation_space,
                          action_space)