Code example #1
File: test_td.py  Project: KPostOffice/adviser
    def test_n_step_td_step_adjust(self, context: Context) -> None:
        """Test adjusting steps taken on reward signal propagation."""
        predictor = TemporalDifference(step=1)
        predictor._temperature = 1.0
        predictor._steps_taken = 1
        package_tuple = ("tensorflow", "2.3.1", "https://pypi.org/simple")
        state = State()
        state.add_resolved_dependency(package_tuple)
        with predictor.assigned_context(context):
            predictor.set_reward_signal(state, package_tuple, 0.33)

        assert predictor._policy.get(package_tuple) == [0.33, 1]
        assert predictor._steps_taken == 0
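
The assertions above imply that the predictor keeps a policy table keyed by package tuples, where each entry stores [cumulative_reward, hit_count], and that the internal step counter is reset once the configured step count has been reached. The following is a minimal sketch of just the policy-update bookkeeping those assertions describe; propagate_reward is an illustrative name, not part of the adviser API.

from typing import Dict, List, Tuple

PackageTuple = Tuple[str, str, str]


def propagate_reward(
    policy: Dict[PackageTuple, List[float]],
    resolved: List[PackageTuple],
    reward: float,
) -> None:
    """Add the reward to every resolved package and bump its hit count."""
    for package_tuple in resolved:
        entry = policy.setdefault(package_tuple, [0.0, 0])
        entry[0] += reward
        entry[1] += 1


# Mirrors the test's expectation: one resolved package, reward 0.33,
# and a policy entry of [0.33, 1] afterwards.
policy: Dict[PackageTuple, List[float]] = {}
propagate_reward(policy, [("tensorflow", "2.3.1", "https://pypi.org/simple")], 0.33)
assert policy[("tensorflow", "2.3.1", "https://pypi.org/simple")] == [0.33, 1]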
Code example #2
File: test_td.py  Project: KPostOffice/adviser
    def test_set_reward_signal_nan_inf(self, float_case: float) -> None:
        """Test (not) keeping the reward signal for nan/inf."""
        predictor = TemporalDifference()
        state = State()
        state.add_resolved_dependency(
            ("tensorflow", "2.3.0", "https://pypi.org/simple"))
        state.add_resolved_dependency(
            ("flask", "0.12", "https://pypi.org/simple"))
        state.add_unresolved_dependency(
            ("termial-random", "0.0.2", "https://pypi.org/simple"))
        predictor._policy = {
            ("flask", "0.12", "https://pypi.org/simple"): [0.2, 1],
        }
        predictor._steps_taken = 2
        predictor._steps_reward = 1.2
        predictor._next_state = state

        assert (predictor.set_reward_signal(
            state, ("tensorflow", "2.0.0", "https://pypi.org/simple"),
            float_case) is None)

        assert predictor._policy == {
            ("flask", "0.12", "https://pypi.org/simple"): [1.4, 2],
            ("tensorflow", "2.3.0", "https://pypi.org/simple"): [1.2, 1],
        }
        assert predictor._steps_taken == 0
        assert predictor._steps_reward == 0.0
        assert predictor._next_state is None
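
In this example a nan/inf reward marks the end of a resolution round: the accumulated _steps_reward (1.2) is credited to every resolved dependency of the state (flask goes from [0.2, 1] to [1.4, 2], tensorflow 2.3.0 gets a fresh [1.2, 1]) and the counters are reset. Below is a rough sketch of that terminal branch, assuming the same [cumulative_reward, hit_count] layout as above; credit_terminal_reward is a hypothetical helper, not adviser code.

import math
from typing import Dict, List, Tuple

PackageTuple = Tuple[str, str, str]
Policy = Dict[PackageTuple, List[float]]


def credit_terminal_reward(
    policy: Policy, resolved: List[PackageTuple], steps_reward: float
) -> None:
    """Credit the reward accumulated during the walk to every resolved package."""
    for package_tuple in resolved:
        entry = policy.setdefault(package_tuple, [0.0, 0])
        entry[0] += steps_reward
        entry[1] += 1


reward = float("nan")  # or float("inf"), as in the parametrized float_case
policy: Policy = {("flask", "0.12", "https://pypi.org/simple"): [0.2, 1]}
resolved = [
    ("tensorflow", "2.3.0", "https://pypi.org/simple"),
    ("flask", "0.12", "https://pypi.org/simple"),
]
if math.isnan(reward) or math.isinf(reward):
    credit_terminal_reward(policy, resolved, steps_reward=1.2)

# Matches the expected policy in the test above (compare floats tolerantly).
assert math.isclose(policy[("flask", "0.12", "https://pypi.org/simple")][0], 1.4)
assert policy[("tensorflow", "2.3.0", "https://pypi.org/simple")] == [1.2, 1]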
Code example #3
    def test_set_reward_signal_unseen(self) -> None:
        """Test keeping the reward signal for an unseen step."""
        reward = 42.24
        package_tuple = ("tensorflow", "2.0.0", "https://thoth-station.ninja")

        state = flexmock()
        state.should_receive("iter_resolved_dependencies").and_return([package_tuple]).once()

        predictor = TemporalDifference()
        predictor._policy = {
            ("numpy", "1.0.0", "https://pypi.org/simple"): [30.30, 92],
        }

        predictor._steps_taken = 1
        predictor.set_reward_signal(state, None, reward)

        assert predictor._policy == {
            package_tuple: [42.24, 1],
            ("numpy", "1.0.0", "https://pypi.org/simple"): [30.30, 92],
        }
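
Given the [cumulative_reward, hit_count] layout seen in all three tests, the learned policy can be collapsed into a mean reward per package, which is one natural way to rank candidates during resolution. The helper below only illustrates that reading of the policy entries; average_rewards is not part of the adviser API.

from typing import Dict, List, Tuple

PackageTuple = Tuple[str, str, str]


def average_rewards(policy: Dict[PackageTuple, List[float]]) -> Dict[PackageTuple, float]:
    """Turn [cumulative_reward, hit_count] entries into a mean reward per package."""
    return {package: total / count for package, (total, count) in policy.items()}


policy = {
    ("tensorflow", "2.0.0", "https://thoth-station.ninja"): [42.24, 1],
    ("numpy", "1.0.0", "https://pypi.org/simple"): [30.30, 92],
}
means = average_rewards(policy)
assert means[("tensorflow", "2.0.0", "https://thoth-station.ninja")] == 42.24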