Code example #1
File: test_mcts.py Project: tlegen-k/adviser
    def test_pre_run(self) -> None:
        """Test calling pre-run for the initialization part."""
        flexmock(TemporalDifference)
        TemporalDifference.should_receive("pre_run").once()

        state = flexmock()
        predictor = MCTS()
        assert predictor._next_state is None
        predictor._next_state = state
        predictor.pre_run()
        assert predictor._next_state is None
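
The assertion above boils down to two things: pre_run() resets the scheduled _next_state back to None and calls the parent TemporalDifference.pre_run() (which the test mocks with flexmock). Below is a minimal sketch of that contract only, using stand-in class names rather than the project's implementation.

class _TemporalDifferenceSketch:
    """Stand-in for TemporalDifference; pre_run() is what the test above mocks."""

    def pre_run(self) -> None:
        pass


class _MCTSSketch(_TemporalDifferenceSketch):
    def __init__(self) -> None:
        self._next_state = None

    def pre_run(self) -> None:
        # Drop any state scheduled for expansion, then delegate to the parent
        # predictor's initialization (the call the test verifies with flexmock).
        self._next_state = None
        super().pre_run()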
Code example #2
File: test_mcts.py Project: tlegen-k/adviser
    def test_run_no_next_state(self, context: Context) -> None:
        """Test running the predictor when no next state is scheduled."""
        predictor = MCTS()
        assert predictor._next_state is None

        # If no next state is kept, we follow the TD-learning logic.
        flexmock(TemporalDifference)
        state = flexmock()
        unresolved_dependency = flexmock()
        TemporalDifference.should_receive("run").with_args().and_return(state, unresolved_dependency).once()
        context.iteration = 1000000  # Some big number not to hit the heat-up part.
        with predictor.assigned_context(context):
            assert predictor.run() == (state, unresolved_dependency)
Code example #3
File: test_mcts.py Project: tlegen-k/adviser
    def test_run_heat_up(self, context: Context, next_state) -> None:
        """Test running the predictor in the "heat-up" phase regardless next state being set."""
        state = flexmock()
        unresolved_dependency = ("tensorflow", "2.0.0", "https://pypi.org/simple")

        predictor = MCTS()
        predictor._next_state = None

        flexmock(TemporalDifference)
        TemporalDifference.should_receive("run").with_args().and_return(state, unresolved_dependency).once()

        context.iteration = 1  # Some small number to hit the heat-up part.
        with predictor.assigned_context(context):
            assert predictor.run() == (state, unresolved_dependency)
Code example #4
File: test_mcts.py Project: tlegen-k/adviser
    def test_run_next_state_no_last(self, context: Context) -> None:
        """Test running the predictor when the next state is not last state added to beam."""
        state = flexmock()
        unresolved_dependency = ("tensorflow", "2.0.0", "https://pypi.org/simple")

        predictor = MCTS()
        predictor._next_state = flexmock()
        context.beam.should_receive("get_last").and_return(flexmock()).once()

        flexmock(TemporalDifference)
        TemporalDifference.should_receive("run").with_args().and_return(state, unresolved_dependency).once()

        context.iteration = 1000000  # Some big number not to hit the heat-up part.
        with predictor.assigned_context(context):
            assert predictor.run() == (state, unresolved_dependency)
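
Code examples #2, #3, and #4 each pin down one branch of MCTS.run() that falls back to the parent TemporalDifference.run(). The outline below is a hedged reconstruction of that branching as the tests imply it; the class names, the _HEAT_UP_ITERATIONS constant, and the final fallthrough are illustrative assumptions, while context.iteration, beam.get_last(), and get_random_unresolved_dependency() do appear in the examples on this page.

from typing import Any, Optional, Tuple


class _TemporalDifferenceSketch:
    """Stand-in for TemporalDifference; its run() is what the tests mock."""

    def __init__(self) -> None:
        self.context: Any = None  # assigned via assigned_context() in the tests

    def run(self) -> Tuple[Any, Any]:
        raise NotImplementedError  # mocked to return (state, unresolved_dependency)


class _MCTSSketch(_TemporalDifferenceSketch):
    _HEAT_UP_ITERATIONS = 10  # assumed threshold, not a value from the project

    def __init__(self) -> None:
        super().__init__()
        self._next_state: Optional[Any] = None

    def run(self) -> Tuple[Any, Any]:
        # Heat-up phase: fall back to the TD-learning strategy regardless of any
        # scheduled next state (code example #3).
        if self.context.iteration < self._HEAT_UP_ITERATIONS:
            return super().run()

        # No next state scheduled: follow the TD-learning logic (code example #2).
        if self._next_state is None:
            return super().run()

        # The scheduled state is no longer the last state added to the beam, so it
        # cannot simply be expanded further (code example #4).
        if self._next_state is not self.context.beam.get_last():
            return super().run()

        # Otherwise the scheduled state would be expanded directly, a branch the
        # examples on this page do not cover.
        return self._next_state, self._next_state.get_random_unresolved_dependency()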
Code example #5
File: test_td.py Project: KPostOffice/adviser
    def test_run_exploration(self, context: Context) -> None:
        """Tests run when exploration is performed."""
        flexmock(TemporalDifference)
        flexmock(AdaptiveSimulatedAnnealing)

        flexmock(State)
        max_state = State(score=3.0)
        probable_state = State(score=2.0)

        context.beam.add_state(max_state)
        context.beam.add_state(probable_state)

        unresolved_dependency = (
            "pytorch",
            "1.0.0",
            "https://thoth-station.ninja/simple",
        )

        flexmock(random)
        random.should_receive("randrange").with_args(1, 2).and_return(0).once()
        random.should_receive("random").and_return(0.50).once(
        )  # *lower* than acceptance_probability that is 0.75 so we do exploitation
        probable_state.should_receive(
            "get_random_unresolved_dependency").with_args(
                prefer_recent=True).and_return(unresolved_dependency).once()
        TemporalDifference.should_receive("_temperature_function").with_args(
            1.0, context).and_return(0.9).once()
        AdaptiveSimulatedAnnealing.should_receive(
            "_compute_acceptance_probability").with_args(
                max_state.score, probable_state.score,
                0.9).and_return(0.75).once()
        context.beam.should_receive("max").with_args().and_return(
            max_state).once()

        predictor = TemporalDifference(step=1)
        predictor._steps_taken = 0
        predictor._temperature = 1.0
        with predictor.assigned_context(context):
            assert predictor.run() == (probable_state, unresolved_dependency)
            assert predictor._steps_taken == 1
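
Code example #5 exercises the exploration branch of TemporalDifference.run(): the annealed temperature (mocked to 0.9) yields an acceptance probability of 0.75, the random draw of 0.50 falls below it, so the lower-scored probable_state is expanded instead of the beam's maximum, and _steps_taken is incremented. The sketch below covers that decision only; beam.max() and the mocked helpers come from the test, while beam.get(), beam.size, and the way the second candidate is chosen are assumptions made purely for illustration.

import random
from typing import Any, Tuple


class _TemporalDifferenceSketch:
    """Illustrative stand-in for the exploration/exploitation decision tested above."""

    def __init__(self, step: int = 1) -> None:
        self._step = step
        self._steps_taken = 0
        self._temperature = 0.0
        self.context: Any = None  # assigned via assigned_context() in the test

    @staticmethod
    def _temperature_function(temperature: float, context: Any) -> float:
        raise NotImplementedError  # mocked in the test to return 0.9

    @staticmethod
    def _compute_acceptance_probability(top_score: float, neighbour_score: float, temperature: float) -> float:
        raise NotImplementedError  # mocked in the test to return 0.75

    def run(self) -> Tuple[Any, Any]:
        # Candidates: the best state in the beam and some other, lower-scored state
        # (how the latter is picked is an assumption; the test only mocks
        # random.randrange(1, 2)).
        max_state = self.context.beam.max()
        probable_state = self.context.beam.get(random.randrange(1, self.context.beam.size))

        # Anneal the temperature and turn the score gap into an acceptance probability.
        self._temperature = self._temperature_function(self._temperature, self.context)
        acceptance_probability = self._compute_acceptance_probability(
            max_state.score, probable_state.score, self._temperature
        )

        if random.random() <= acceptance_probability:
            # Exploration: accept the lower-scored state (0.50 <= 0.75 in the test).
            state = probable_state
        else:
            # Exploitation: keep expanding the best state seen so far.
            state = max_state

        self._steps_taken += 1
        return state, state.get_random_unresolved_dependency(prefer_recent=True)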