@pytest.mark.parametrize("float_case", [math.nan, math.inf, -math.inf])  # assumed parametrization for the nan/inf cases
def test_set_reward_signal_nan_inf(self, float_case: float) -> None:
    """Test (not) keeping the reward signal for nan/inf."""
    predictor = TemporalDifference()
    state = State()
    state.add_resolved_dependency(("tensorflow", "2.3.0", "https://pypi.org/simple"))
    state.add_resolved_dependency(("flask", "0.12", "https://pypi.org/simple"))
    state.add_unresolved_dependency(("termial-random", "0.0.2", "https://pypi.org/simple"))

    predictor._policy = {
        ("flask", "0.12", "https://pypi.org/simple"): [0.2, 1],
    }

    predictor._steps_taken = 2
    predictor._steps_reward = 1.2
    predictor._next_state = state

    assert (
        predictor.set_reward_signal(state, ("tensorflow", "2.0.0", "https://pypi.org/simple"), float_case) is None
    )

    assert predictor._policy == {
        ("flask", "0.12", "https://pypi.org/simple"): [1.4, 2],
        ("tensorflow", "2.3.0", "https://pypi.org/simple"): [1.2, 1],
    }
    assert predictor._steps_taken == 0
    assert predictor._steps_reward == 0.0
    assert predictor._next_state is None
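
# A minimal sketch of the terminal-state update exercised above, assuming a TD-style
# policy mapping package tuples to [cumulative reward, visit count]. The helper name
# and signature are illustrative, not the actual thoth-adviser implementation: on a
# nan/inf (terminal) reward the signal itself is discarded, the accumulated step
# reward is credited to every resolved dependency, and the caller resets the counters.
from typing import Dict, List, Tuple

PackageTuple = Tuple[str, str, str]


def _sketch_terminal_update(
    policy: Dict[PackageTuple, List[float]],
    resolved: List[PackageTuple],
    steps_reward: float,
) -> None:
    """Credit the accumulated reward to all resolved dependencies (illustrative only)."""
    for package_tuple in resolved:
        entry = policy.setdefault(package_tuple, [0.0, 0])
        entry[0] += steps_reward  # cumulative reward observed for this package
        entry[1] += 1  # how many scored states included this package
    # With steps_reward=1.2 this turns {flask: [0.2, 1]} into
    # {flask: [1.4, 2], tensorflow: [1.2, 1]}, matching the assertions above.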
def test_n_step_td_step_no_adjust(self, context: Context) -> None:
    """Test not adjusting the policy on reward signal propagation until enough steps are taken."""
    predictor = TemporalDifference(step=1)
    predictor._temperature = 1.0
    predictor._steps_taken = 0

    package_tuple = ("tensorflow", "2.3.1", "https://pypi.org/simple")
    state = State()
    state.add_resolved_dependency(package_tuple)

    with predictor.assigned_context(context):
        predictor.set_reward_signal(state, package_tuple, 0.33)

    # Not enough steps taken yet, so the reward is only accumulated.
    assert predictor._policy.get(package_tuple) is None

    predictor._steps_taken = 1
    with predictor.assigned_context(context):
        predictor.set_reward_signal(state, package_tuple, 0.2)

    # The accumulated reward (0.33 + 0.2) is propagated to the policy.
    assert predictor._policy.get(package_tuple) == [0.53, 1]
    assert predictor._steps_taken == 0
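
# A minimal sketch of the n-step gating checked above, assuming the predictor
# accumulates rewards and only triggers a policy update once `step` steps have been
# taken; the helper is illustrative, not the actual thoth-adviser implementation.
def _sketch_n_step_gate(steps_taken: int, step: int, steps_reward: float, reward: float):
    """Return (steps_taken, steps_reward, update_policy) after one reward signal."""
    steps_reward += reward
    if steps_taken < step:
        # Not enough steps taken yet (first call above): keep accumulating only.
        return steps_taken, steps_reward, False
    # Enough steps (second call above): propagate 0.33 + 0.2 = 0.53 and reset.
    return 0, 0.0, True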
def test_set_reward_signal_unseen(self) -> None:
    """Test keeping the reward signal for an unseen step."""
    reward = 42.24
    package_tuple = ("tensorflow", "2.0.0", "https://thoth-station.ninja")

    state = flexmock()
    state.should_receive("iter_resolved_dependencies").and_return([package_tuple]).once()

    predictor = TemporalDifference()
    predictor._policy = {
        ("numpy", "1.0.0", "https://pypi.org/simple"): [30.30, 92],
    }
    predictor._steps_taken = 1

    predictor.set_reward_signal(state, None, reward)

    assert predictor._policy == {
        package_tuple: [42.24, 1],
        ("numpy", "1.0.0", "https://pypi.org/simple"): [30.30, 92],
    }
def test_run_exploration(self, context: Context) -> None:
    """Test run when exploration is performed."""
    flexmock(TemporalDifference)
    flexmock(AdaptiveSimulatedAnnealing)
    flexmock(State)

    max_state = State(score=3.0)
    probable_state = State(score=2.0)

    context.beam.add_state(max_state)
    context.beam.add_state(probable_state)

    unresolved_dependency = (
        "pytorch",
        "1.0.0",
        "https://thoth-station.ninja/simple",
    )

    flexmock(random)
    random.should_receive("randrange").with_args(1, 2).and_return(0).once()
    # *lower* than the acceptance probability of 0.75, so we do exploration
    random.should_receive("random").and_return(0.50).once()

    probable_state.should_receive("get_random_unresolved_dependency").with_args(
        prefer_recent=True
    ).and_return(unresolved_dependency).once()

    TemporalDifference.should_receive("_temperature_function").with_args(1.0, context).and_return(0.9).once()
    AdaptiveSimulatedAnnealing.should_receive("_compute_acceptance_probability").with_args(
        max_state.score, probable_state.score, 0.9
    ).and_return(0.75).once()
    context.beam.should_receive("max").with_args().and_return(max_state).once()

    predictor = TemporalDifference(step=1)
    predictor._steps_taken = 0
    predictor._temperature = 1.0
    with predictor.assigned_context(context):
        assert predictor.run() == (probable_state, unresolved_dependency)
        assert predictor._steps_taken == 1
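
# A minimal sketch of the exploration/exploitation decision mocked above, assuming an
# annealing-style acceptance test; the helper is illustrative, not the actual
# thoth-adviser TemporalDifference.run() implementation.
import random


def _sketch_pick_state(max_state, probable_state, acceptance_probability: float):
    """Explore a probable state or exploit the currently best one (illustrative)."""
    if random.random() <= acceptance_probability:
        # The drawn value (0.50 in the test above) is below the acceptance
        # probability (0.75), so the lower-scoring state is explored.
        return probable_state
    # Otherwise exploit the highest-scoring state in the beam.
    return max_state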