def test_run(
    self,
    state_factory: Callable[[], State],
    state_count: int,
    limit: int,
    count: int,
    iteration: int,
    accepted_final_states: int,
) -> None:
    """Test running the annealing."""
    # Populate a beam with clones of one base state, each one iteration ahead.
    beam = Beam()
    base_state = state_factory()
    for _ in range(state_count):
        clone = base_state.clone()
        clone.iteration = base_state.iteration + 1
        beam.add_state(clone)

    predictor = AdaptiveSimulatedAnnealing()
    # Fake resolver context carrying the counters the predictor consumes.
    context = flexmock(
        accepted_final_states_count=accepted_final_states,
        count=count,
        iteration=iteration,
        limit=limit,
        beam=beam,
    )
    with predictor.assigned_context(context):
        next_state, package_tuple = predictor.run()
        # The chosen state must come from the beam and the returned
        # package tuple must be one of its unresolved dependencies.
        assert next_state in beam.iter_states()
        assert package_tuple is not None
        assert package_tuple[0] in next_state.unresolved_dependencies
        assert package_tuple in next_state.unresolved_dependencies[package_tuple[0]].values()
def test_acceptance_probability(self, top_score: float, neighbour_score: float, temperature: float) -> None:
    """Test acceptance probability is always between 0 and 1."""
    # The acceptance probability is a probability by definition, so it must
    # stay inside [0, 1] for any score pair and temperature supplied.
    probability = AdaptiveSimulatedAnnealing._compute_acceptance_probability(
        top_score=top_score,
        neighbour_score=neighbour_score,
        temperature=temperature,
    )
    assert 0.0 <= probability <= 1.0, "Acceptance probability not within 0 and 1"
def test_temperature_function(
    self,
    t0: float,
    accepted_final_states_count: int,
    limit: int,
    iteration: int,
    count: int,
) -> None:
    """Test the temperature function never drops below 0."""
    # Fake resolver context holding the counters the temperature function
    # reads; the beam size value is arbitrary.
    context = flexmock(
        accepted_final_states_count=accepted_final_states_count,
        limit=limit,
        iteration=iteration,
        count=count,
        beam=flexmock(size=96),
    )
    predictor = AdaptiveSimulatedAnnealing()
    # NaN >= 0.0 evaluates to False, so this assertion also catches NaN.
    assert (
        predictor._temperature_function(t0=t0, context=context) >= 0.0
    ), "Temperature dropped below 0 or is NaN"
def test_run_exploration(self, context: Context) -> None:
    """Tests run when exploration is performed."""
    # Make classes mockable via flexmock's should_receive on class level.
    flexmock(TemporalDifference)
    flexmock(AdaptiveSimulatedAnnealing)
    flexmock(State)

    max_state = State(score=3.0)
    probable_state = State(score=2.0)
    context.beam.add_state(max_state)
    context.beam.add_state(probable_state)

    unresolved_dependency = (
        "pytorch",
        "1.0.0",
        "https://thoth-station.ninja/simple",
    )

    flexmock(random)
    # Index into the beam used to pick the probable (non-max) state.
    random.should_receive("randrange").with_args(1, 2).and_return(0).once()
    random.should_receive("random").and_return(0.50).once(
    )  # *lower* than acceptance_probability that is 0.75 so we do exploration
    probable_state.should_receive(
        "get_random_unresolved_dependency").with_args(
            prefer_recent=True).and_return(unresolved_dependency).once()
    TemporalDifference.should_receive("_temperature_function").with_args(
        1.0, context).and_return(0.9).once()
    AdaptiveSimulatedAnnealing.should_receive(
        "_compute_acceptance_probability").with_args(
            max_state.score, probable_state.score, 0.9).and_return(0.75).once()
    context.beam.should_receive("max").with_args().and_return(
        max_state).once()

    predictor = TemporalDifference(step=1)
    predictor._steps_taken = 0
    predictor._temperature = 1.0
    with predictor.assigned_context(context):
        # Exploration picks the probable state, not the beam's max.
        assert predictor.run() == (probable_state, unresolved_dependency)
        assert predictor._steps_taken == 1
def test_pre_run(self) -> None:
    """Test pre-run initialization."""
    context = flexmock(limit=100)
    predictor = AdaptiveSimulatedAnnealing()
    # Before pre_run the temperature is at its default.
    assert predictor._temperature == 0.0
    # Seed stale history to verify pre_run discards it.
    predictor._temperature_history = [(0.1, False, 0.2, 3), (0.42, True, 0.66, 47)]
    with predictor.assigned_context(context):
        predictor.pre_run()
    assert predictor._temperature == context.limit, "Predictor's temperature not initialized correctly"
    assert predictor._temperature_history == [], "Predictor's temperature history not discarded"