def test_returns_start_lr_when_there_are_no_metrics(self):
  """An empty history should make the schedule emit the start LR."""
  empty_history = trax_history.History()
  start_lr = 1e-3
  lr_schedule = self._make_schedule(
      empty_history,
      control_configs=(("learning_rate", start_lr, (1e-9, 1.0), False),),
  )
  # With nothing recorded yet, the control falls back to its start value.
  self.assertEqual(lr_schedule(0)["learning_rate"], start_lr)
def test_clips_observations(self):
  """Metric values outside `observation_range` are clipped to its bounds."""
  hist = trax_history.History()
  # One value far below and one far above the allowed range.
  self._append_metrics(hist, ("eval", "loss"), [-10, 10])
  obs = online_tune.history_to_observations(
      hist,
      metrics=(("eval", "loss"),),
      observation_range=(-2, 2),
      include_lr=False,
  )
  np.testing.assert_array_equal(obs, [[-2], [2]])
def restore_state(output_dir):
  """Restore State."""
  checkpoint_path = os.path.join(output_dir, "model.pkl")
  if not gfile.exists(checkpoint_path):
    # No saved checkpoint: start from scratch with an empty history.
    return State(step=None, params=None, history=trax_history.History())
  with gfile.GFile(checkpoint_path, "rb") as f:
    # The checkpoint is a (params, step, history) tuple.
    params, step, history = pickle.load(f)
  log("Model loaded from %s" % checkpoint_path)
  return State(step=step, params=params, history=history)
def test_clips_observations(self):
  """Observations outside `observation_range` are clipped to its bounds."""
  history = trax_history.History()
  self._append_metrics(history, ("eval", "loss"), [-10, 10])
  observations = online_tune.history_to_observations(
      history,
      metrics=(("eval", "loss"),),
      observation_range=(-2, 2),
      control_configs=None,
  )
  # -10 and 10 lie outside (-2, 2), so they clip to the range endpoints.
  # (The previous expectation of [[-1], [1]] did not match the configured
  # range; clipping semantics are to the given observation_range.)
  np.testing.assert_array_equal(observations, [[-2], [2]])
def test_clips_new_learning_rate(self):
  """An LR pushed above `max_lr` is capped at `max_lr`."""
  hist = trax_history.History()
  self._append_metrics(hist, online_tune.LEARNING_RATE_METRIC, [1e-3])
  # Action 0 picks multiplier 4.0: 1e-3 * 4.0 = 4e-3, above the 3e-3 cap.
  capped_lr = online_tune.new_learning_rate(
      action=0,
      history=hist,
      action_multipliers=(4.0, 1.0, 0.25),
      max_lr=3e-3,
  )
  np.testing.assert_almost_equal(capped_lr, 3e-3)
def test_calculates_new_learning_rate(self):
  """The chosen action's multiplier is applied to the most recent LR."""
  hist = trax_history.History()
  self._append_metrics(hist, online_tune.LEARNING_RATE_METRIC, [1e-2, 1e-3])
  # Action 2 selects multiplier 2.0, applied to the latest LR (1e-3).
  updated_lr = online_tune.new_learning_rate(
      action=2,
      history=hist,
      action_multipliers=(0.5, 1.0, 2.0),
      max_lr=1.0,
  )
  np.testing.assert_almost_equal(updated_lr, 2e-3)
def test_changes_lr_when_there_are_some_metrics(self):
  """With history recorded, the schedule rescales the last LR by an action."""
  hist = trax_history.History()
  hist.append("eval", "metrics/accuracy", step=0, value=0.8)
  hist.append(*online_tune.LEARNING_RATE_METRIC, step=0, value=1e-4)
  schedule = self._make_schedule(
      hist,
      observation_metrics=(("eval", "metrics/accuracy"),),
      action_multipliers=(0.5, 2.0),
  )
  # The policy may choose either multiplier, so accept both outcomes:
  # 1e-4 * 0.5 = 5e-5 or 1e-4 * 2.0 = 2e-4.
  new_lr = schedule(123)
  self.assertTrue(
      onp.allclose(new_lr, 5e-5) or onp.allclose(new_lr, 2e-4))
def restore_state(output_dir):
  """Restore State."""
  model_path = os.path.join(output_dir, "model.pkl")
  if not gfile.exists(model_path):
    # Nothing saved yet: return an empty state with a fresh history.
    return State(step=None, opt_state=None, history=trax_history.History())
  with gfile.GFile(model_path, "rb") as f:
    # The checkpoint is an (opt_state, step, history) tuple.
    opt_state, step, history = pickle.load(f)
  log("Model loaded from %s at step %d" % (model_path, step))
  logging.debug("From loaded model : history = %s", history)
  return State(step=step, opt_state=OptState(*opt_state), history=history)
def test_converts_history_to_observations_without_learning_rate(self):
  """Metrics become columns, in the order given, one row per timestep."""
  hist = trax_history.History()
  self._append_metrics(hist, ("train", "loss"), [3.0, 1.07])
  self._append_metrics(hist, ("eval", "accuracy"), [0.12, 0.68])
  obs = online_tune.history_to_observations(
      hist,
      metrics=(("eval", "accuracy"), ("train", "loss")),
      observation_range=(0, 5),
      include_lr=False,
  )
  # Column order follows the `metrics` tuple: accuracy first, then loss.
  np.testing.assert_array_equal(obs, [[0.12, 3.0], [0.68, 1.07]])
def test_converts_history_to_observations_without_controls(self):
  """With no control configs, observations contain only the metrics."""
  hist = trax_history.History()
  self._append_metrics(hist, ("train", "loss"), [1.0, 0.07])
  self._append_metrics(hist, ("eval", "accuracy"), [0.12, 0.68])
  obs = online_tune.history_to_observations(
      hist,
      metrics=(("eval", "accuracy"), ("train", "loss")),
      observation_range=(-1, 1),
      control_configs=None,
  )
  # One row per timestep; columns ordered as in `metrics`.
  np.testing.assert_array_almost_equal(
      obs, [[0.12, 1.0], [0.68, 0.07]])
def test_clips_updated_control_with_flipping(self):
  """A flipped control update is clipped to its configured range."""
  momentum_config = ("momentum", None, (0.5, 0.99), True)
  hist = trax_history.History()
  self._append_metrics(
      hist, online_tune.control_metric("momentum"), [0.985])
  updated = online_tune.update_control(
      control_config=momentum_config,
      action=0,
      history=hist,
      action_multipliers=(0.5, 1.0, 2.0),
  )
  # The update overshoots the (0.5, 0.99) range; expect the upper bound.
  np.testing.assert_almost_equal(updated, 0.99)
def test_clips_updated_control_without_flipping(self):
  """A control update exceeding its range is clipped to the bound."""
  lr_config = ("learning_rate", None, (1e-9, 10.0), False)
  hist = trax_history.History()
  self._append_metrics(
      hist, online_tune.control_metric("learning_rate"), [7.0])
  updated = online_tune.update_control(
      control_config=lr_config,
      action=2,
      history=hist,
      action_multipliers=(0.5, 1.0, 2.0),
  )
  # 7.0 * 2.0 = 14.0 exceeds the upper bound, so we expect 10.0.
  np.testing.assert_almost_equal(updated, 10.0)
def test_converts_history_to_observations_with_learning_rate(self):
  """With include_lr=True, the LR column preserves ordering over time."""
  hist = trax_history.History()
  self._append_metrics(
      hist, ("train", "training/learning_rate"), [1e-3, 1e-4])
  obs = online_tune.history_to_observations(
      hist,
      metrics=(),
      observation_range=(0, 5),
      include_lr=True,
  )
  # Two timesteps, one LR column.
  self.assertEqual(obs.shape, (2, 1))
  ((first_log_lr,), (second_log_lr,)) = obs
  # LR decreased over time, so its transformed observation must too.
  self.assertGreater(first_log_lr, second_log_lr)
def test_converts_history_to_observations_with_controls(self):
  """With control configs, the control column preserves ordering over time."""
  hist = trax_history.History()
  self._append_metrics(
      hist, ("train", "training/learning_rate"), [1e-3, 1e-4])
  obs = online_tune.history_to_observations(
      hist,
      metrics=(),
      observation_range=(0, 5),
      control_configs=(("learning_rate", None, (1e-9, 10.0), False),),
  )
  # Two timesteps, one control (learning rate) column.
  self.assertEqual(obs.shape, (2, 1))
  ((first_log_lr,), (second_log_lr,)) = obs
  # LR decreased over time, so its transformed observation must too.
  self.assertGreater(first_log_lr, second_log_lr)
def test_changes_lr_when_there_are_some_metrics(self):
  """With history recorded, the schedule rescales the last LR by an action."""
  hist = trax_history.History()
  hist.append("eval", "metrics/accuracy", step=0, value=0.8)
  hist.append(
      *online_tune.control_metric("learning_rate"), step=0, value=1e-4)
  schedule = self._make_schedule(
      hist,
      observation_metrics=(("eval", "metrics/accuracy"),),
      action_multipliers=(0.5, 2.0),
  )
  # The policy may choose either multiplier, so accept both outcomes:
  # 1e-4 * 0.5 = 5e-5 or 1e-4 * 2.0 = 2e-4.
  new_lr = schedule(123)["learning_rate"]
  self.assertTrue(
      onp.allclose(new_lr, 5e-5) or onp.allclose(new_lr, 2e-4))
def test_works_with_multiple_controls(self):
  """The schedule emits a value for every configured control."""
  hist = trax_history.History()
  hist.append("eval", "metrics/accuracy", step=0, value=0.8)
  hist.append(
      *online_tune.control_metric("learning_rate"), step=0, value=1e-4)
  hist.append(
      *online_tune.control_metric("weight_decay_rate"), step=0, value=1e-5)
  schedule = self._make_schedule(
      hist,
      observation_metrics=(("eval", "metrics/accuracy"),),
      control_configs=(
          ("learning_rate", 1e-3, (1e-9, 1.0), False),
          ("weight_decay_rate", 1e-5, (1e-9, 1.0), False),
      ),
      action_multipliers=(1.0,),
  )
  controls = schedule(123)
  # Both configured controls must be present in the schedule's output.
  for control_name in ("learning_rate", "weight_decay_rate"):
    self.assertIn(control_name, controls)
def test_returns_start_lr_when_there_are_no_metrics(self):
  """Without any recorded metrics, the schedule falls back to start_lr."""
  empty_history = trax_history.History()
  lr_schedule = self._make_schedule(empty_history, start_lr=1e-3)
  self.assertEqual(lr_schedule(0), 1e-3)
def test_retrieves_historical_metric_values(self):
  """In-range values come back unchanged, in the order they were appended."""
  hist = trax_history.History()
  self._append_metrics(hist, ("train", "accuracy"), [0.1, 0.73])
  values = online_tune.historical_metric_values(
      hist, metric=("train", "accuracy"), observation_range=(0, 5))
  np.testing.assert_array_equal(values, [0.1, 0.73])
def test_clips_historical_metric_values(self):
  """Values outside `observation_range` are clipped to its bounds."""
  hist = trax_history.History()
  self._append_metrics(hist, ("train", "loss"), [-10, 10])
  values = online_tune.historical_metric_values(
      hist, metric=("train", "loss"), observation_range=(-1, 1))
  np.testing.assert_array_equal(values, [-1, 1])