import os
import random

import numpy as np
import pytest

import kerastuner as kt
from kerastuner.distribute import oracle_chief
from kerastuner.distribute import oracle_client
from kerastuner.engine import hyperparameters as hp_module
from kerastuner.engine import metrics_tracking
from kerastuner.engine import trial as trial_module
from kerastuner.tuners import randomsearch


def test_get_last_value():
    tracker = metrics_tracking.MetricsTracker()
    tracker.register('new_metric', 'min')
    assert tracker.get_last_value('new_metric') is None

    tracker.set_history('new_metric', [
        metrics_tracking.MetricObservation(1., 0),
        metrics_tracking.MetricObservation(2., 1),
        metrics_tracking.MetricObservation(3., 2),
    ])
    assert tracker.get_last_value('new_metric') == 3.


def test_set_history():
    tracker = metrics_tracking.MetricsTracker()
    tracker.set_history('new_metric', [
        metrics_tracking.MetricObservation(0.5, 0),
        metrics_tracking.MetricObservation(1.5, 1),
        metrics_tracking.MetricObservation(2., 2),
    ])
    values = [obs.value for obs in tracker.get_history('new_metric')]
    steps = [obs.step for obs in tracker.get_history('new_metric')]
    # Observation values are stored as lists of values per step.
    assert values == [[0.5], [1.5], [2.]]
    assert steps == [0, 1, 2]


def test_get_history():
    tracker = metrics_tracking.MetricsTracker()
    tracker.update('new_metric', 0.5, step=0)
    tracker.update('new_metric', 1.5, step=1)
    tracker.update('new_metric', 2., step=2)
    assert tracker.get_history('new_metric') == [
        metrics_tracking.MetricObservation(0.5, 0),
        metrics_tracking.MetricObservation(1.5, 1),
        metrics_tracking.MetricObservation(2., 2),
    ]
    with pytest.raises(ValueError, match='Unknown metric'):
        tracker.get_history('another_metric')


def test_metricobservation_proto():
    obs = metrics_tracking.MetricObservation(-10, 5)
    proto = obs.to_proto()
    assert proto.value == [-10]
    assert proto.step == 5
    new_obs = metrics_tracking.MetricObservation.from_proto(proto)
    assert new_obs == obs


def test_trial_proto():
    hps = hp_module.HyperParameters()
    hps.Int('a', 0, 10, default=3)
    trial = trial_module.Trial(hps, trial_id='trial1', status='COMPLETED')
    trial.metrics.register('score', direction='max')
    trial.metrics.update('score', 10, step=1)

    proto = trial.to_proto()
    assert len(proto.hyperparameters.space.int_space) == 1
    assert proto.hyperparameters.values.values['a'].int_value == 3
    assert not proto.HasField('score')

    new_trial = trial_module.Trial.from_proto(proto)
    assert new_trial.status == 'COMPLETED'
    assert new_trial.hyperparameters.get('a') == 3
    assert new_trial.trial_id == 'trial1'
    assert new_trial.score is None
    assert new_trial.best_step is None

    trial.score = -10
    trial.best_step = 3
    proto = trial.to_proto()
    assert proto.HasField('score')
    assert proto.score.value == -10
    assert proto.score.step == 3

    new_trial = trial_module.Trial.from_proto(proto)
    assert new_trial.score == -10
    assert new_trial.best_step == 3
    assert new_trial.metrics.get_history('score') == [
        metrics_tracking.MetricObservation(10, step=1)
    ]


def test_update():
    tracker = metrics_tracking.MetricsTracker()
    tracker.update('new_metric', 0.5)  # automatic registration
    assert set(tracker.metrics.keys()) == {'new_metric'}
    assert tracker.metrics['new_metric'].direction == 'min'  # default direction
    assert tracker.get_history('new_metric') == [
        metrics_tracking.MetricObservation(0.5, step=0)
    ]


def test_metrichistory_proto():
    tracker = metrics_tracking.MetricHistory('max')
    tracker.update(5, step=3)
    tracker.update(10, step=4)

    proto = tracker.to_proto()
    assert proto.maximize
    assert proto.observations[0].value == [5]
    assert proto.observations[0].step == 3
    assert proto.observations[1].value == [10]
    assert proto.observations[1].step == 4

    new_tracker = metrics_tracking.MetricHistory.from_proto(proto)
    assert new_tracker.direction == 'max'
    assert new_tracker.get_history() == [
        metrics_tracking.MetricObservation(5, 3),
        metrics_tracking.MetricObservation(10, 4),
    ]


def test_metricstracker_proto():
    tracker = metrics_tracking.MetricsTracker()
    tracker.register('score', direction='max')
    tracker.update('score', value=10, step=1)
    tracker.update('score', value=20, step=1)
    tracker.update('score', value=30, step=2)

    proto = tracker.to_proto()
    obs = proto.metrics['score'].observations
    assert obs[0].value == [10, 20]  # updates at the same step are merged
    assert obs[0].step == 1
    assert obs[1].value == [30]
    assert obs[1].step == 2
    assert proto.metrics['score'].maximize

    new_tracker = metrics_tracking.MetricsTracker.from_proto(proto)
    assert new_tracker.metrics['score'].direction == 'max'
    assert new_tracker.metrics['score'].get_history() == [
        metrics_tracking.MetricObservation([10, 20], 1),
        metrics_tracking.MetricObservation(30, 2),
    ]


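# The assertions in the three proto round-trip tests above imply the
# following message shapes. This is a sketch inferred from the tests, not
# the actual .proto definitions:
#   MetricObservation: a repeated `value` field plus an integer `step`;
#   MetricHistory: a `maximize` flag plus repeated `observations`;
#   MetricsTracker: a map from metric name to MetricHistory under `metrics`.

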
def test_get_statistics():
    tracker = metrics_tracking.MetricsTracker()
    history = [
        metrics_tracking.MetricObservation(random.random(), i)
        for i in range(10)
    ]
    tracker.set_history('new_metric', history)
    stats = tracker.get_statistics('new_metric')
    assert set(stats.keys()) == {'min', 'max', 'mean', 'median', 'var', 'std'}
    history = [obs.value for obs in history]
    assert stats['min'] == np.min(history)
    assert stats['max'] == np.max(history)
    assert stats['mean'] == np.mean(history)
    assert stats['median'] == np.median(history)
    assert stats['var'] == np.var(history)
    assert stats['std'] == np.std(history)


def _test_update_trial(tmp_dir):
    # Helper run in every process of a mocked distributed session (the
    # leading underscore keeps pytest from collecting it directly): the
    # chief serves the oracle while each worker creates a trial and
    # reports a metric through the client. `tmp_dir` is passed in by the
    # enclosing test; in the original one-line version it was a free
    # variable, which would be undefined at module level.
    hps = kt.HyperParameters()
    hps.Int('a', 0, 10, default=5)
    oracle = randomsearch.RandomSearchOracle(
        objective='score', max_trials=10, hyperparameters=hps)
    oracle._set_project_dir(tmp_dir, 'untitled')
    tuner_id = os.environ['KERASTUNER_TUNER_ID']
    if 'chief' in tuner_id:
        oracle_chief.start_server(oracle)
    else:
        client = oracle_client.OracleClient(oracle)
        trial = client.create_trial(tuner_id)
        assert 'score' not in trial.metrics.metrics
        trial_id = trial.trial_id
        client.update_trial(trial_id, {'score': 1}, step=2)
        updated_trial = client.get_trial(trial_id)
        assert updated_trial.metrics.get_history('score') == [
            metrics_tracking.MetricObservation([1], step=2)
        ]


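# A minimal sketch of the test that would drive the helper above. The
# `mock_distribute` helper and its import path are assumptions (they are
# not defined in this file): it is taken to fork the test body into one
# chief process plus worker processes, setting KERASTUNER_TUNER_ID in
# each, so the role branch in `_test_update_trial` behaves accordingly.
import functools

from tests.unit_tests import mock_distribute  # assumed helper module


@pytest.fixture(scope='function')
def tmp_dir(tmp_path_factory):
    # Fresh project directory for the oracle in each run.
    return tmp_path_factory.mktemp('update_trial', numbered=True)


def test_update_trial(tmp_dir):
    mock_distribute.mock_distribute(
        functools.partial(_test_update_trial, tmp_dir))

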
def test_get_best_value():
    tracker = metrics_tracking.MetricsTracker()
    tracker.register('metric_min', 'min')
    tracker.register('metric_max', 'max')
    assert tracker.get_best_value('metric_min') is None

    tracker.set_history('metric_min', [
        metrics_tracking.MetricObservation(1., 0),
        metrics_tracking.MetricObservation(2., 1),
        metrics_tracking.MetricObservation(3., 2),
    ])
    tracker.set_history('metric_max', [
        metrics_tracking.MetricObservation(1., 0),
        metrics_tracking.MetricObservation(2., 1),
        metrics_tracking.MetricObservation(3., 2),
    ])
    assert tracker.get_best_value('metric_min') == 1.
    assert tracker.get_best_value('metric_max') == 3.


def test_serialization():
    tracker = metrics_tracking.MetricsTracker()
    tracker.register('metric_min', 'min')
    tracker.register('metric_max', 'max')
    tracker.set_history('metric_min', [
        metrics_tracking.MetricObservation(1., 0),
        metrics_tracking.MetricObservation(2., 1),
        metrics_tracking.MetricObservation(3., 2),
    ])
    tracker.set_history('metric_max', [
        metrics_tracking.MetricObservation(1., 0),
        metrics_tracking.MetricObservation(2., 1),
        metrics_tracking.MetricObservation(3., 2),
    ])
    new_tracker = metrics_tracking.MetricsTracker.from_config(
        tracker.get_config())
    assert new_tracker.metrics.keys() == tracker.metrics.keys()