def on_epoch_end(self, epoch, logs=None):
    # Callback hook (a tf.keras.callbacks.Callback method); assumes
    # `from collections import defaultdict` at module level.
    logs = logs or {}  # avoid the mutable-default-argument pitfall

    # update epoch counters
    self.execution_state.epochs += 1
    self.tuner_state.remaining_budget -= 1

    objective = canonicalize_metric_name(self.tuner_state.objective)

    # update metrics and checkpoint if needed
    for metric, value in logs.items():
        improved = self.execution_state.metrics.update(metric, value)
        if objective == canonicalize_metric_name(metric) and improved:
            # Compute classification metrics and store them in the
            # execution state.
            if self.validation_data:
                report = compute_common_classification_metrics(
                    self.model, self.validation_data,
                    self.tuner_state.label_names)
                self.execution_state.update_performance_metrics(report)

            # TODO - figure out the race condition that causes us to clear
            # the session before we finish the writes when we try to
            # apply_async here.
            # self.thread_pool.apply_async(self._checkpoint_model)
            self._checkpoint_model()

            self._write_result_file()

    # reset epoch history
    self.epoch_history = defaultdict(list)

    # update status
    self._report_status(force=True)
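
canonicalize_metric_name is not shown in this listing; the comparison above only needs metric aliases to normalize to a single spelling. A minimal sketch, assuming the helper simply maps common Keras aliases onto one canonical name:

def canonicalize_metric_name(name):
    # Hypothetical sketch: normalize common Keras metric aliases so the
    # tuner objective and the names in `logs` compare equal.
    aliases = {"acc": "accuracy", "val_acc": "val_accuracy"}
    return aliases.get(name, name)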
Example #2
def test_continuous_single_classification_metrics_int():
    model = _single_output_model(dtype=tf.float32)
    x_val = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0])
    y_val = np.array([0, 0, 0, 0, 1, 0, 1, 1, 1, 1])

    results = compute_common_classification_metrics(model, (x_val, y_val))

    _test_classification_metrics(results)
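
_single_output_model is not defined in this listing. The ROC expectations in Example #4 (thresholds of exactly [2, 1, 0]) are consistent with a model whose score is the raw input value, so one plausible sketch is an identity model; treat the exact layer choice as an assumption:

import tensorflow as tf

def _single_output_model(dtype=tf.float32):
    # Hypothetical sketch: echo the scalar input as the prediction, so
    # the scores passed to the ROC computation are exactly the x values.
    i = tf.keras.Input(shape=(), dtype=dtype)
    o = tf.keras.layers.Lambda(lambda t: tf.expand_dims(t, -1))(i)
    return tf.keras.Model(i, o)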
Example #3
def test_continuous_multi_classification_metrics():
    model = _multi_output_model(2, dtype=tf.float32)

    x_val = np.array([[-1, 1] for _ in range(5)] + [[1, -1] for _ in range(5)])

    y_val = [0, 0, 0, 0, 1, 0, 1, 1, 1, 1]
    y_val = np.array([(x, 1 - x) for x in y_val], dtype=np.float32)

    results = compute_common_classification_metrics(model, (x_val, y_val))

    _test_classification_metrics(results)
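
_multi_output_model is likewise not shown. Example #5's perfect f1-scores (with y equal to the one-hot input) are consistent with a model whose predicted class is argmax(x), e.g. a softmax over the raw inputs; a sketch under that assumption:

import tensorflow as tf

def _multi_output_model(num_classes, dtype=tf.float32):
    # Hypothetical sketch: a softmax over the raw inputs, so the
    # predicted class is argmax(x).
    i = tf.keras.Input(shape=(num_classes,), dtype=dtype)
    o = tf.keras.layers.Softmax()(i)
    return tf.keras.Model(i, o)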
Example #4
def test_continuous_single_classification_metrics_training_end():
    model = _single_output_model(dtype=tf.float32)
    x_val = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0])
    y_val = np.array([0, 0, 0, 0, 1, 0, 1, 1, 1, 1])

    results = compute_common_classification_metrics(model, (x_val, y_val))

    _test_classification_metrics(results)

    assert np.allclose(results["roc_curve"]["fpr"], [0, 0.2, 1], atol=.05)
    assert np.allclose(results["roc_curve"]["tpr"], [0, 0.8, 1], atol=.05)
    assert np.allclose(results["roc_curve"]["thresholds"], [2, 1, 0], atol=.05)
Example #5
def test_continuous_multi_classification_metrics_5way():
    model = _multi_output_model(5, dtype=tf.float32)

    x = [x for x in range(5)] + [-x for x in range(5)]
    x = tf.keras.utils.to_categorical(x, num_classes=5)
    y = x

    results = compute_common_classification_metrics(model, (x, y))

    metrics = results["classification_metrics"]
    assert np.isclose(1, metrics["macro avg"]["f1-score"])
    assert np.isclose(1, metrics["weighted avg"]["f1-score"])
Example #6
def test_continuous_multi_classification_metrics_float():
    model = _multi_output_model(2, dtype=tf.float32)

    model.summary()

    x_val = np.array([[0, 1] for _ in range(5)] + [[0, -1] for _ in range(5)],
                     dtype=np.float32)
    y_val = [0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0]
    y_val = np.array([[x, 1 - x] for x in y_val], dtype=np.float32)
    results = compute_common_classification_metrics(model, (x_val, y_val))

    _test_classification_metrics(results)
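
_test_classification_metrics holds the assertions shared by all of these tests; its body is not shown. A minimal sketch inferred from the result keys used elsewhere in this listing ("roc_curve" and "classification_metrics"):

def _test_classification_metrics(results):
    # Hypothetical sketch: check that the common result structure is
    # present, matching the keys asserted on in Examples #4 and #5.
    assert "classification_metrics" in results
    for key in ("fpr", "tpr", "thresholds"):
        assert key in results["roc_curve"]
    for avg in ("macro avg", "weighted avg"):
        assert "f1-score" in results["classification_metrics"][avg]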