def test_metrics_correctness_with_iterator(self):
        model = keras.Sequential()
        model.add(
            keras.layers.Dense(8,
                               activation='relu',
                               input_dim=4,
                               kernel_initializer='ones'))
        model.add(
            keras.layers.Dense(1,
                               activation='sigmoid',
                               kernel_initializer='ones'))
        model.compile(loss='binary_crossentropy',
                      metrics=['accuracy',
                               metrics_module.BinaryAccuracy()],
                      optimizer=RMSPropOptimizer(learning_rate=0.001))

        np.random.seed(123)
        x = np.random.randint(10, size=(100, 4)).astype(np.float32)
        y = np.random.randint(2, size=(100, 1)).astype(np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
        dataset = dataset.batch(10)
        iterator = dataset_ops.make_one_shot_iterator(dataset)
        outs = model.evaluate(iterator, steps=10)
        self.assertEqual(np.around(outs[1], decimals=1), 0.5)
        self.assertEqual(np.around(outs[2], decimals=1), 0.5)

        y = np.zeros((100, 1), dtype=np.float32)
        dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
        dataset = dataset.repeat(100)
        dataset = dataset.batch(10)
        iterator = dataset_ops.make_one_shot_iterator(dataset)
        outs = model.evaluate(iterator, steps=10)
        self.assertEqual(outs[1], 0.)
        self.assertEqual(outs[2], 0.)
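
As a standalone cross-check of the zero-accuracy case above (a minimal sketch, assuming the public TF 2.x tf.keras.metrics namespace rather than the test's internal imports):

import tensorflow as tf

# With all-ones weights and a sigmoid output, every prediction exceeds 0.5,
# so against all-zero labels the thresholded accuracy collapses to 0.0.
acc = tf.keras.metrics.BinaryAccuracy()
acc.update_state([[0.0], [0.0]], [[0.9], [0.8]])  # labels 0, predictions threshold to 1
print(float(acc.result()))  # 0.0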
Example #2
 def metrics(self, regularization_losses=None):
     """Creates metrics. See `base_head.Head` for details."""
     keys = metric_keys.MetricKeys
     with ops.name_scope(None, 'metrics', (regularization_losses, )):
         # Mean metric.
         eval_metrics = {}
         eval_metrics[self._loss_mean_key] = metrics.Mean(
             name=keys.LOSS_MEAN)
         # The default summation_method is "interpolation" in the AUC metric.
         eval_metrics[self._auc_key] = metrics.AUC(name=keys.AUC)
         eval_metrics[self._auc_pr_key] = metrics.AUC(curve='PR',
                                                      name=keys.AUC_PR)
         if regularization_losses is not None:
             eval_metrics[self._loss_regularization_key] = metrics.Mean(
                 name=keys.LOSS_REGULARIZATION)
         for i, threshold in enumerate(self._thresholds):
             eval_metrics[self._accuracy_keys[i]] = metrics.BinaryAccuracy(
                 name=self._accuracy_keys[i], threshold=threshold)
             eval_metrics[self._precision_keys[i]] = (metrics.Precision(
                 name=self._precision_keys[i], thresholds=threshold))
             eval_metrics[self._recall_keys[i]] = metrics.Recall(
                 name=self._recall_keys[i], thresholds=threshold)
         for i in range(len(self._classes_for_class_based_metrics)):
             eval_metrics[self._prob_keys[i]] = metrics.Mean(
                 name=self._prob_keys[i])
             eval_metrics[self._auc_keys[i]] = metrics.AUC(
                 name=self._auc_keys[i])
             eval_metrics[self._auc_pr_keys[i]] = metrics.AUC(
                 curve='PR', name=self._auc_pr_keys[i])
     return eval_metrics
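
Each value in eval_metrics is a stateful Keras Metric object; a hypothetical caller (not part of the head API shown) would accumulate batches with update_state and read result() once at the end. A minimal sketch for one entry, assuming the public tf.keras.metrics namespace:

import tensorflow as tf

# Accumulate two batches into a PR-curve AUC, then read the final value.
auc_pr = tf.keras.metrics.AUC(curve='PR', name='auc_pr')
auc_pr.update_state([0, 1], [0.2, 0.8])
auc_pr.update_state([1, 0], [0.6, 0.3])
print(float(auc_pr.result()))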
Example #3

  def test_metrics_correctness_with_dataset(self):
    layers = [
        keras.layers.Dense(
            8, activation='relu', input_dim=4, kernel_initializer='ones'),
        keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones')
    ]

    model = testing_utils.get_model_from_layers(layers, (4,))

    model.compile(
        loss='binary_crossentropy',
        metrics=['accuracy', metrics_module.BinaryAccuracy()],
        optimizer='rmsprop',
        run_eagerly=testing_utils.should_run_eagerly())

    np.random.seed(123)
    x = np.random.randint(10, size=(100, 4)).astype(np.float32)
    y = np.random.randint(2, size=(100, 1)).astype(np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
    dataset = dataset.batch(10)
    outs = model.evaluate(dataset, steps=10)
    self.assertEqual(np.around(outs[1], decimals=1), 0.5)
    self.assertEqual(np.around(outs[2], decimals=1), 0.5)

    y = np.zeros((100, 1), dtype=np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    outs = model.evaluate(dataset, steps=10)
    self.assertEqual(outs[1], 0.)
    self.assertEqual(outs[2], 0.)
Example #4
  def test_binary_accuracy(self):
    acc_obj = metrics.BinaryAccuracy(name='my acc')

    # check config
    self.assertEqual(acc_obj.name, 'my acc')
    self.assertTrue(acc_obj.stateful)
    self.assertEqual(len(acc_obj.variables), 2)
    self.assertEqual(acc_obj.dtype, dtypes.float32)
    self.evaluate(variables.variables_initializer(acc_obj.variables))

    # verify that correct value is returned
    update_op = acc_obj.update_state([[1], [0]], [[1], [0]])
    self.evaluate(update_op)
    result = self.evaluate(acc_obj.result())
    self.assertEqual(result, 1)  # 2/2

    # check y_pred squeeze
    update_op = acc_obj.update_state([[1], [1]], [[[1]], [[0]]])
    self.evaluate(update_op)
    result = self.evaluate(acc_obj.result())
    self.assertAlmostEqual(result, 0.75, 2)  # 3/4

    # check y_true squeeze
    result_t = acc_obj([[[1]], [[1]]], [[1], [0]])
    result = self.evaluate(result_t)
    self.assertAlmostEqual(result, 0.67, 2)  # 4/6

    # check with sample_weight
    result_t = acc_obj([[1], [1]], [[1], [0]], [[0.5], [0.2]])
    result = self.evaluate(result_t)
    self.assertAlmostEqual(result, 0.67, 2)  # 4.5/6.7
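
The final assertion folds the weighted batch into the running totals (4 + 0.5 correct over 6 + 0.7 examples). Replaying just that weighted step from a fresh state (a sketch, assuming the TF 2.x namespace) isolates the arithmetic:

import tensorflow as tf

# Only the first example is correct; the weights scale both the hit and the count.
acc = tf.keras.metrics.BinaryAccuracy()
acc.update_state([[1], [1]], [[1], [0]], sample_weight=[[0.5], [0.2]])
print(float(acc.result()))  # 0.5 / 0.7 ~= 0.71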
Example #5

 def metrics(self, regularization_losses=None):
   """Creates metrics. See `base_head.Head` for details."""
   keys = metric_keys.MetricKeys
   with ops.name_scope('metrics', values=(regularization_losses,)):
     # Mean metric.
     eval_metrics = {}
     eval_metrics[self._loss_mean_key] = metrics.Mean(name=keys.LOSS_MEAN)
     eval_metrics[self._accuracy_key] = metrics.Accuracy(name=keys.ACCURACY)
     eval_metrics[self._precision_key] = metrics.Precision(name=keys.PRECISION)
     eval_metrics[self._recall_key] = metrics.Recall(name=keys.RECALL)
     eval_metrics[self._prediction_mean_key] = metrics.Mean(
         name=keys.PREDICTION_MEAN)
     eval_metrics[self._label_mean_key] = metrics.Mean(name=keys.LABEL_MEAN)
     eval_metrics[self._accuracy_baseline_key] = (
         metrics.Mean(name=keys.ACCURACY_BASELINE))
     # The default summation_method is "interpolation" in the AUC metric.
     eval_metrics[self._auc_key] = metrics.AUC(name=keys.AUC)
     eval_metrics[self._auc_pr_key] = metrics.AUC(curve='PR', name=keys.AUC_PR)
     if regularization_losses is not None:
       eval_metrics[self._loss_regularization_key] = metrics.Mean(
           name=keys.LOSS_REGULARIZATION)
     for i, threshold in enumerate(self._thresholds):
       eval_metrics[self._accuracy_keys[i]] = metrics.BinaryAccuracy(
           name=self._accuracy_keys[i], threshold=threshold)
       eval_metrics[self._precision_keys[i]] = metrics.Precision(
           name=self._precision_keys[i], thresholds=threshold)
       eval_metrics[self._recall_keys[i]] = metrics.Recall(
           name=self._recall_keys[i], thresholds=threshold)
   return eval_metrics
Example #6

    def model_fn(features, labels, mode):  # pylint: disable=unused-argument
        """model_fn which uses a single unit Dense layer."""
        # You can also use the Flatten layer if you want to test a model without any
        # weights.
        layer = tf.layers.Dense(1, use_bias=True)
        logits = layer(features)

        if mode == tf.estimator.ModeKeys.PREDICT:
            predictions = {"logits": logits}
            return tf.estimator.EstimatorSpec(mode, predictions=predictions)

        def loss_fn():
            y = tf.reshape(logits, []) - tf.constant(1.)
            return y * y

        if mode == tf.estimator.ModeKeys.EVAL:
            acc_obj = metrics_module.BinaryAccuracy()
            acc_obj.update_state(labels, labels)
            return tf.estimator.EstimatorSpec(
                mode, loss=loss_fn(), eval_metric_ops={"Accuracy": acc_obj})

        assert mode == tf.estimator.ModeKeys.TRAIN

        global_step = tf.train.get_global_step()
        train_op = optimizer.minimize(loss_fn(), global_step=global_step)
        return tf.estimator.EstimatorSpec(mode,
                                          loss=loss_fn(),
                                          train_op=train_op)
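
model_fn closes over an optimizer defined elsewhere in the enclosing test. A hypothetical TF 1.x wiring (the optimizer and the constant input_fn below are assumptions, not part of the original snippet):

import tensorflow as tf

optimizer = tf.train.GradientDescentOptimizer(0.1)  # assumed; the test supplies its own

def input_fn():
    # One example with a single feature; the label is ignored by loss_fn.
    return tf.data.Dataset.from_tensors(([[1.0]], [[1.0]])).repeat()

estimator = tf.estimator.Estimator(model_fn=model_fn)
estimator.train(input_fn=input_fn, steps=5)
print(estimator.evaluate(input_fn=input_fn, steps=1))  # includes the "Accuracy" key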
Example #7
    def metrics(self, regularization_losses=None):
        """Creates metrics. See `base_head.Head` for details."""
        keys = metric_keys.MetricKeys
        with ops.name_scope(None, 'metrics', (regularization_losses, )):
            # Mean metric.
            eval_metrics = {}
            eval_metrics[self._loss_mean_key] = metrics.Mean(
                name=keys.LOSS_MEAN)
            # TODO(b/118843532): create Keras metrics
            # eval_metrics[self._precision_key] = metrics.Precision(name=keys.AUC)
            # eval_metrics[self._auc_pr_key] = metrics.Precision(name=keys.AUC_PR)
            if regularization_losses is not None:
                eval_metrics[self._loss_regularization_key] = metrics.Mean(
                    name=keys.LOSS_REGULARIZATION)
            for i, threshold in enumerate(self._thresholds):
                eval_metrics[self._accuracy_keys[i]] = metrics.BinaryAccuracy(
                    name=self._accuracy_keys[i], threshold=threshold)
                # TODO(b/118843532): create Keras metrics
                # eval_metrics[self._precision_keys[i]] = (
                #     metrics.PRECISION_AT_THRESHOLD(
                #     name=self._precision_keys[i], threshold=threshold))
                # eval_metrics[self._recall_keys[i]] = metrics.RECALL_AT_THRESHOLD(
                #     name=self._recall_keys[i], threshold=threshold)
            for i in range(len(self._classes_for_class_based_metrics)):
                # TODO(b/118843532): create Keras metrics
                eval_metrics[self._prob_keys[i]] = metrics.Mean(
                    name=self._prob_keys[i])
                # eval_metrics[self._auc_keys[i]] = metrics.AUC(name=self._auc_keys[i])
                # eval_metrics[self._auc_pr_keys[i]] = metrics.AUC_PR(
                #     name=self._auc_pr_keys[i])

        return eval_metrics
Example #8
 def metrics(self, regularization_losses=None):
     """Creates metrics. See `base_head.Head` for details."""
     keys = metric_keys.MetricKeys
     with ops.name_scope('metrics', values=(regularization_losses, )):
         # Mean metric.
         eval_metrics = {}
         eval_metrics[self._loss_mean_key] = metrics.Mean(
             name=keys.LOSS_MEAN)
         eval_metrics[self._accuracy_key] = (
             metrics.SparseCategoricalAccuracy(name=keys.ACCURACY))
         # TODO(b/118843532): create Keras metrics
         # eval_metrics[self._precision_key] = metrics.Precision(name=keys.AUC)
         # eval_metrics[self._recall_key] = metrics.Precision(name=keys.RECALL)
         eval_metrics[self._prediction_mean_key] = metrics.Mean(
             name=keys.PREDICTION_MEAN)
         eval_metrics[self._label_mean_key] = metrics.Mean(
             name=keys.LABEL_MEAN)
         # TODO(b/118843532): create Keras metrics
         # eval_metrics[self._accuracy_baseline_key] = (
         #     metrics.Mean(name=keys.ACCURACY_BASELINE))
         # eval_metrics[self._auc_key] = metrics.Precision(name=keys.PRECISION)
         # eval_metrics[self._auc_pr_key] = metrics.Precision(name=keys.AUC_PR)
         if regularization_losses is not None:
             eval_metrics[self._loss_regularization_key] = metrics.Mean(
                 name=keys.LOSS_REGULARIZATION)
         for i, threshold in enumerate(self._thresholds):
             eval_metrics[self._accuracy_keys[i]] = metrics.BinaryAccuracy(
                 name=self._accuracy_keys[i], threshold=threshold)
             # TODO(b/118843532): create Keras metrics
             # eval_metrics[self._precision_keys[i]] = (
             #     metrics.PRECISION_AT_THRESHOLD(
             #         name=self._precision_keys[i], threshold=threshold))
             # eval_metrics[self._recall_keys[i]] = metrics.RECALL_AT_THRESHOLD(
             #     name=self._recall_keys[i], threshold=threshold)
     return eval_metrics
Example #9
 def test_binary_accuracy_threshold(self):
     acc_obj = metrics.BinaryAccuracy(threshold=0.7)
     self.evaluate(variables.variables_initializer(acc_obj.variables))
     result_t = acc_obj([[1], [1], [0], [0]], [[0.9], [0.6], [0.4], [0.8]])
     result = self.evaluate(result_t)
     self.assertAlmostEqual(result, 0.5, 2)  # 2/4: predictions threshold to [1, 0, 0, 1]
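
The same thresholding is available through the stateless functional form; a minimal sketch (assuming the tf.keras.metrics namespace of TF 2.x):

import tensorflow as tf

# binary_accuracy thresholds per element: [0.9, 0.6, 0.4, 0.8] -> [1, 0, 0, 1].
y_true = tf.constant([[1.0], [1.0], [0.0], [0.0]])
y_pred = tf.constant([[0.9], [0.6], [0.4], [0.8]])
per_example = tf.keras.metrics.binary_accuracy(y_true, y_pred, threshold=0.7)
print(float(tf.reduce_mean(per_example)))  # 0.5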