Example #1
    def test_accuracy(self):
        acc_obj = metrics.Accuracy(name='my acc')

        # check config
        self.assertEqual(acc_obj.name, 'my acc')
        self.assertTrue(acc_obj.stateful)
        self.assertEqual(len(acc_obj.variables), 2)
        self.assertEqual(acc_obj.dtype, dtypes.float32)
        self.evaluate(variables.variables_initializer(acc_obj.variables))

        # verify that correct value is returned
        update_op = acc_obj.update_state([[1], [2], [3], [4]],
                                         [[1], [2], [3], [4]])
        self.evaluate(update_op)
        result = self.evaluate(acc_obj.result())
        self.assertEqual(result, 1)  # 4/4

        # Check save and restore config
        a2 = metrics.Accuracy.from_config(acc_obj.get_config())
        self.assertEqual(a2.name, 'my acc')
        self.assertTrue(a2.stateful)
        self.assertEqual(len(a2.variables), 2)
        self.assertEqual(a2.dtype, dtypes.float32)

        # check with sample_weight
        result_t = acc_obj([[2], [1]], [[2], [0]],
                           sample_weight=[[0.5], [0.2]])
        result = self.evaluate(result_t)
        self.assertAlmostEqual(result, 0.96, 2)  # 4.5/4.7
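
For reference, the weighted-update arithmetic in the last assertion can be reproduced outside the test harness with the public Keras API; a minimal standalone sketch that arrives at the same 4.5/4.7 ratio:

import tensorflow as tf

acc = tf.keras.metrics.Accuracy()
# Four matching pairs, each with an implicit weight of 1: total=4, count=4.
acc.update_state([[1], [2], [3], [4]], [[1], [2], [3], [4]])
# One match weighted 0.5 and one mismatch weighted 0.2: total=4.5, count=4.7.
acc.update_state([[2], [1]], [[2], [0]], sample_weight=[[0.5], [0.2]])
print(acc.result().numpy())  # ~0.957
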
Example #2
 def metrics(self, regularization_losses=None):
   """Creates metrics. See `base_head.Head` for details."""
   keys = metric_keys.MetricKeys
   with ops.name_scope('metrics', values=(regularization_losses,)):
     # Mean metric.
     eval_metrics = {}
     eval_metrics[self._loss_mean_key] = metrics.Mean(name=keys.LOSS_MEAN)
     eval_metrics[self._accuracy_key] = metrics.Accuracy(name=keys.ACCURACY)
     eval_metrics[self._precision_key] = metrics.Precision(name=keys.PRECISION)
     eval_metrics[self._recall_key] = metrics.Recall(name=keys.RECALL)
     eval_metrics[self._prediction_mean_key] = metrics.Mean(
         name=keys.PREDICTION_MEAN)
     eval_metrics[self._label_mean_key] = metrics.Mean(name=keys.LABEL_MEAN)
     eval_metrics[self._accuracy_baseline_key] = (
         metrics.Mean(name=keys.ACCURACY_BASELINE))
     # The default summation_method is "interpolation" in the AUC metric.
     eval_metrics[self._auc_key] = metrics.AUC(name=keys.AUC)
     eval_metrics[self._auc_pr_key] = metrics.AUC(curve='PR', name=keys.AUC_PR)
     if regularization_losses is not None:
       eval_metrics[self._loss_regularization_key] = metrics.Mean(
           name=keys.LOSS_REGULARIZATION)
     for i, threshold in enumerate(self._thresholds):
       eval_metrics[self._accuracy_keys[i]] = metrics.BinaryAccuracy(
           name=self._accuracy_keys[i], threshold=threshold)
       eval_metrics[self._precision_keys[i]] = metrics.Precision(
           name=self._precision_keys[i], thresholds=threshold)
       eval_metrics[self._recall_keys[i]] = metrics.Recall(
           name=self._recall_keys[i], thresholds=threshold)
   return eval_metrics
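
Note the different keyword arguments in the per-threshold loop above: BinaryAccuracy takes a scalar threshold, while Precision and Recall take thresholds. A minimal sketch of the public Keras equivalents (the sample values below are purely illustrative):

import tensorflow as tf

y_true = [0, 1, 1, 0]
y_pred = [0.3, 0.9, 0.6, 0.2]

acc = tf.keras.metrics.BinaryAccuracy(threshold=0.7)   # singular kwarg
prec = tf.keras.metrics.Precision(thresholds=0.7)      # plural kwarg
rec = tf.keras.metrics.Recall(thresholds=0.7)
for m in (acc, prec, rec):
    m.update_state(y_true, y_pred)
print(acc.result().numpy(), prec.result().numpy(), rec.result().numpy())
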
Example #3
    def testEvaluatePerfectModel(self):
        checkpoint_dir = os.path.join(self.get_temp_dir(),
                                      'evaluate_perfect_model_once')

        # Train a Model to completion:
        self._train_model(checkpoint_dir, num_steps=300)

        # Run
        inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        logits = logistic_classifier(inputs)
        predictions = math_ops.round(logits)

        accuracy = metrics_module.Accuracy()
        update_op = accuracy.update_state(labels, predictions)

        checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

        final_ops_values = evaluation._evaluate_once(
            checkpoint_path=checkpoint_path,
            eval_ops=update_op,
            final_ops={'accuracy': (accuracy.result(), update_op)},
            hooks=[
                evaluation._StopAfterNEvalsHook(1),
            ])
        self.assertTrue(final_ops_values['accuracy'] > .99)
Example #4
 def metrics(self, regularization_losses=None):
   """Creates metrics. See `base_head.Head` for details."""
   keys = metric_keys.MetricKeys
   with ops.name_scope('metrics', values=(regularization_losses,)):
     # Mean metric.
     eval_metrics = {}
     eval_metrics[self._loss_mean_key] = metrics.Mean(name=keys.LOSS_MEAN)
     if regularization_losses is not None:
       eval_metrics[self._loss_regularization_key] = metrics.Mean(
           name=keys.LOSS_REGULARIZATION)
     # Accuracy metric.
     eval_metrics[self._accuracy_key] = metrics.Accuracy(name=keys.ACCURACY)
   return eval_metrics
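
The Mean metric used for the loss keys keeps a running, optionally weighted, average across update_state calls; a minimal standalone sketch:

import tensorflow as tf

loss_mean = tf.keras.metrics.Mean(name='loss_mean')
loss_mean.update_state([2.0, 4.0])               # running mean: (2 + 4) / 2 = 3.0
loss_mean.update_state(6.0, sample_weight=2.0)   # (2 + 4 + 6*2) / (1 + 1 + 2) = 4.5
print(loss_mean.result().numpy())  # 4.5
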
Example #5
    def testEvaluateWithFiniteInputs(self):
        checkpoint_dir = os.path.join(self.get_temp_dir(),
                                      'evaluate_with_finite_inputs')

        # Train a Model to completion:
        self._train_model(checkpoint_dir, num_steps=300)

        # Run evaluation. Inputs are fed through input producer for one epoch.
        all_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        all_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

        single_input, single_label = training.slice_input_producer(
            [all_inputs, all_labels], num_epochs=1)
        inputs, labels = training.batch([single_input, single_label],
                                        batch_size=6,
                                        allow_smaller_final_batch=True)

        logits = logistic_classifier(inputs)
        predictions = math_ops.round(logits)

        accuracy = metrics_module.Accuracy()
        update_op = accuracy.update_state(labels, predictions)

        checkpoint_path = saver.latest_checkpoint(checkpoint_dir)

        final_ops_values = evaluation._evaluate_once(
            checkpoint_path=checkpoint_path,
            eval_ops=update_op,
            final_ops={
                'accuracy': (accuracy.result(), update_op),
                'eval_steps': evaluation._get_or_create_eval_step()
            },
            hooks=[
                evaluation._StopAfterNEvalsHook(None),
            ])
        self.assertTrue(final_ops_values['accuracy'] > .99)
        # Evaluation runs for 4 iterations: the first two consume a full batch of
        # 6 inputs each, the third consumes the remaining 4 inputs, and the fourth
        # triggers the end-of-input error that stops evaluation.
        self.assertEqual(final_ops_values['eval_steps'], 4)
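
For reference, the batch arithmetic behind eval_steps == 4 (assuming 16 examples in total, which is what the 6 + 6 + 4 split in the comment implies): with batch_size=6 and allow_smaller_final_batch=True, one epoch yields batches of 6, 6 and 4, and the fourth dequeue attempt hits the end of the epoch. A tf.data sketch of the same split:

import tensorflow as tf

batch_sizes = [int(b.shape[0]) for b in tf.data.Dataset.range(16).batch(6)]
print(batch_sizes)  # [6, 6, 4]
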
Example #6
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import metrics

train_dataset = tf.data.Dataset.from_tensor_slices((scaled_images, rotations))
train_dataset = train_dataset.shuffle(buffer_size=524)
valid_dataset = tf.data.Dataset.from_tensor_slices(
    (scaled_images_valid, rotations_valid))
valid_dataset = valid_dataset.shuffle(buffer_size=524)

model = ConvModel()

#loss_object = tf.keras.losses.binary_crossentropy()
optimizer = tf.keras.optimizers.Adam()
# Track metric values during training
# Loss
train_loss = metrics.Mean(name='train_loss')
valid_loss = metrics.Mean(name='valid_loss')
# Accuracy
train_accuracy = metrics.Accuracy(name='train_accuracy')
valid_accuracy = metrics.Accuracy(name='valid_accuracy')


@tf.function
def train_step(image, rotations):
    # record the operations performed so the gradient can be computed
    with tf.GradientTape() as tape:
        # make a prediction
        predictions = model(image)
        print("rotations shape after creation model", rotations)
        print("prediction shape after creation model", predictions)
        # compute the loss from the predictions and the targets
        loss = keras.losses.mean_squared_error(rotations, predictions)
        print("calcul loss", loss)
    # compute the gradient from the loss