Example #1
0
 def add_input(
     self, accumulator: _CompilableMetricsAccumulator,
     element: metric_types.StandardMetricInputs
 ) -> _CompilableMetricsAccumulator:
     """Accumulates the element's labels/predictions/weights per output.

     Batches are flushed to the compiled keras metrics once either the
     configured batch size or the total input byte-size threshold is hit.
     """
     # class_weights being set means micro averaging is in effect; flatten so
     # that each class is treated as though it was an independent example.
     flatten = self._class_weights is not None
     # Skip top_k processing here and let keras perform top_k calculations.
     sub_key = None if self._is_top_k() else self._sub_key
     for output_index, output_name in enumerate(self._output_names):
         tuples = metric_util.to_label_prediction_example_weight(
             element,
             eval_config=self._eval_config,
             model_name=self._model_name,
             output_name=output_name,
             sub_key=sub_key,
             class_weights=self._class_weights,
             flatten=flatten)
         for label, prediction, example_weight in tuples:
             # Keras requires non-sparse keys for top_k calculations.
             if self._is_top_k() and label.shape != prediction.shape:
                 label = metric_util.one_hot(label, prediction)
             accumulator.add_input(output_index, label, prediction,
                                   example_weight)
     batch_full = accumulator.len_inputs() >= self._batch_size
     bytes_exceeded = (accumulator.total_input_byte_size >=
                       self._TOTAL_INPUT_BYTE_SIZE_THRESHOLD)
     if batch_full or bytes_exceeded:
         self._process_batch(accumulator)
     return accumulator
Example #2
0
 def add_input(
     self, accumulator: tf_metric_accumulators.TFCompilableMetricsAccumulator,
     element: metric_types.StandardMetricInputs
 ) -> tf_metric_accumulators.TFCompilableMetricsAccumulator:
   """Accumulates the element's labels/predictions/weights per output.

   Flushes the accumulated batch through the compiled metrics whenever the
   accumulator reports it should be flushed.
   """
   # Micro averaging requires flatten=True so that each class is treated as
   # though it was an independent example.
   micro_average = (
       self._aggregation_type and self._aggregation_type.micro_average)
   # Skip sub_key processing if it is already part of the keras config.
   sub_key = None if self._sub_key_in_config else self._sub_key
   for output_index, output_name in enumerate(self._output_names):
     tuples = metric_util.to_label_prediction_example_weight(
         element,
         eval_config=self._eval_config,
         model_name=self._model_name,
         output_name=output_name,
         sub_key=sub_key,
         aggregation_type=self._aggregation_type,
         class_weights=self._class_weights,
         flatten=micro_average)
     for label, prediction, example_weight in tuples:
       # Keras requires non-sparse keys for its calculations.
       if self._sub_key_in_config and label.shape != prediction.shape:
         label = metric_util.one_hot(label, prediction)
       accumulator.add_input(output_index, label, prediction, example_weight)
   if accumulator.should_flush():
     self._process_batch(accumulator)
   return accumulator
Example #3
0
 def add_input(self, accumulator: _Matrices,
               element: metric_types.StandardMetricInputs) -> _Matrices:
     """Adds one example's weighted confusion-matrix entries to accumulator.

     For every configured threshold and every (actual class, predicted class)
     pair, increments the TP/FP/TN/FN counts keyed by _MatrixEntryKey.

     Raises:
       ValueError: if the example has no labels, or if the prediction vector
         has fewer than two entries (multi-label requires multiple classes).
     """
     labels, predictions, example_weight = next(
         metric_util.to_label_prediction_example_weight(
             element,
             eval_config=self._eval_config,
             model_name=self._key.model_name,
             output_name=self._key.output_name,
             flatten=False,
             require_single_example_weight=True))
     if not labels.shape:
         raise ValueError(
             'Labels missing from example: StandardMetricInputs={}'.format(
                 element))
     if predictions.shape in ((), (1,)):
         raise ValueError(
             'Predictions shape must be > 1 for multi-label confusion matrix: '
             'shape={}, StandardMetricInputs={}'.format(
                 predictions.shape, element))
     # Different label/prediction shapes mean the labels are sparse class ids;
     # convert them to a dense one-hot representation.
     if (len(labels.shape) != len(predictions.shape)
             or labels.shape[-1] != predictions.shape[-1]):
         labels = metric_util.one_hot(labels, predictions)
     weight = float(example_weight)
     for threshold in self._thresholds:
         matrices = accumulator.setdefault(threshold, {})
         for actual_class_id, actual_label in enumerate(labels):
             if not actual_label:
                 continue
             for predicted_class_id, prediction in enumerate(predictions):
                 entry_key = _MatrixEntryKey(actual_class_id,
                                             predicted_class_id)
                 # NOTE(review): the counts are driven by the label of the
                 # *predicted* class, mirroring the original logic exactly.
                 predicted_label = labels[predicted_class_id]
                 above = prediction > threshold
                 at_or_below = prediction <= threshold
                 tp = (predicted_label and above) * weight
                 fn = (predicted_label and at_or_below) * weight
                 fp = (not predicted_label and above) * weight
                 tn = (not predicted_label and at_or_below) * weight
                 if entry_key in matrices:
                     entry = matrices[entry_key]
                     entry.false_negatives += fn
                     entry.true_negatives += tn
                     entry.false_positives += fp
                     entry.true_positives += tp
                 else:
                     entry = _ConfusionMatrix()
                     entry.false_negatives = fn
                     entry.true_negatives = tn
                     entry.false_positives = fp
                     entry.true_positives = tp
                     matrices[entry_key] = entry
     return accumulator