Code Example #1
def _build_calibrated_score_converter(score_converter_fn, calibration_config):
    """Wraps a score_converter_fn, adding a calibration step.

  Builds a score converter function witha calibration transformation according
  to calibration_builder.py. Calibration applies positive monotonic
  transformations to inputs (i.e. score ordering is strictly preserved or
  adjacent scores are mapped to the same score). When calibration is
  class-agnostic, the highest-scoring class remains unchanged, unless two
  adjacent scores are mapped to the same value and one class arbitrarily
  selected to break the tie. In per-class calibration, it's possible (though
  rare in practice) that the highest-scoring class will change, since positive
  monotonicity is only required to hold within each class.

  Args:
    score_converter_fn: callable that takes logit scores as input.
    calibration_config: post_processing_pb2.PostProcessing.calibration_config.

  Returns:
    Callable calibrated score coverter op.
  """
    calibration_fn = calibration_builder.build(calibration_config)

    def calibrated_score_converter_fn(logits):
        converted_logits = score_converter_fn(logits)
        return calibration_fn(converted_logits)

    calibrated_score_converter_fn.__name__ = (
        'calibrate_with_%s' % calibration_config.WhichOneof('calibrator'))
    return calibrated_score_converter_fn
Code Example #2
def _build_calibrated_score_converter(score_converter_fn, calibration_config):
  """Wraps a score_converter_fn, adding a calibration step.

  Builds a score converter function with a calibration transformation according
  to calibration_builder.py. Calibration applies positive monotonic
  transformations to inputs (i.e. score ordering is strictly preserved or
  adjacent scores are mapped to the same score). When calibration is
  class-agnostic, the highest-scoring class remains unchanged, unless two
  adjacent scores are mapped to the same value and one class is arbitrarily
  selected to break the tie. In per-class calibration, it's possible (though
  rare in practice) that the highest-scoring class will change, since positive
  monotonicity is only required to hold within each class.

  Args:
    score_converter_fn: callable that takes logit scores as input.
    calibration_config: post_processing_pb2.PostProcessing.calibration_config.

  Returns:
    Callable calibrated score converter op.
  """
  calibration_fn = calibration_builder.build(calibration_config)
  def calibrated_score_converter_fn(logits):
    converted_logits = score_converter_fn(logits)
    return calibration_fn(converted_logits)
  calibrated_score_converter_fn.__name__ = (
      'calibrate_with_%s' % calibration_config.WhichOneof('calibrator'))
  return calibrated_score_converter_fn
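
For context, here is a minimal usage sketch of the wrapper above. It is not from the original source: the import paths, the temperature-scaling config, and the choice of tf.sigmoid as the score converter are illustrative assumptions that simply mirror what _build_calibrated_score_converter does.

import tensorflow as tf
from object_detection.builders import calibration_builder
from object_detection.protos import calibration_pb2

# Illustrative calibration config: temperature scaling with scaler = 2.0
# (the same value used in the temperature-scaling test further below).
calibration_config = calibration_pb2.CalibrationConfig()
calibration_config.temperature_scaling_calibration.scaler = 2.0

# Mirror the wrapper: convert logits first, then calibrate the converted scores.
calibration_fn = calibration_builder.build(calibration_config)

def calibrated_sigmoid_converter(logits):
  return calibration_fn(tf.sigmoid(logits))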
Code Example #3
    def test_multiclass_function_approximations(self):
        """Tests that calibration produces correct multiclass values."""
        # Background class (0-index) maps all predictions to 0.5.
        class_0_x = np.asarray([0.0, 0.5, 1.0])
        class_0_y = np.asarray([0.5, 0.5, 0.5])
        calibration_config = calibration_pb2.CalibrationConfig()
        self._add_function_approximation_to_calibration_proto(
            calibration_config, class_0_x, class_0_y, class_id=0)

        # Class id 1 will interpolate using these values.
        class_1_x = np.asarray([0.0, 0.2, 1.0])
        class_1_y = np.asarray([0.0, 0.6, 1.0])
        self._add_function_approximation_to_calibration_proto(
            calibration_config, class_1_x, class_1_y, class_id=1)

        od_graph = tf.Graph()
        with self.test_session(graph=od_graph) as sess:
            calibration_fn = calibration_builder.build(calibration_config)
            # batch_size = 2, num_classes = 2, num_anchors = 2.
            class_predictions_with_background = tf.constant(
                [[[0.1, 0.2], [0.9, 0.1]], [[0.6, 0.4], [0.08, 0.92]]],
                dtype=tf.float32)
            calibrated_scores = calibration_fn(
                class_predictions_with_background)
            calibrated_scores_np = sess.run(calibrated_scores)
        self.assertAllClose(
            calibrated_scores_np,
            [[[0.5, 0.6], [0.5, 0.3]], [[0.5, 0.7], [0.5, 0.96]]])
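
The expected values above can be checked by hand: class 0 maps every score to 0.5, while class 1 follows a piecewise-linear interpolation of its control points. A small sketch, assuming the function approximation behaves like plain linear interpolation (which the expected output suggests):

import numpy as np

# Class 1 scores taken from the test input above (second column of each anchor).
class_1_scores = np.array([0.2, 0.1, 0.4, 0.92])
class_1_x = np.array([0.0, 0.2, 1.0])
class_1_y = np.array([0.0, 0.6, 1.0])
print(np.interp(class_1_scores, class_1_x, class_1_y))
# -> [0.6, 0.3, 0.7, 0.96], matching the class-1 entries of the expected output.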
Code Example #4
    def test_class_agnostic_function_approximation(self):
        """Tests that calibration produces correct class-agnostic values."""
        # Generate fake calibration proto. For this interpolation, any input on
        # [0.0, 0.5] should be divided by 2 and any input on (0.5, 1.0] should have
        # 0.25 subtracted from it.
        class_agnostic_x = np.asarray([0.0, 0.5, 1.0])
        class_agnostic_y = np.asarray([0.0, 0.25, 0.75])
        calibration_config = calibration_pb2.CalibrationConfig()
        self._add_function_approximation_to_calibration_proto(
            calibration_config,
            class_agnostic_x,
            class_agnostic_y,
            class_id=None)

        od_graph = tf.Graph()
        with self.test_session(graph=od_graph) as sess:
            calibration_fn = calibration_builder.build(calibration_config)
            # batch_size = 2, num_anchors = 2, num_classes = 3.
            class_predictions_with_background = tf.constant(
                [[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]],
                 [[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]],
                dtype=tf.float32)

            # The same class-agnostic mapping is applied to every class.
            calibrated_scores = calibration_fn(
                class_predictions_with_background)
            calibrated_scores_np = sess.run(calibrated_scores)
        self.assertAllClose(calibrated_scores_np,
                            [[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]],
                             [[0.35, 0.45, 0.55], [0.65, 0.75, 0.75]]])
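
The setup comment describes the mapping (divide by 2 on [0.0, 0.5], subtract 0.25 on (0.5, 1.0]), and the expected output can be reproduced with plain linear interpolation. This check is an illustration, not part of the original test:

import numpy as np

# All scores from the test input above, flattened in row-major order.
scores = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.0, 0.6, 0.7, 0.8, 0.9, 1.0, 1.0])
x = np.array([0.0, 0.5, 1.0])
y = np.array([0.0, 0.25, 0.75])
print(np.interp(scores, x, y))
# -> [0.05, 0.1, 0.15, 0.2, 0.25, 0.0, 0.35, 0.45, 0.55, 0.65, 0.75, 0.75],
# which is the expected output reshaped from [2, 2, 3].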
Code Example #5
def _build_calibrated_score_converter(score_converter_fn, calibration_config):
  """Wraps a score_converter_fn, adding a calibration step."""
  calibration_fn = calibration_builder.build(calibration_config)
  def calibrated_score_converter_fn(logits):
    converted_logits = score_converter_fn(logits)
    return calibration_fn(converted_logits)
  calibrated_score_converter_fn.__name__ = (
      'calibrate_with_%s' % calibration_config.WhichOneof('calibrator'))
  return calibrated_score_converter_fn
Code Example #6
  def test_temperature_scaling_incorrect_value_error(self):
    """Tests that a zero temperature scaler raises ValueError."""
    calibration_config = calibration_pb2.CalibrationConfig()
    calibration_config.temperature_scaling_calibration.scaler = 0

    calibration_fn = calibration_builder.build(calibration_config)
    class_predictions_with_background = tf.constant(
        [[[0.1, 0.2, 0.3]]], dtype=tf.float32)
    with self.assertRaises(ValueError):
      calibration_fn(class_predictions_with_background)
Code Example #7
def graph_fn():
    calibration_fn = calibration_builder.build(calibration_config)
    # batch_size = 2, num_classes = 2, num_anchors = 2.
    class_predictions_with_background = tf.constant(
        [[[0.1, 0.2], [0.9, 0.1]], [[0.6, 0.4], [0.08, 0.92]]],
        dtype=tf.float32)
    calibrated_scores = calibration_fn(
        class_predictions_with_background)
    return calibrated_scores
Code Example #8
        def graph_fn():
            calibration_fn = calibration_builder.build(calibration_config)
            # batch_size = 2, num_anchors = 2, num_classes = 3.
            class_predictions_with_background = tf.constant(
                [[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]],
                 [[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]],
                dtype=tf.float32)

            # The same class-agnostic mapping is applied to every class.
            calibrated_scores = calibration_fn(
                class_predictions_with_background)
            return calibrated_scores
Code Example #9
  def test_temperature_scaling(self):
    """Tests that calibration produces correct temperature scaling values."""
    calibration_config = calibration_pb2.CalibrationConfig()
    calibration_config.temperature_scaling_calibration.scaler = 2.0

    od_graph = tf.Graph()
    with self.test_session(graph=od_graph) as sess:
      calibration_fn = calibration_builder.build(calibration_config)
      # batch_size = 2, num_anchors = 2, num_classes = 3.
      class_predictions_with_background = tf.constant(
          [[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]],
           [[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]],
          dtype=tf.float32)
      calibrated_scores = calibration_fn(class_predictions_with_background)
      calibrated_scores_np = sess.run(calibrated_scores)
    self.assertAllClose(calibrated_scores_np,
                        [[[0.05, 0.1, 0.15], [0.2, 0.25, 0.0]],
                         [[0.3, 0.35, 0.4], [0.45, 0.5, 0.5]]])
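
For these values, temperature scaling with scaler = 2.0 amounts to dividing every score by 2, which is easy to verify outside the graph. This is an illustrative check, not part of the original test:

import numpy as np

scores = np.array([[[0.1, 0.2, 0.3], [0.4, 0.5, 0.0]],
                   [[0.6, 0.7, 0.8], [0.9, 1.0, 1.0]]])
print(scores / 2.0)
# Matches the expected calibrated scores asserted above.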
Code Example #10
  def test_skips_class_when_calibration_parameters_not_present(self):
    """Tests that classes without calibration parameters are left unchanged."""
    # Only adding calibration parameters for class id = 0, even though class id
    # 1 is present in the data.
    class_0_x = np.asarray([0.0, 0.5, 1.0])
    class_0_y = np.asarray([0.5, 0.5, 0.5])
    calibration_config = calibration_pb2.CalibrationConfig()
    self._add_function_approximation_to_calibration_proto(
        calibration_config, class_0_x, class_0_y, class_id=0)
    od_graph = tf.Graph()
    with self.test_session(graph=od_graph) as sess:
      calibration_fn = calibration_builder.build(calibration_config)
      # batch_size = 2, num_classes = 2, num_anchors = 2.
      class_predictions_with_background = tf.constant(
          [[[0.1, 0.2], [0.9, 0.1]],
           [[0.6, 0.4], [0.08, 0.92]]],
          dtype=tf.float32)
      calibrated_scores = calibration_fn(class_predictions_with_background)
      calibrated_scores_np = sess.run(calibrated_scores)
    self.assertAllClose(calibrated_scores_np, [[[0.5, 0.2], [0.5, 0.1]],
                                               [[0.5, 0.4], [0.5, 0.92]]])