Example #1
import tensorflow as tf
from object_detection.core import keypoint_ops


def graph_fn():
    # Scale keypoints given in absolute pixel coordinates of a 100x200
    # image into normalized [0, 1] coordinates.
    keypoints = tf.constant([[[0.0, 0.0], [100.0, 200.0]],
                             [[50.0, 120.0], [100.0, 140.0]]])
    y_scale = tf.constant(1.0 / 100)
    x_scale = tf.constant(1.0 / 200)

    expected_keypoints = tf.constant([[[0., 0.], [1.0, 1.0]],
                                      [[0.5, 0.6], [1.0, 0.7]]])
    output = keypoint_ops.scale(keypoints, y_scale, x_scale)
    return output, expected_keypoints
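
One way to evaluate this graph function, assuming a TF1-style session (the test harness that originally wrapped this snippet is not shown):

    with tf.Session() as sess:
        output_, expected_ = sess.run(graph_fn())
        # output_ now matches expected_ elementwise.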
Example #2
import tensorflow as tf
from object_detection.core import keypoint_ops


class KeypointOpsTest(tf.test.TestCase):

    def test_scale(self):
        # Scale keypoints given in absolute pixel coordinates of a 100x200
        # image into normalized [0, 1] coordinates.
        keypoints = tf.constant([[[0.0, 0.0], [100.0, 200.0]],
                                 [[50.0, 120.0], [100.0, 140.0]]])
        y_scale = tf.constant(1.0 / 100)
        x_scale = tf.constant(1.0 / 200)

        expected_keypoints = tf.constant([[[0., 0.], [1.0, 1.0]],
                                          [[0.5, 0.6], [1.0, 0.7]]])
        output = keypoint_ops.scale(keypoints, y_scale, x_scale)

        with self.test_session() as sess:
            output_, expected_keypoints_ = sess.run(
                [output, expected_keypoints])
            self.assertAllClose(output_, expected_keypoints_)
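
To run the test above as a standalone script, the standard tf.test entry point applies:

    if __name__ == '__main__':
        tf.test.main()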
Example #3
import tensorflow as tf

from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.utils import ops


def result_dict_for_single_example(image,
                                   key,
                                   detections,
                                   groundtruth=None,
                                   class_agnostic=False,
                                   scale_to_absolute=False):
  """Merges all detection and groundtruth information for a single example.

  Note that evaluation tools require classes that are 1-indexed, and so this
  function performs the offset. If `class_agnostic` is True, all output classes
  have label 1.

  Args:
    image: A single 4D uint8 image tensor of shape [1, H, W, C].
    key: A single string tensor identifying the image.
    detections: A dictionary of detections, returned from
      DetectionModel.postprocess().
    groundtruth: (Optional) Dictionary of groundtruth items, with fields:
      'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
        normalized coordinates.
      'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
      'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
      'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
      'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
      'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
      'groundtruth_instance_masks': 3D int64 tensor of instance masks
        (Optional).
    class_agnostic: Boolean indicating whether the detections are class-agnostic
      (i.e. binary). Default False.
    scale_to_absolute: Boolean indicating whether boxes and keypoints should be
      scaled to absolute coordinates. Note that for IoU based evaluations, it
      does not matter whether boxes are expressed in absolute or relative
      coordinates. Default False.

  Returns:
    A dictionary with:
    'original_image': A [1, H, W, C] uint8 image tensor.
    'key': A string tensor with image identifier.
    'detection_boxes': [max_detections, 4] float32 tensor of boxes, in
      normalized or absolute coordinates, depending on the value of
      `scale_to_absolute`.
    'detection_scores': [max_detections] float32 tensor of scores.
    'detection_classes': [max_detections] int64 tensor of 1-indexed classes.
    'detection_masks': [max_detections, H, W] uint8 tensor of binarized
      masks, reframed to full image masks.
    'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
      normalized or absolute coordinates, depending on the value of
      `scale_to_absolute`. (Optional)
    'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
      (Optional)
    'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
    'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
    'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
    'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
    'groundtruth_instance_masks': 3D int64 tensor of instance masks
      (Optional).

  """
  label_id_offset = 1  # Applying label id offset (b/63711816)

  input_data_fields = fields.InputDataFields
  output_dict = {
      input_data_fields.original_image: image,
      input_data_fields.key: key,
  }

  detection_fields = fields.DetectionResultFields
  detection_boxes = detections[detection_fields.detection_boxes][0]
  image_shape = tf.shape(image)
  detection_scores = detections[detection_fields.detection_scores][0]

  if class_agnostic:
    detection_classes = tf.ones_like(detection_scores, dtype=tf.int64)
  else:
    detection_classes = (
        tf.to_int64(detections[detection_fields.detection_classes][0]) +
        label_id_offset)

  num_detections = tf.to_int32(detections[detection_fields.num_detections][0])
  detection_boxes = tf.slice(
      detection_boxes, begin=[0, 0], size=[num_detections, -1])
  detection_classes = tf.slice(
      detection_classes, begin=[0], size=[num_detections])
  detection_scores = tf.slice(
      detection_scores, begin=[0], size=[num_detections])

  if scale_to_absolute:
    absolute_detection_boxlist = box_list_ops.to_absolute_coordinates(
        box_list.BoxList(detection_boxes), image_shape[1], image_shape[2])
    output_dict[detection_fields.detection_boxes] = (
        absolute_detection_boxlist.get())
  else:
    output_dict[detection_fields.detection_boxes] = detection_boxes
  output_dict[detection_fields.detection_classes] = detection_classes
  output_dict[detection_fields.detection_scores] = detection_scores

  if detection_fields.detection_masks in detections:
    detection_masks = detections[detection_fields.detection_masks][0]
    # TODO(rathodv): This should be done in model's postprocess
    # function ideally.
    detection_masks = tf.slice(
        detection_masks, begin=[0, 0, 0], size=[num_detections, -1, -1])
    detection_masks_reframed = ops.reframe_box_masks_to_image_masks(
        detection_masks, detection_boxes, image_shape[1], image_shape[2])
    detection_masks_reframed = tf.cast(
        tf.greater(detection_masks_reframed, 0.5), tf.uint8)
    output_dict[detection_fields.detection_masks] = detection_masks_reframed
  if detection_fields.detection_keypoints in detections:
    detection_keypoints = detections[detection_fields.detection_keypoints][0]
    output_dict[detection_fields.detection_keypoints] = detection_keypoints
    if scale_to_absolute:
      absolute_detection_keypoints = keypoint_ops.scale(
          detection_keypoints, image_shape[1], image_shape[2])
      output_dict[detection_fields.detection_keypoints] = (
          absolute_detection_keypoints)

  if groundtruth:
    if input_data_fields.groundtruth_instance_masks in groundtruth:
      groundtruth[input_data_fields.groundtruth_instance_masks] = tf.cast(
          groundtruth[input_data_fields.groundtruth_instance_masks], tf.uint8)
    output_dict.update(groundtruth)
    if scale_to_absolute:
      groundtruth_boxes = groundtruth[input_data_fields.groundtruth_boxes]
      absolute_gt_boxlist = box_list_ops.to_absolute_coordinates(
          box_list.BoxList(groundtruth_boxes), image_shape[1], image_shape[2])
      output_dict[input_data_fields.groundtruth_boxes] = (
          absolute_gt_boxlist.get())
    # For class-agnostic models, groundtruth classes all become 1.
    if class_agnostic:
      groundtruth_classes = groundtruth[input_data_fields.groundtruth_classes]
      groundtruth_classes = tf.ones_like(groundtruth_classes, dtype=tf.int64)
      output_dict[input_data_fields.groundtruth_classes] = groundtruth_classes

  return output_dict
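
A hedged call sketch for the function above; `model`, `prediction_dict`, `true_image_shapes`, `image_tensor`, `gt_boxes`, and `gt_classes` are illustrative placeholders rather than names from the original source:

    detections = model.postprocess(prediction_dict, true_image_shapes)
    eval_dict = result_dict_for_single_example(
        image=image_tensor,          # [1, H, W, C] uint8
        key=tf.constant('image_0'),
        detections=detections,
        groundtruth={
            'groundtruth_boxes': gt_boxes,      # [num_boxes, 4], normalized
            'groundtruth_classes': gt_classes,  # [num_boxes], 1-indexed int64
        },
        scale_to_absolute=True)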
Example #4
from object_detection.core import keypoint_ops

def _scale_keypoint_to_absolute(args):
    # Convert normalized keypoints to absolute coordinates (height, width).
    keypoints, image_shape = args
    return keypoint_ops.scale(keypoints, image_shape[0], image_shape[1])
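
In batched evaluation this kind of helper is typically driven through a per-image map; a sketch with plain tf.map_fn, where `keypoints_batch` and `image_shapes` are assumed inputs:

    # keypoints_batch: [batch, num_instances, num_keypoints, 2] float32 in
    # normalized coordinates; image_shapes: [batch, 3] (height, width, depth).
    absolute_keypoints = tf.map_fn(
        _scale_keypoint_to_absolute,
        elems=(keypoints_batch, tf.cast(image_shapes, tf.float32)),
        dtype=tf.float32)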