Example #1
  def extract_images_and_targets(read_data):
    """Extract images and targets from the input dict."""
    image = read_data[fields.InputDataFields.image]
    key = ''
    if fields.InputDataFields.source_id in read_data:
      key = read_data[fields.InputDataFields.source_id]
    location_gt = read_data[fields.InputDataFields.groundtruth_boxes]
    classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes],
                         tf.int32)
    classes_gt -= label_id_offset

    if merge_multiple_label_boxes and use_multiclass_scores:
      raise ValueError(
          'Using both merge_multiple_label_boxes and use_multiclass_scores is '
          'not supported'
      )

    if merge_multiple_label_boxes:
      location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels(
          location_gt, classes_gt, num_classes)
    elif use_multiclass_scores:
      classes_gt = tf.cast(read_data[fields.InputDataFields.multiclass_scores],
                           tf.float32)
    else:
      classes_gt = util_ops.padded_one_hot_encoding(
          indices=classes_gt, depth=num_classes, left_pad=0)
    masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks)
    keypoints_gt = read_data.get(fields.InputDataFields.groundtruth_keypoints)
    if (merge_multiple_label_boxes and (
        masks_gt is not None or keypoints_gt is not None)):
      raise NotImplementedError('Multi-label support is only for boxes.')
    weights_gt = read_data.get(
        fields.InputDataFields.groundtruth_weights)
    return (image, key, location_gt, classes_gt, masks_gt, keypoints_gt,
            weights_gt)
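In the TF Object Detection API this closure is typically mapped over the list of tensor dicts dequeued from the input queue, with zip(*...) recovering the per-field lists. A minimal usage sketch (read_data_list is a hypothetical stand-in for that dequeued batch):

  # Hypothetical: read_data_list is a list of tensor dicts, one per image.
  (images, keys, locations_list, classes_list,
   masks_list, keypoints_list, weights_list) = zip(
       *map(extract_images_and_targets, read_data_list))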
Example #2
 def extract_images_and_targets(read_data):
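   """Extract images and targets from the input dict."""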
   image = read_data[fields.InputDataFields.image]
   location_gt = read_data[fields.InputDataFields.groundtruth_boxes]
   classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes],
                        tf.int32)
   classes_gt -= label_id_offset
   classes_gt = util_ops.padded_one_hot_encoding(indices=classes_gt,
                                                 depth=num_classes, left_pad=0)
   masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks)
   return image, location_gt, classes_gt, masks_gt
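With left_pad=0, padded_one_hot_encoding reduces to a plain one-hot encoding of the offset class indices. A small illustration (a sketch, not library code; values assume label_id_offset=1 and num_classes=3):

  import tensorflow as tf

  classes_gt = tf.constant([1, 3]) - 1       # label_id_offset = 1
  one_hot = tf.one_hot(classes_gt, depth=3)  # [[1., 0., 0.], [0., 0., 1.]]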
Example #3
 def test_correct_padded_one_hot_tensor_with_empty_indices(self):
   depth = 6
   pad = 2
   indices = tf.constant([])
   one_hot_tensor = ops.padded_one_hot_encoding(
       indices, depth=depth, left_pad=pad)
   expected_tensor = np.zeros((0, depth + pad))
   with self.test_session() as sess:
     out_one_hot_tensor = sess.run(one_hot_tensor)
     self.assertAllClose(out_one_hot_tensor, expected_tensor, rtol=1e-10,
                         atol=1e-10)
Example #4
 def test_correct_one_hot_tensor_with_pad_three(self):
   indices = tf.constant([1, 2, 3, 5])
   one_hot_tensor = ops.padded_one_hot_encoding(indices, depth=6, left_pad=3)
   expected_tensor = np.array([[0, 0, 0, 0, 1, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 1, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0, 1, 0, 0],
                               [0, 0, 0, 0, 0, 0, 0, 0, 1]], np.float32)
   with self.test_session() as sess:
     out_one_hot_tensor = sess.run(one_hot_tensor)
     self.assertAllClose(out_one_hot_tensor, expected_tensor, rtol=1e-10,
                         atol=1e-10)
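Taken together, the tests pin down the contract of ops.padded_one_hot_encoding: depth and left_pad must be non-negative integers, a zero depth yields None, and left_pad zero columns are prepended to each one-hot row. A minimal sketch consistent with that contract (an illustrative assumption, not the library source):

  import tensorflow as tf

  def padded_one_hot_encoding(indices, depth, left_pad):
    """Sketch: one-hot encode `indices`, left-padding with zero columns."""
    if not isinstance(depth, int) or not isinstance(left_pad, int):
      raise ValueError('`depth` and `left_pad` must be integers.')
    if depth < 0 or left_pad < 0:
      raise ValueError('`depth` and `left_pad` must be non-negative.')
    if depth == 0:
      return None  # nothing to encode
    one_hot = tf.one_hot(tf.cast(indices, tf.int64), depth)
    return tf.pad(one_hot, [[0, 0], [left_pad, 0]])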
Example #5
  def extract_images_and_targets(read_data):
    """Extract images and targets from the input dict."""
    suffix = 0

    images = []
    keys = []
    locations = []
    classes = []
    masks = []
    keypoints = []

    while fields.InputDataFields.image + str(suffix) in read_data:
      image = read_data[fields.InputDataFields.image + str(suffix)]
      key = ''
      if fields.InputDataFields.source_id + str(suffix) in read_data:
        key = read_data[fields.InputDataFields.source_id + str(suffix)]
      location_gt = (
          read_data[fields.InputDataFields.groundtruth_boxes + str(suffix)])
      classes_gt = tf.cast(
          read_data[fields.InputDataFields.groundtruth_classes + str(suffix)],
          tf.int32)
      classes_gt -= label_id_offset
      masks_gt = read_data.get(
          fields.InputDataFields.groundtruth_instance_masks + str(suffix))
      keypoints_gt = read_data.get(
          fields.InputDataFields.groundtruth_keypoints + str(suffix))

      if merge_multiple_label_boxes:
        location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels(
            location_gt, classes_gt, num_classes)
      else:
        classes_gt = util_ops.padded_one_hot_encoding(
            indices=classes_gt, depth=num_classes, left_pad=0)

      # Accumulate the per-image input data and groundtruth. Images,
      # locations, and classes should contain the same number of items.
      images.append(image)
      keys.append(key)
      locations.append(location_gt)
      classes.append(classes_gt)
      masks.append(masks_gt)
      keypoints.append(keypoints_gt)

      suffix += 1

    return (images, keys, locations, classes, masks, keypoints)
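This variant expects each field name to carry a numeric suffix when several images are packed into one read_data dict. A hypothetical illustration of that key convention (tensor values elided):

  # Hypothetical read_data for two images under the suffixed-key convention.
  read_data = {
      'image0': ..., 'groundtruth_boxes0': ..., 'groundtruth_classes0': ...,
      'image1': ..., 'groundtruth_boxes1': ..., 'groundtruth_classes1': ...,
  }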
Example #6
    def extract_images_and_targets(read_data):
        """Extract images and targets from the input dict."""
        image = read_data[fields.InputDataFields.image]
        key = ''
        if fields.InputDataFields.source_id in read_data:
            key = read_data[fields.InputDataFields.source_id]
        location_gt = read_data[fields.InputDataFields.groundtruth_boxes]
        classes_gt = tf.cast(
            read_data[fields.InputDataFields.groundtruth_classes], tf.int32)
        classes_gt -= label_id_offset

        if merge_multiple_label_boxes and use_multiclass_scores:
            raise ValueError(
                'Using both merge_multiple_label_boxes and use_multiclass_scores is '
                'not supported')

        if merge_multiple_label_boxes:
            location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels(
                location_gt, classes_gt, num_classes)
            classes_gt = tf.cast(classes_gt, tf.float32)
        elif use_multiclass_scores:
            classes_gt = tf.cast(
                read_data[fields.InputDataFields.multiclass_scores],
                tf.float32)
        else:
            classes_gt = util_ops.padded_one_hot_encoding(indices=classes_gt,
                                                          depth=num_classes,
                                                          left_pad=0)
        masks_gt = read_data.get(
            fields.InputDataFields.groundtruth_instance_masks)
        keypoints_gt = read_data.get(
            fields.InputDataFields.groundtruth_keypoints)
        if (merge_multiple_label_boxes
                and (masks_gt is not None or keypoints_gt is not None)):
            raise NotImplementedError('Multi-label support is only for boxes.')
        weights_gt = read_data.get(fields.InputDataFields.groundtruth_weights)
        return (image, key, location_gt, classes_gt, masks_gt, keypoints_gt,
                weights_gt)
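Note one difference from Example #1: after merging multi-label boxes, this variant casts classes_gt to tf.float32, so all three branches return a floating-point class representation.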
Example #7
 def test_raise_value_error_on_float_depth(self):
   indices = tf.constant(1.0, shape=(2, 3))
   with self.assertRaises(ValueError):
     ops.padded_one_hot_encoding(indices, depth=0.1, left_pad=2)
Example #8
 def test_raise_value_error_on_negative_pad(self):
   indices = tf.constant(1.0, shape=(2, 3))
   with self.assertRaises(ValueError):
     ops.padded_one_hot_encoding(indices, depth=6, left_pad=-1)
Example #9
 def test_return_none_on_zero_depth(self):
   indices = tf.constant([1, 2, 3, 4, 5])
   one_hot_tensor = ops.padded_one_hot_encoding(indices, depth=0, left_pad=2)
   self.assertEqual(one_hot_tensor, None)
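Note the zero-depth contract: the op returns None rather than an empty tensor. A quick check against the sketch after Example #4 (assumes that sketch):

  assert padded_one_hot_encoding(tf.constant([1, 2]), depth=0, left_pad=2) is None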