Example #1
0
    def graph_fn():
      """Builds pad/clip ops whose target sizes are scalar tensors."""
      target_size = tf.constant(2)

      short_vec = tf.constant([1], dtype=tf.int32)
      padded_vec = shape_utils.pad_or_clip_tensor(short_vec, target_size)
      short_mat = tf.constant([[0.1, 0.2]], dtype=tf.float32)
      padded_mat = shape_utils.pad_or_clip_tensor(short_mat, target_size)

      long_vec = tf.constant([1, 2, 3], dtype=tf.int32)
      clipped_vec = shape_utils.clip_tensor(long_vec, target_size)
      long_mat = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]],
                             dtype=tf.float32)
      clipped_mat = shape_utils.clip_tensor(long_mat, target_size)

      return padded_vec, padded_mat, clipped_vec, clipped_mat
Example #2
0
    def graph_fn():
      """Pads/clips with python-int sizes and checks static shapes."""
      outputs = []

      short_vec = tf.constant([1], dtype=tf.int32)
      outputs.append(shape_utils.pad_or_clip_tensor(short_vec, 2))
      short_mat = tf.constant([[0.1, 0.2]], dtype=tf.float32)
      outputs.append(shape_utils.pad_or_clip_tensor(short_mat, 2))

      long_vec = tf.constant([1, 2, 3], dtype=tf.int32)
      outputs.append(shape_utils.clip_tensor(long_vec, 2))
      long_mat = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]],
                             dtype=tf.float32)
      outputs.append(shape_utils.clip_tensor(long_mat, 2))

      # With a python-int size, the leading dimension is known statically
      # at graph-construction time.
      for tensor in outputs:
        self.assertEqual(2, tensor.get_shape()[0])

      return tuple(outputs)
Example #3
0
    def _per_batch_gather_padding(args):
        """Gathers kept proposals and pads/clips them to a fixed row count.

        Relies on the enclosing scope for two names: `proposal_classes`
        (when not None, a fourth classes tensor is expected in `args`) and
        `max_numbers` (the target row count passed to
        shape_utils.pad_or_clip_tensor).

        Args:
            args[0]: boxes. A float tensor with shape [num_proposals, 4]
                representing the (potentially zero-padded) proposal boxes;
                coordinate order appears to be [x1, y1, x2, y2] — the
                original comment said [x1,y2,x2,y2], likely a typo; TODO
                confirm against the caller.
            args[1]: scores. A float tensor with shape
                [num_proposals, num_class] representing the (potentially
                zero-padded) proposal scores.
            args[2]: keep. A bool tensor marking which proposals to keep;
                True entries are gathered. (The squeeze(axis=-1) below
                implies tf.where yields [k, 1] indices, i.e. `keep` is
                effectively 1-D — NOTE(review): the original docstring said
                [num_proposals, 1]; verify against the caller.)
            args[3]: classes. Present only when `proposal_classes` is not
                None; per-proposal classes, gathered and padded like the
                other tensors. (NOTE(review): the original docstring
                described args[3] as max_number, which does not match the
                code — max_numbers comes from the enclosing scope.)

        Returns:
            A list [boxes_result, score_result], each gathered by `keep`
            and padded or clipped to `max_numbers` rows, with class_result
            appended when `proposal_classes` is not None.
        """
        boxes = args[0]
        scores = args[1]
        keep = args[2]
        if proposal_classes is not None:
            classes = args[3]
        # tf.where returns [k, 1] coordinates; squeeze the trailing dim to
        # get a 1-D index vector usable by tf.gather.
        result_indice = tf.where(keep)
        result_indice = tf.squeeze(result_indice, axis=-1)
        boxes_result = tf.gather(boxes, result_indice)
        boxes_result = shape_utils.pad_or_clip_tensor(
            boxes_result, max_numbers)
        score_result = tf.gather(scores, result_indice)
        score_result = shape_utils.pad_or_clip_tensor(
            score_result, max_numbers)
        result = [boxes_result, score_result]
        if proposal_classes is not None:
            class_result = tf.gather(classes, result_indice)
            class_result = shape_utils.pad_or_clip_tensor(
                class_result, max_numbers)
            result.append(class_result)
        return result
  def test_pad_or_clip_tensor_using_tensor_input(self):
    """pad_or_clip_tensor / clip_tensor accept scalar-tensor sizes."""
    target_size = tf.constant(2)

    short_vec = tf.constant([1], dtype=tf.int32)
    padded_vec = shape_utils.pad_or_clip_tensor(short_vec, target_size)
    short_mat = tf.constant([[0.1, 0.2]], dtype=tf.float32)
    padded_mat = shape_utils.pad_or_clip_tensor(short_mat, target_size)

    long_vec = tf.constant([1, 2, 3], dtype=tf.int32)
    clipped_vec = shape_utils.clip_tensor(long_vec, target_size)
    long_mat = tf.constant([[0.1, 0.2], [0.2, 0.4], [0.5, 0.8]],
                           dtype=tf.float32)
    clipped_mat = shape_utils.clip_tensor(long_mat, target_size)

    with self.test_session() as sess:
      vec_out, mat_out, clipped_vec_out, clipped_mat_out = sess.run(
          [padded_vec, padded_mat, clipped_vec, clipped_mat])
      self.assertAllEqual([1, 0], vec_out)
      self.assertAllClose([[0.1, 0.2], [0, 0]], mat_out)
      self.assertAllEqual([1, 2], clipped_vec_out)
      self.assertAllClose([[0.1, 0.2], [0.2, 0.4]], clipped_mat_out)
Example #5
0
def pad_or_clip_box_list(boxlist, num_boxes, scope=None):
    """Pads or clips every field of a BoxList to `num_boxes` rows.

    Args:
      boxlist: A BoxList holding any number of boxes.
      num_boxes: The first num_boxes entries of each field are kept;
        fields with fewer entries are zero-padded up to num_boxes.
      scope: name scope.

    Returns:
      A BoxList whose boxes and extra fields all have num_boxes rows.
    """
    with tf.name_scope(scope, 'PadOrClipBoxList'):
        resized = box_list.BoxList(
            shape_utils.pad_or_clip_tensor(boxlist.get(), num_boxes))
        # Resize every extra field the same way so all fields stay aligned.
        for field_name in boxlist.get_extra_fields():
            resized.add_field(
                field_name,
                shape_utils.pad_or_clip_tensor(
                    boxlist.get_field(field_name), num_boxes))
        return resized
Example #6
0
def pad_or_clip_box_list(boxlist, num_boxes, scope=None):
  """Pads or clips all fields of a BoxList to exactly `num_boxes` rows.

  Args:
    boxlist: A BoxList with an arbitrary number of boxes.
    num_boxes: Target row count. Fields longer than this are clipped;
      shorter ones are zero-padded.
    scope: name scope.

  Returns:
    A new BoxList with all fields padded or clipped.
  """
  with tf.name_scope(scope, 'PadOrClipBoxList'):
    resized_boxes = shape_utils.pad_or_clip_tensor(boxlist.get(), num_boxes)
    out_boxlist = box_list.BoxList(resized_boxes)
    # Keep every extra field row-aligned with the resized boxes.
    for field_name in boxlist.get_extra_fields():
      field_value = boxlist.get_field(field_name)
      out_boxlist.add_field(
          field_name, shape_utils.pad_or_clip_tensor(field_value, num_boxes))
    return out_boxlist
Example #7
0
  def score_filter(self, boxes, scores, score_thresh=0.5, max_detections=8192):
    """Keeps detections scoring above a threshold, padded per image.

    Args:
      boxes: float tensor of shape [batch, anchors, 5].
      scores: float tensor of shape [batch, anchors].
      score_thresh: detections with score <= this value are dropped.
      max_detections: each image's surviving detections are padded or
        clipped to this many rows.

    Returns:
      A dict with 'detection_boxes', 'detection_scores',
      'detection_classes' (all ones, same shape as scores output) and
      'num_detections' (float count of above-threshold rows per image).

    Raises:
      ValueError: if scores is not rank 2, boxes is not rank 3, or the
        last box dimension is not 5.
    """
    from object_detection.utils import shape_utils

    if scores.shape.ndims != 2:
      raise ValueError('scores field must be of rank 2')
    if boxes.shape.ndims != 3:
      raise ValueError('boxes must be of rank 3.')
    if boxes.shape[2].value != 5:
      raise ValueError('boxes must be of shape [batch, anchors, 5].')

    with tf.name_scope('ScoreFilter'):
      boxes_per_image = tf.unstack(boxes)
      scores_per_image = tf.unstack(scores)
      all_boxes = []
      all_scores = []
      all_classes = []
      all_counts = []
      for image_boxes, image_scores in zip(boxes_per_image, scores_per_image):
        # 1-D int32 indices of detections above the score threshold.
        keep_idx = tf.cast(
            tf.reshape(tf.where(tf.greater(image_scores, score_thresh)), [-1]),
            tf.int32)
        kept_boxes = tf.gather(image_boxes, keep_idx)
        kept_scores = tf.gather(image_scores, keep_idx)

        all_boxes.append(
            shape_utils.pad_or_clip_tensor(kept_boxes, max_detections))
        padded_scores = shape_utils.pad_or_clip_tensor(kept_scores,
                                                       max_detections)
        all_scores.append(padded_scores)
        # Single-class model: every detection is class 1.
        all_classes.append(tf.ones_like(padded_scores))
        all_counts.append(tf.to_float(tf.shape(keep_idx)[0]))

      return {
          'detection_boxes': tf.stack(all_boxes),
          'detection_scores': tf.stack(all_scores),
          'detection_classes': tf.stack(all_classes),
          'num_detections': tf.stack(all_counts)
      }