# Imports assumed by the snippets below (TensorFlow 1.x Object Detection API):
import os
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from object_detection.builders import input_reader_builder
from object_detection.core import standard_fields as fields
from object_detection.protos import input_reader_pb2

    def test_raises_error_with_no_input_paths(self):
        input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      load_instance_masks: true
    """
        input_reader_proto = input_reader_pb2.InputReader()
        text_format.Merge(input_reader_text_proto, input_reader_proto)
        with self.assertRaises(ValueError):
            input_reader_builder.build(input_reader_proto)
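
    # The tests below call self.create_tf_record(), a fixture that is not part
    # of these snippets. A minimal sketch of what such a helper might look
    # like, assuming the standard Object Detection API Example keys and the
    # imports above (the real fixture may differ):
    def create_tf_record(self):
        path = os.path.join(self.get_temp_dir(), 'tfrecord')
        writer = tf.python_io.TFRecordWriter(path)

        # One 4x5 RGB image with a single box covering the whole image,
        # class label 2, and a full-image instance mask.
        image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
        flat_mask = (4 * 5) * [1.0]
        with self.test_session():
            encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
        example = tf.train.Example(features=tf.train.Features(feature={
            'image/encoded': tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[encoded_jpeg])),
            'image/format': tf.train.Feature(
                bytes_list=tf.train.BytesList(value=['jpeg'.encode('utf8')])),
            'image/height': tf.train.Feature(
                int64_list=tf.train.Int64List(value=[4])),
            'image/width': tf.train.Feature(
                int64_list=tf.train.Int64List(value=[5])),
            'image/object/bbox/xmin': tf.train.Feature(
                float_list=tf.train.FloatList(value=[0.0])),
            'image/object/bbox/xmax': tf.train.Feature(
                float_list=tf.train.FloatList(value=[1.0])),
            'image/object/bbox/ymin': tf.train.Feature(
                float_list=tf.train.FloatList(value=[0.0])),
            'image/object/bbox/ymax': tf.train.Feature(
                float_list=tf.train.FloatList(value=[1.0])),
            'image/object/class/label': tf.train.Feature(
                int64_list=tf.train.Int64List(value=[2])),
            'image/object/mask': tf.train.Feature(
                float_list=tf.train.FloatList(value=flat_mask)),
        }))
        writer.write(example.SerializeToString())
        writer.close()
        return path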
    def test_build_tf_record_input_reader_and_load_instance_masks(self):
        tf_record_path = self.create_tf_record()

        input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      load_instance_masks: true
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
        input_reader_proto = input_reader_pb2.InputReader()
        text_format.Merge(input_reader_text_proto, input_reader_proto)
        tensor_dict = input_reader_builder.build(input_reader_proto)

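        # MonitoredSession launches the queue runners itself, so no explicit
        # start_queue_runners call is needed for this queue-based reader.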
        with tf.train.MonitoredSession() as sess:
            output_dict = sess.run(tensor_dict)

        self.assertEqual((4, 5, 3),
                         output_dict[fields.InputDataFields.image].shape)
        self.assertEqual(
            [2], output_dict[fields.InputDataFields.groundtruth_classes])
        self.assertEqual(
            (1, 4),
            output_dict[fields.InputDataFields.groundtruth_boxes].shape)
        self.assertAllEqual(
            [0.0, 0.0, 1.0, 1.0],
            output_dict[fields.InputDataFields.groundtruth_boxes][0])
        self.assertAllEqual((1, 4, 5), output_dict[
            fields.InputDataFields.groundtruth_instance_masks].shape)
    def test_build_tf_record_input_reader(self):
        tf_record_path = self.create_tf_record()

        input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
        input_reader_proto = input_reader_pb2.InputReader()
        text_format.Merge(input_reader_text_proto, input_reader_proto)
        tensor_dict = input_reader_builder.build(input_reader_proto)

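        # tf.train.Supervisor is the older session wrapper; here the queue
        # runners feeding the reader must be started explicitly.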
        sv = tf.train.Supervisor(logdir=self.get_temp_dir())
        with sv.prepare_or_wait_for_session() as sess:
            sv.start_queue_runners(sess)
            output_dict = sess.run(tensor_dict)

        self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks,
                         output_dict)
        self.assertEqual((4, 5, 3),
                         output_dict[fields.InputDataFields.image].shape)
        self.assertEqual(
            [2], output_dict[fields.InputDataFields.groundtruth_classes])
        self.assertEqual(
            (1, 4),
            output_dict[fields.InputDataFields.groundtruth_boxes].shape)
        self.assertAllEqual(
            [0.0, 0.0, 1.0, 1.0],
            output_dict[fields.InputDataFields.groundtruth_boxes][0])
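
# The same InputReader configuration can also be built programmatically instead
# of merging a text proto; a small sketch using the field names from
# input_reader.proto ('/path/to/data.record' is just a placeholder):
input_reader_proto = input_reader_pb2.InputReader()
input_reader_proto.shuffle = False
input_reader_proto.num_readers = 1
input_reader_proto.load_instance_masks = True
input_reader_proto.tf_record_input_reader.input_path.append('/path/to/data.record')
tensor_dict = input_reader_builder.build(input_reader_proto)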
    def test_build_tf_record_input_reader_sequence_example(self):
        tf_record_path = self.create_tf_record_sequence_example()

        input_reader_text_proto = """
      shuffle: false
      num_readers: 1
      input_type: TF_SEQUENCE_EXAMPLE
      tf_record_input_reader {{
        input_path: '{0}'
      }}
    """.format(tf_record_path)
        input_reader_proto = input_reader_pb2.InputReader()
        input_reader_proto.label_map_path = _get_labelmap_path()
        text_format.Merge(input_reader_text_proto, input_reader_proto)
        tensor_dict = input_reader_builder.build(input_reader_proto)

        with tf.train.MonitoredSession() as sess:
            output_dict = sess.run(tensor_dict)

        expected_groundtruth_classes = [[-1, -1], [1, -1], [1, 2], [-1, -1]]
        expected_groundtruth_boxes = [[[0.0, 0.0, 0.0, 0.0],
                                       [0.0, 0.0, 0.0, 0.0]],
                                      [[0.0, 0.0, 1.0, 1.0],
                                       [0.0, 0.0, 0.0, 0.0]],
                                      [[0.0, 0.0, 1.0, 1.0],
                                       [0.1, 0.1, 0.2, 0.2]],
                                      [[0.0, 0.0, 0.0, 0.0],
                                       [0.0, 0.0, 0.0, 0.0]]]
        expected_num_groundtruth_boxes = [0, 1, 2, 0]

        self.assertNotIn(fields.InputDataFields.groundtruth_instance_masks,
                         output_dict)
        # Sequence example images are returned still encoded, one string per
        # frame, hence the (4,) shape below.
        self.assertEqual((4, ),
                         output_dict[fields.InputDataFields.image].shape)
        self.assertAllEqual(
            expected_groundtruth_classes,
            output_dict[fields.InputDataFields.groundtruth_classes])
        self.assertEqual(
            (4, 2, 4),
            output_dict[fields.InputDataFields.groundtruth_boxes].shape)
        self.assertAllClose(
            expected_groundtruth_boxes,
            output_dict[fields.InputDataFields.groundtruth_boxes])
        self.assertAllClose(
            expected_num_groundtruth_boxes,
            output_dict[fields.InputDataFields.num_groundtruth_boxes])
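
# _get_labelmap_path() above is another helper that is not shown in these
# snippets. In the Object Detection API tests it resolves a label map file that
# ships with the repository; a rough, assumption-laden sketch:
def _get_labelmap_path():
    """Returns an absolute path to a label map .pbtxt file."""
    parent_path = os.path.dirname(tf.resource_loader.get_data_files_path())
    return os.path.join(parent_path, 'data', 'pet_label_map.pbtxt')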
# tensor_dict[fields.InputDataFields.image] = float_images

# input_queue = batcher.BatchQueue(
#     tensor_dict,
#     batch_size=30,
#     batch_queue_capacity=100,
#     num_batch_queue_threads=4,
#     prefetch_queue_capacity=100)

### It seems the frozen graph requires a feed_dict, so I have to run the queue to pop tensor_dict, get the data, and then pass it back in through feed_dict (see the sketch at the end of this snippet).
# Commonly, if we have a queue, can we use its tensors directly and just run the final output tensor?
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        input_reader_proto = input_reader_pb2.InputReader()
        text_format.Merge(input_reader_text_proto, input_reader_proto)
        tensor_dict = input_reader_builder.build(input_reader_proto)
        print('type tensor_dict', type(tensor_dict))
        # print('tensor_dict', tensor_dict)
        # sv.start_queue_runners(sess)
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        # Each box represents a part of the image where a particular object was detected.
        detection_boxes = detection_graph.get_tensor_by_name(
            'detection_boxes:0')
        # Each score represent how level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        detection_scores = detection_graph.get_tensor_by_name(
            'detection_scores:0')
        detection_classes = detection_graph.get_tensor_by_name(
            'detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')
        outfileset = {}
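
        # A minimal sketch of how these pieces are typically wired together,
        # addressing the comment above: run the reader's tensor_dict to pop one
        # decoded example off the input queue, then feed its image into the
        # frozen graph's placeholder via feed_dict (assumes numpy as np and the
        # imports shown at the top of these snippets).
        sess.run(tf.local_variables_initializer())
        tf.train.start_queue_runners(sess)
        example = sess.run(tensor_dict)
        image = example[fields.InputDataFields.image]
        (boxes, scores, classes, num) = sess.run(
            [detection_boxes, detection_scores, detection_classes,
             num_detections],
            feed_dict={image_tensor: np.expand_dims(image, axis=0)})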