Example #1
    def testDecodeJpegImageAndBoundingBox(self):
        """Test if the decoder can correctly decode the image and bounding box.

    A set of random images (represented as an image tensor) is first decoded as
    the groundtrue image. Meanwhile, the image tensor will be encoded and pass
    through the sequence example, and then decoded as images. The groundtruth
    image and the decoded image are expected to be equal. Similar tests are
    also applied to labels such as bounding box.
    """
        image_tensor = np.random.randint(256,
                                         size=(256, 256, 3)).astype(np.uint8)
        encoded_jpeg = self._EncodeImage(image_tensor)
        decoded_jpeg = self._DecodeImage(encoded_jpeg)

        sequence_example = example_pb2.SequenceExample(
            feature_lists=feature_pb2.FeatureLists(
                feature_list={
                    'image/encoded':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                            value=[encoded_jpeg])),
                    ]),
                    'bbox/xmin':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(float_list=feature_pb2.FloatList(
                            value=[0.0])),
                    ]),
                    'bbox/xmax':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(float_list=feature_pb2.FloatList(
                            value=[1.0]))
                    ]),
                    'bbox/ymin':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(float_list=feature_pb2.FloatList(
                            value=[0.0])),
                    ]),
                    'bbox/ymax':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(float_list=feature_pb2.FloatList(
                            value=[1.0]))
                    ]),
                })).SerializeToString()

        example_decoder = tf_sequence_example_decoder.TFSequenceExampleDecoder(
        )
        tensor_dict = example_decoder.decode(
            tf.convert_to_tensor(sequence_example))

        # Test tensor dict image dimension.
        self.assertAllEqual(
            (tensor_dict[fields.InputDataFields.image].get_shape().as_list()),
            [None, None, None, 3])
        with self.test_session() as sess:
            tensor_dict[fields.InputDataFields.image] = tf.squeeze(
                tensor_dict[fields.InputDataFields.image])
            tensor_dict[fields.InputDataFields.groundtruth_boxes] = tf.squeeze(
                tensor_dict[fields.InputDataFields.groundtruth_boxes])
            tensor_dict = sess.run(tensor_dict)

        # Test decoded image.
        self.assertAllEqual(decoded_jpeg,
                            tensor_dict[fields.InputDataFields.image])
        # Test decoded bounding box.
        self.assertAllEqual(
            [0.0, 0.0, 1.0, 1.0],
            tensor_dict[fields.InputDataFields.groundtruth_boxes])
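
    # The test above relies on two helpers, _EncodeImage and _DecodeImage,
    # that are not shown in this excerpt. Below is a minimal sketch of what
    # they presumably look like (TF1-style, round-tripping through
    # tf.image.encode_jpeg / tf.image.decode_jpeg inside a test session);
    # the signatures are assumptions, not the original implementation.
    def _EncodeImage(self, image_tensor):
        with self.test_session():
            return tf.image.encode_jpeg(tf.constant(image_tensor)).eval()

    def _DecodeImage(self, encoded_jpeg):
        with self.test_session():
            return tf.image.decode_jpeg(tf.constant(encoded_jpeg)).eval()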
Example #2
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging

# Helpers for creating Example objects
example = example_pb2.Example
feature = feature_pb2.Feature
features = lambda d: feature_pb2.Features(feature=d)
bytes_feature = lambda v: feature(bytes_list=feature_pb2.BytesList(value=v))
int64_feature = lambda v: feature(int64_list=feature_pb2.Int64List(value=v))
float_feature = lambda v: feature(float_list=feature_pb2.FloatList(value=v))
# Helpers for creating SequenceExample objects
feature_list = lambda l: feature_pb2.FeatureList(feature=l)
feature_lists = lambda d: feature_pb2.FeatureLists(feature_list=d)
sequence_example = example_pb2.SequenceExample
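
# Hypothetical usage of the helpers above (illustrative only, not part of the
# original file): build and serialize a SequenceExample with one context
# feature and a two-frame feature list.
_example_proto = sequence_example(
    context=features({"length": int64_feature([2])}),
    feature_lists=feature_lists({
        "tokens": feature_list([bytes_feature([b"hello"]),
                                bytes_feature([b"world"])]),
    }))
_serialized = _example_proto.SerializeToString()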


def _compare_output_to_expected(tester, dict_tensors, expected_tensors,
                                flat_output):
  tester.assertEqual(set(dict_tensors.keys()), set(expected_tensors.keys()))

  i = 0  # Index into the flattened output of session.run()
  for k, v in sorted(dict_tensors.items()):
    # TODO(shivaniagrawal): flat_output is same as v.
    expected_v = expected_tensors[k]
    tf_logging.info("Comparing key: %s", k)
    print("i", i, "flat_output", flat_output[i], "expected_v", expected_v)
    if sparse_tensor.is_sparse(v):
      # The original snippet is truncated here; this completion is a plausible
      # sketch. A SparseTensor flattens to three session.run() outputs
      # (indices, values, dense_shape); a dense Tensor flattens to one.
      tester.assertEqual([k, len(expected_v)], [k, 3])
      tester.assertAllEqual(expected_v[0], flat_output[i])
      tester.assertAllEqual(expected_v[1], flat_output[i + 1])
      tester.assertAllEqual(expected_v[2], flat_output[i + 2])
      i += 3
    else:
      tester.assertAllEqual(expected_v, flat_output[i])
      i += 1


    def _create_tf_record(self):
        path = os.path.join(self.get_temp_dir(), 'tfrecord')
        writer = tf.python_io.TFRecordWriter(path)

        image_tensor = np.random.randint(255,
                                         size=(16, 16, 3)).astype(np.uint8)
        with self.test_session():
            encoded_jpeg = tf.image.encode_jpeg(
                tf.constant(image_tensor)).eval()

        sequence_example = example_pb2.SequenceExample(
            context=feature_pb2.Features(
                feature={
                    'image/format':
                    feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                        value=['jpeg'.encode('utf-8')])),
                    'image/height':
                    feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                        value=[16])),
                    'image/width':
                    feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                        value=[16])),
                }),
            feature_lists=feature_pb2.FeatureLists(
                feature_list={
                    'image/encoded':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                            value=[encoded_jpeg])),
                    ]),
                    'image/object/bbox/xmin':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(float_list=feature_pb2.FloatList(
                            value=[0.0])),
                    ]),
                    'image/object/bbox/xmax':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(float_list=feature_pb2.FloatList(
                            value=[1.0]))
                    ]),
                    'image/object/bbox/ymin':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(float_list=feature_pb2.FloatList(
                            value=[0.0])),
                    ]),
                    'image/object/bbox/ymax':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(float_list=feature_pb2.FloatList(
                            value=[1.0]))
                    ]),
                    'image/object/class/label':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                            value=[2]))
                    ]),
                }))

        writer.write(sequence_example.SerializeToString())
        writer.close()

        return path
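
    # Illustrative sketch (not part of the original test): the record written
    # by _create_tf_record could be read back and re-parsed roughly as below,
    # using the TF1 tf.python_io record iterator. The helper name
    # _read_tf_record is an assumption.
    def _read_tf_record(self, path):
        record_iterator = tf.python_io.tf_record_iterator(path)
        serialized = next(record_iterator)
        # Parse the raw bytes back into a SequenceExample proto. The context
        # carries image/format, image/height and image/width; the feature
        # lists carry the encoded frame, its bounding box and class label.
        return example_pb2.SequenceExample.FromString(serialized)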