Code Example #1
    def create_tf_record(self):
        path = os.path.join(self.get_temp_dir(), 'tfrecord')
        writer = tf.python_io.TFRecordWriter(path)

        image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
        with self.test_session():
            encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
        example = example_pb2.Example(features=feature_pb2.Features(feature={
            'image/encoded': feature_pb2.Feature(
                bytes_list=feature_pb2.BytesList(value=[encoded_jpeg])),
            'image/format': feature_pb2.Feature(
                bytes_list=feature_pb2.BytesList(value=['jpeg'.encode('utf-8')])),
            'image/object/bbox/xmin': feature_pb2.Feature(
                float_list=feature_pb2.FloatList(value=[0.0])),
            'image/object/bbox/xmax': feature_pb2.Feature(
                float_list=feature_pb2.FloatList(value=[1.0])),
            'image/object/bbox/ymin': feature_pb2.Feature(
                float_list=feature_pb2.FloatList(value=[0.0])),
            'image/object/bbox/ymax': feature_pb2.Feature(
                float_list=feature_pb2.FloatList(value=[1.0])),
            'image/object/class/label': feature_pb2.Feature(
                int64_list=feature_pb2.Int64List(value=[2])),
        }))
        writer.write(example.SerializeToString())
        writer.close()

        return path
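
A hedged companion sketch, not part of the original test: the single record written by create_tf_record above could be read back and parsed with the same TF 1.x APIs. The helper name read_tf_record and the choice of VarLenFeature for the box/label fields are assumptions made here for illustration.

    # Sketch only: read back the record written by create_tf_record() and parse
    # the same keys it wrote. Assumes TensorFlow 1.x (tf.python_io, tf.parse_single_example).
    import tensorflow as tf

    def read_tf_record(path):
        serialized = next(tf.python_io.tf_record_iterator(path))
        feature_spec = {
            'image/encoded': tf.FixedLenFeature([], tf.string),
            'image/format': tf.FixedLenFeature([], tf.string),
            'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
            'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
            'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
            'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
            'image/object/class/label': tf.VarLenFeature(tf.int64),
        }
        # Returns a dict of tensors; the VarLen entries come back as SparseTensors.
        return tf.parse_single_example(serialized, feature_spec)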
Code Example #2
    def _test_input_fn_from_parse_example_helper(self, fc_impl, fn_to_run):
        """Tests complete flow with input_fn constructed from parse_example."""
        label_dimension = 2
        batch_size = 10
        data = np.linspace(0.,
                           2.,
                           batch_size * label_dimension,
                           dtype=np.float32)
        data = data.reshape(batch_size, label_dimension)

        serialized_examples = []
        for datum in data:
            example = example_pb2.Example(features=feature_pb2.Features(
                feature={
                    'x':
                    feature_pb2.Feature(float_list=feature_pb2.FloatList(
                        value=datum)),
                    'y':
                    feature_pb2.Feature(float_list=feature_pb2.FloatList(
                        value=datum)),
                }))
            serialized_examples.append(example.SerializeToString())

        feature_spec = {
            'x': parsing_ops.FixedLenFeature([label_dimension],
                                             dtypes.float32),
            'y': parsing_ops.FixedLenFeature([label_dimension],
                                             dtypes.float32),
        }

        def _train_input_fn():
            feature_map = parsing_ops.parse_example(serialized_examples,
                                                    feature_spec)
            features = linear_testing_utils.queue_parsed_features(feature_map)
            labels = features.pop('y')
            return features, labels

        def _eval_input_fn():
            feature_map = parsing_ops.parse_example(
                input_lib.limit_epochs(serialized_examples, num_epochs=1),
                feature_spec)
            features = linear_testing_utils.queue_parsed_features(feature_map)
            labels = features.pop('y')
            return features, labels

        def _predict_input_fn():
            feature_map = parsing_ops.parse_example(
                input_lib.limit_epochs(serialized_examples, num_epochs=1),
                feature_spec)
            features = linear_testing_utils.queue_parsed_features(feature_map)
            features.pop('y')
            return features, None

        fn_to_run(train_input_fn=_train_input_fn,
                  eval_input_fn=_eval_input_fn,
                  predict_input_fn=_predict_input_fn,
                  input_dimension=label_dimension,
                  label_dimension=label_dimension,
                  batch_size=batch_size,
                  fc_impl=fc_impl)
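
The core of the helper above is the round trip from serialized Example protos to dense tensors via parse_example. A minimal standalone sketch of that round trip, assuming TF 1.x graph mode and the same proto modules (not taken from the original test suite):

    # Sketch: serialize one Example with a float feature 'x', then parse it back.
    import numpy as np
    import tensorflow as tf
    from tensorflow.core.example import example_pb2, feature_pb2

    datum = np.array([0.5, 1.5], dtype=np.float32)
    example = example_pb2.Example(features=feature_pb2.Features(feature={
        'x': feature_pb2.Feature(float_list=feature_pb2.FloatList(value=datum)),
    }))
    parsed = tf.parse_example([example.SerializeToString()],
                              {'x': tf.FixedLenFeature([2], tf.float32)})
    with tf.Session() as sess:
        print(sess.run(parsed['x']))  # [[0.5 1.5]]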
Code Example #3
    def test_input_fn_from_parse_example(self):
        """Tests complete flow with input_fn constructed from parse_example."""
        input_dimension = 2
        n_classes = 2
        batch_size = 10
        data = np.linspace(0.,
                           2.,
                           batch_size * input_dimension,
                           dtype=np.float32)
        data = data.reshape(batch_size, input_dimension)

        serialized_examples = []
        for datum in data:
            example = example_pb2.Example(features=feature_pb2.Features(
                feature={
                    'x':
                    feature_pb2.Feature(float_list=feature_pb2.FloatList(
                        value=datum)),
                    'y':
                    feature_pb2.Feature(float_list=feature_pb2.FloatList(
                        value=datum[:1])),
                }))
            serialized_examples.append(example.SerializeToString())

        feature_spec = {
            'x': parsing_ops.FixedLenFeature([input_dimension],
                                             dtypes.float32),
            'y': parsing_ops.FixedLenFeature([1], dtypes.float32),
        }

        def _train_input_fn():
            feature_map = parsing_ops.parse_example(serialized_examples,
                                                    feature_spec)
            features = _queue_parsed_features(feature_map)
            labels = features.pop('y')
            return features, labels

        def _eval_input_fn():
            feature_map = parsing_ops.parse_example(
                input_lib.limit_epochs(serialized_examples, num_epochs=1),
                feature_spec)
            features = _queue_parsed_features(feature_map)
            labels = features.pop('y')
            return features, labels

        def _predict_input_fn():
            feature_map = parsing_ops.parse_example(
                input_lib.limit_epochs(serialized_examples, num_epochs=1),
                feature_spec)
            features = _queue_parsed_features(feature_map)
            features.pop('y')
            return features, None

        self._test_complete_flow(train_input_fn=_train_input_fn,
                                 eval_input_fn=_eval_input_fn,
                                 predict_input_fn=_predict_input_fn,
                                 input_dimension=input_dimension,
                                 n_classes=n_classes,
                                 batch_size=batch_size)
Code Example #4
    def create_tf_record(self):
        print('\ncreate_tf_record')

        tmp_dir = os.path.join(os.environ['HOME'], 'tmp')
        if not os.path.isdir(tmp_dir):
            os.makedirs(tmp_dir)

        path = os.path.join(tmp_dir, 'tfrecord')
        writer = tf.python_io.TFRecordWriter(path=path)

        image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
        flat_mask = (4 * 5) * [1.0]
        with self.test_session():
            encoded_jpeg = tf.image.encode_jpeg(
                tf.constant(image_tensor)).eval()

        example = example_pb2.Example(features=feature_pb2.Features(
            feature={
                'image/encoded':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=[encoded_jpeg])),
                'image/format':
                feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                    value=['jpeg'.encode('utf-8')])),
                'image/height':
                feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                    value=[4])),
                'image/width':
                feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                    value=[5])),
                'image/object/bbox/xmin':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=[0.0])),
                'image/object/bbox/xmax':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=[1.0])),
                'image/object/bbox/ymin':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=[0.0])),
                'image/object/bbox/ymax':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=[1.0])),
                'image/object/class/label':
                feature_pb2.Feature(int64_list=feature_pb2.Int64List(
                    value=[2])),
                'image/object/mask':
                feature_pb2.Feature(float_list=feature_pb2.FloatList(
                    value=flat_mask)),
            }))
        writer.write(example.SerializeToString())
        writer.close()

        return path
Code Example #5
    def test_input_fn_from_parse_example(self):
        """Tests complete flow with input_fn constructed from parse_example."""
        input_dim = 4
        batch_size = 6
        data = np.zeros([batch_size, input_dim])

        serialized_examples = []
        for datum in data:
            example = example_pb2.Example(features=feature_pb2.Features(
                feature={
                    'x':
                    feature_pb2.Feature(float_list=feature_pb2.FloatList(
                        value=datum)),
                    'y':
                    feature_pb2.Feature(float_list=feature_pb2.FloatList(
                        value=datum)),
                }))
            serialized_examples.append(example.SerializeToString())

        feature_spec = {
            'x': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
            'y': parsing_ops.FixedLenFeature([input_dim], dtypes.float32),
        }

        def _train_input_fn():
            feature_map = parsing_ops.parse_example(serialized_examples,
                                                    feature_spec)
            _, features = graph_io.queue_parsed_features(feature_map)
            labels = features.pop('y')
            return features, labels

        def _eval_input_fn():
            feature_map = parsing_ops.parse_example(
                input_lib.limit_epochs(serialized_examples, num_epochs=1),
                feature_spec)
            _, features = graph_io.queue_parsed_features(feature_map)
            labels = features.pop('y')
            return features, labels

        def _predict_input_fn():
            feature_map = parsing_ops.parse_example(
                input_lib.limit_epochs(serialized_examples, num_epochs=1),
                feature_spec)
            _, features = graph_io.queue_parsed_features(feature_map)
            features.pop('y')
            return features, None

        self._test_complete_flow(train_input_fn=_train_input_fn,
                                 eval_input_fn=_eval_input_fn,
                                 predict_input_fn=_predict_input_fn,
                                 prediction_size=[batch_size, input_dim])
Code Example #6
    def create_tf_record(self, has_additional_channels=False):
        path = os.path.join(self.get_temp_dir(), 'tfrecord')
        writer = tf.python_io.TFRecordWriter(path)

        image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
        additional_channels_tensor = np.random.randint(
            255, size=(4, 5, 1)).astype(np.uint8)
        flat_mask = (4 * 5) * [1.0]
        with self.test_session():
            encoded_jpeg = tf.image.encode_jpeg(
                tf.constant(image_tensor)).eval()
            encoded_additional_channels_jpeg = tf.image.encode_jpeg(
                tf.constant(additional_channels_tensor)).eval()
        features = {
            'image/encoded':
            feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                value=[encoded_jpeg])),
            'image/format':
            feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                value=['jpeg'.encode('utf-8')])),
            'image/height':
            feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=[4])),
            'image/width':
            feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=[5])),
            'image/object/bbox/xmin':
            feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[0.0])),
            'image/object/bbox/xmax':
            feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[1.0])),
            'image/object/bbox/ymin':
            feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[0.0])),
            'image/object/bbox/ymax':
            feature_pb2.Feature(float_list=feature_pb2.FloatList(value=[1.0])),
            'image/object/class/label':
            feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=[2])),
            'image/object/mask':
            feature_pb2.Feature(float_list=feature_pb2.FloatList(
                value=flat_mask)),
        }
        if has_additional_channels:
            features['image/additional_channels/encoded'] = feature_pb2.Feature(
                bytes_list=feature_pb2.BytesList(
                    value=[encoded_additional_channels_jpeg] * 2))
        example = example_pb2.Example(features=feature_pb2.Features(
            feature=features))
        writer.write(example.SerializeToString())
        writer.close()

        return path
Code Example #7
  def test_input_fn_from_parse_example(self, fc_impl):
    """Tests complete flow with input_fn constructed from parse_example."""
    input_dimension = 2
    n_classes = 3
    batch_size = 10
    data = np.linspace(
        0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
    data = data.reshape(batch_size, input_dimension)

    serialized_examples = []
    for datum in data:
      example = example_pb2.Example(
          features=feature_pb2.Features(
              feature={
                  'x':
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(value=datum)),
                  'y':
                      feature_pb2.Feature(
                          int64_list=feature_pb2.Int64List(
                              value=self._as_label(datum[:1]))),
              }))
      serialized_examples.append(example.SerializeToString())

    feature_spec = {
        'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
        'y': tf.io.FixedLenFeature([1], tf.dtypes.int64),
    }

    def _train_input_fn():
      feature_map = tf.compat.v1.io.parse_example(serialized_examples,
                                                  feature_spec)
      features = linear_testing_utils.queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _eval_input_fn():
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = linear_testing_utils.queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _predict_input_fn():
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = linear_testing_utils.queue_parsed_features(feature_map)
      features.pop('y')
      return features, None

    self._test_complete_flow(
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        predict_input_fn=_predict_input_fn,
        input_dimension=input_dimension,
        n_classes=n_classes,
        batch_size=batch_size,
        fc_impl=fc_impl)
Code Example #8
  def testSaveWithSignatures(self):
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(5, input_shape=(3,),
                                 kernel_regularizer=regularizers.get('l2')))
    model.add(keras.layers.Dropout(0.5))
    model.add(keras.layers.Dense(4, kernel_regularizer=regularizers.get('l2')))

    input_arr = np.random.random((2, 3))
    target_arr = np.random.random((2, 4))

    model.compile(
        loss='mse',
        optimizer='rmsprop')
    model.train_on_batch(input_arr, target_arr)

    @tf.function(input_signature=[tf.TensorSpec((None, 3))])
    def predict(inputs):
      return {'predictions': model(inputs)}

    feature_configs = {
        'inputs': tf.io.FixedLenFeature(
            shape=[2, 3], dtype=tf.float32)}

    @tf.function(
        input_signature=[tf.TensorSpec([None], tf.string)])
    def parse_and_predict(examples):
      features = tf.compat.v1.parse_single_example(examples[0], feature_configs)
      return {'predictions': model(features['inputs']),
              'layer_1_outputs': model.layers[0](features['inputs'])}

    saved_model_dir = self._save_model_dir()
    model.save(saved_model_dir, save_format='tf', signatures={
        'predict': predict,
        'parse_and_predict': parse_and_predict})

    loaded = keras_load.load(saved_model_dir)

    self.assertAllClose(
        model.predict(input_arr),
        loaded.signatures['predict'](tf.convert_to_tensor(
            input_arr.astype('float32')))['predictions'])

    feature = {
        'inputs': feature_pb2.Feature(
            float_list=feature_pb2.FloatList(
                value=input_arr.astype('float32').flatten()))}
    example = example_pb2.Example(
        features=feature_pb2.Features(feature=feature))
    outputs = loaded.signatures['parse_and_predict'](
        tf.convert_to_tensor([example.SerializeToString()]))
    self.assertAllClose(model.predict(input_arr), outputs['predictions'])
    self.assertAllClose(model.layers[0](input_arr), outputs['layer_1_outputs'])
Code Example #9
 def test_parse_example_with_default_value(self):
     price = fc.numeric_column('price', shape=[2], default_value=11.)
     data = example_pb2.Example(features=feature_pb2.Features(
         feature={
             'price':
             feature_pb2.Feature(float_list=feature_pb2.FloatList(
                 value=[20., 110.]))
         }))
     no_data = example_pb2.Example(features=feature_pb2.Features(
         feature={
             'something_else':
             feature_pb2.Feature(float_list=feature_pb2.FloatList(
                 value=[20., 110.]))
         }))
     features = parsing_ops.parse_example(
         serialized=[data.SerializeToString(),
                     no_data.SerializeToString()],
         features=price._parse_example_config)
     self.assertIn('price', features)
     with self.test_session():
         self.assertAllEqual([[20., 110.], [11., 11.]],
                             features['price'].eval())
Code Example #10
 def _test_identity_savedmodel(self, export_dir):
   with tf.Graph().as_default() as graph:
     with tf.Session(graph=graph) as sess:
       metagraph_def = tf.saved_model.loader.load(sess, [tf.saved_model.SERVING], export_dir)
       fetch = metagraph_def.signature_def['predictions'].outputs['outputs']
       feed = metagraph_def.signature_def['predictions'].inputs['inputs']
       for x in self._data:
         example = example_pb2.Example(
             features=feature_pb2.Features(
                 feature={
                     'x':
                         feature_pb2.Feature(
                             float_list=feature_pb2.FloatList(
                                 value=np.ravel(x)))
                 })).SerializeToString()
         y = sess.run(fetch.name, feed_dict={feed.name: [example]})
         self.assertAlmostEqual(y, x[0], delta=0.01)
Code Example #11
    def _create_feature(feature):
        feature_list = feature if isinstance(feature, list) else [feature]

        # Each feature can be exactly one kind:
        # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/feature.proto#L76

        feature_type = type(feature_list[0])
        if feature_type == int:
            return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=feature_list))
        elif feature_type == bytes:
            return feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=feature_list))
        elif feature_type == str:
            # Text strings must be UTF-8 encoded before they can go into a BytesList.
            return feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                value=[x.encode('utf-8') for x in feature_list]))
        elif feature_type == float:
            return feature_pb2.Feature(float_list=feature_pb2.FloatList(value=feature_list))
        else:
            message = """Unsupported request data format: {}, {}.
                            Valid formats: float, int, str, or any object that implements __iter__
                                           or classification_pb2.ClassificationRequest"""
            raise ValueError(message.format(feature, type(feature)))
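
A brief usage sketch for _create_feature (hypothetical calls, not from the original source); each Python type dispatches to the matching Feature kind:

    # Sketch: scalars and lists both work; the first element decides the kind.
    int_feature = _create_feature(7)                   # int64_list
    float_feature = _create_feature([0.1, 0.2, 0.3])   # float_list
    str_feature = _create_feature('hello')             # bytes_list (UTF-8 encoded)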
Code Example #12
 def _EncodedFloatFeature(self, ndarray):
     return feature_pb2.Feature(float_list=feature_pb2.FloatList(
         value=ndarray.flatten().tolist()))
Code Example #13
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging

# Helpers for creating Example objects
example = example_pb2.Example
feature = feature_pb2.Feature
features = lambda d: feature_pb2.Features(feature=d)
bytes_feature = lambda v: feature(bytes_list=feature_pb2.BytesList(value=v))
int64_feature = lambda v: feature(int64_list=feature_pb2.Int64List(value=v))
float_feature = lambda v: feature(float_list=feature_pb2.FloatList(value=v))
# Helpers for creating SequenceExample objects
feature_list = lambda l: feature_pb2.FeatureList(feature=l)
feature_lists = lambda d: feature_pb2.FeatureLists(feature_list=d)
sequence_example = example_pb2.SequenceExample


def _compare_output_to_expected(tester, dict_tensors, expected_tensors,
                                flat_output):
  tester.assertEqual(set(dict_tensors.keys()), set(expected_tensors.keys()))

  i = 0  # Index into the flattened output of session.run()
  for k, v in sorted(dict_tensors.items()):
    # TODO(shivaniagrawal): flat_output is same as v.
    expected_v = expected_tensors[k]
    tf_logging.info("Comparing key: %s", k)
Code Example #14
    def testDecodeJpegImageAndBoundingBox(self):
        """Test if the decoder can correctly decode the image and bounding box.

    A set of random images (represented as an image tensor) is first decoded as
    the groundtrue image. Meanwhile, the image tensor will be encoded and pass
    through the sequence example, and then decoded as images. The groundtruth
    image and the decoded image are expected to be equal. Similar tests are
    also applied to labels such as bounding box.
    """
        image_tensor = np.random.randint(256,
                                         size=(256, 256, 3)).astype(np.uint8)
        encoded_jpeg = self._EncodeImage(image_tensor)
        decoded_jpeg = self._DecodeImage(encoded_jpeg)

        sequence_example = example_pb2.SequenceExample(
            feature_lists=feature_pb2.FeatureLists(
                feature_list={
                    'image/encoded':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
                            value=[encoded_jpeg])),
                    ]),
                    'bbox/xmin':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(float_list=feature_pb2.FloatList(
                            value=[0.0])),
                    ]),
                    'bbox/xmax':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(float_list=feature_pb2.FloatList(
                            value=[1.0]))
                    ]),
                    'bbox/ymin':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(float_list=feature_pb2.FloatList(
                            value=[0.0])),
                    ]),
                    'bbox/ymax':
                    feature_pb2.FeatureList(feature=[
                        feature_pb2.Feature(float_list=feature_pb2.FloatList(
                            value=[1.0]))
                    ]),
                })).SerializeToString()

        example_decoder = tf_sequence_example_decoder.TFSequenceExampleDecoder(
        )
        tensor_dict = example_decoder.decode(
            tf.convert_to_tensor(sequence_example))

        # Test tensor dict image dimension.
        self.assertAllEqual(
            (tensor_dict[fields.InputDataFields.image].get_shape().as_list()),
            [None, None, None, 3])
        with self.test_session() as sess:
            tensor_dict[fields.InputDataFields.image] = tf.squeeze(
                tensor_dict[fields.InputDataFields.image])
            tensor_dict[fields.InputDataFields.groundtruth_boxes] = tf.squeeze(
                tensor_dict[fields.InputDataFields.groundtruth_boxes])
            tensor_dict = sess.run(tensor_dict)

        # Test decoded image.
        self.assertAllEqual(decoded_jpeg,
                            tensor_dict[fields.InputDataFields.image])
        # Test decoded bounding box.
        self.assertAllEqual(
            [0.0, 0.0, 1.0, 1.0],
            tensor_dict[fields.InputDataFields.groundtruth_boxes])
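
The test above relies on _EncodeImage and _DecodeImage helpers that are not part of the snippet. A minimal TF 1.x sketch of what such methods on the test class might look like (an assumption, not the original implementation):

    def _EncodeImage(self, image_tensor):
        # Hypothetical helper: JPEG-encode a uint8 HWC array inside a test session.
        with self.test_session():
            return tf.image.encode_jpeg(tf.constant(image_tensor)).eval()

    def _DecodeImage(self, encoded_jpeg):
        # Hypothetical helper: decode the JPEG bytes back into a uint8 HWC array.
        with self.test_session():
            return tf.image.decode_jpeg(tf.constant(encoded_jpeg)).eval()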