def testModelMapAndBatch(self):
        batch_size = 16
        k = 1024 * 1024
        dataset = dataset_ops.Dataset.from_tensors(
            (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))).repeat()
        dataset = dataset.apply(
            batching.map_and_batch(math_ops.matmul,
                                   num_parallel_calls=optimization.AUTOTUNE,
                                   batch_size=batch_size))
        iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
        get_next = iterator.get_next()

        deltas = []
        with self.test_session() as sess:
            for _ in range(5):
                sess.run(get_next.op)
            for _ in range(10):
                start = time.time()
                sess.run(get_next.op)
                end = time.time()
                deltas.append(end - start)

        print("%f (median), %f (mean), %f (stddev), %f (min), %f (max)\n" %
              (np.median(deltas), np.mean(deltas), np.std(deltas),
               np.min(deltas), np.max(deltas)))
  def testModelMapAndBatch(self):
    batch_size = 16
    k = 1024 * 1024
    dataset = dataset_ops.Dataset.from_tensors((np.random.rand(1, 4 * k),
                                                np.random.rand(4 * k,
                                                               1))).repeat()
    dataset = dataset.apply(
        batching.map_and_batch(
            math_ops.matmul,
            num_parallel_calls=optimization.AUTOTUNE,
            batch_size=batch_size))
    iterator = dataset.apply(optimization.model()).make_one_shot_iterator()
    get_next = iterator.get_next()

    deltas = []
    with self.cached_session() as sess:
      for _ in range(5):
        sess.run(get_next.op)
      for _ in range(10):
        start = time.time()
        sess.run(get_next.op)
        end = time.time()
        deltas.append(end - start)

    print("%f (median), %f (mean), %f (stddev), %f (min), %f (max)\n" %
          (np.median(deltas), np.mean(deltas), np.std(deltas), np.min(deltas),
           np.max(deltas)))
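These first two variants time the contrib-era fused map_and_batch transformation with AUTOTUNE by running a few warm-up steps and then measuring each get_next call. Below is a minimal sketch of the same measurement against the public TF 2.x API (assuming TF 2.4+ for tf.data.AUTOTUNE); there the recommended spelling is map().batch(), and the tf.data optimizer can fuse the two stages itself rather than the user requesting the fused op.

# Sketch only: a TF 2.x eager-mode version of the timing loop above.
# Shapes, batch size, and iteration counts mirror the test; nothing here is
# taken from the original file beyond those values.
import time

import numpy as np
import tensorflow as tf

batch_size = 16
k = 1024 * 1024
dataset = tf.data.Dataset.from_tensors(
    (np.random.rand(1, 4 * k), np.random.rand(4 * k, 1))).repeat()
dataset = dataset.map(
    tf.linalg.matmul, num_parallel_calls=tf.data.AUTOTUNE).batch(batch_size)
dataset = dataset.prefetch(tf.data.AUTOTUNE)

deltas = []
it = iter(dataset)
for _ in range(5):    # warm-up
  next(it)
for _ in range(10):   # timed iterations
  start = time.time()
  next(it)
  deltas.append(time.time() - start)
print("%f (median), %f (mean)" % (np.median(deltas), np.mean(deltas)))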
Example #3
 def dataset_fn():
   dataset = dataset_ops.Dataset.from_tensors([[1.]]).repeat()
   # TODO (isaprykin): map_and_batch with drop_remainder causes shapes to be
   # fully defined for TPU.  Remove this when XLA supports dynamic shapes.
   # Tracked as id:1091, https://github.com/imdone/tensorflow/issues/1092
   return dataset.apply(
       batching.map_and_batch(lambda x: x, batch_size=2, drop_remainder=True))
Example #4
def train_data_generator(batch_size):
    with tf.name_scope('train_batch_processing'):
        data_dir = FLAGS.train_data_dir
        glob_pattern = os.path.join(data_dir, 'train-*-of-*')
        file_names = gfile.Glob(glob_pattern)
        import random
        random.shuffle(file_names)
        ds = tf.data.TFRecordDataset.list_files(file_names)

        ds = ds.apply(
            interleave_ops.parallel_interleave(tf.data.TFRecordDataset,
                                               cycle_length=10))
        counter = tf.data.Dataset.range(batch_size)
        counter = counter.repeat()
        flags = tf.data.Dataset.from_tensors(tf.constant('train'))
        flags = flags.repeat()
        ds = tf.data.Dataset.zip((ds, counter, flags))
        ds = ds.prefetch(buffer_size=batch_size * 4)
        ds = ds.shuffle(buffer_size=1000)
        ds = ds.repeat()
        ds = ds.apply(
            batching.map_and_batch(map_func=preprocess_fn,
                                   batch_size=batch_size,
                                   num_parallel_batches=10))
        ds = ds.prefetch(buffer_size=10)
        from tensorflow.contrib.data.python.ops import threadpool
        ds = threadpool.override_threadpool(
            ds,
            threadpool.PrivateThreadPool(
                10, display_name='input_pipeline_thread_pool'))
        #    ds_iterator = ds.make_initializable_iterator()
        return ds
Example #5
 def dataset_fn():
     dataset = dataset_ops.Dataset.from_tensors([[1.]]).repeat()
     # TODO(isaprykin): map_and_batch with drop_remainder causes shapes to be
     # fully defined for TPU.  Remove this when XLA supports dynamic shapes.
     return dataset.apply(
         batching.map_and_batch(lambda x: x,
                                batch_size=1,
                                drop_remainder=True))
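Both dataset_fn snippets lean on one property of drop_remainder=True: it makes the batch (leading) dimension statically known, which is what the "fully defined for TPU" comment refers to, since XLA needs static shapes. A small sketch of that shape difference, assuming TF 2.x where element_spec is available:

# Sketch: drop_remainder=True turns the leading (batch) dimension from None
# into a concrete value.
import tensorflow as tf

ds = tf.data.Dataset.from_tensors([[1.]]).repeat(10)

dynamic = ds.batch(4)                      # last batch may be smaller
static = ds.batch(4, drop_remainder=True)  # every batch has exactly 4 rows

print(dynamic.element_spec.shape)  # (None, 1, 1) -- batch dim unknown
print(static.element_spec.shape)   # (4, 1, 1)    -- batch dim fixed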
  def testMapAndBatchTypes(self, element, dtype):
    def gen():
      yield element

    dataset = dataset_ops.Dataset.from_generator(gen, dtype).repeat(100).apply(
        batching.map_and_batch(lambda x: x, batch_size=10))

    get_next = dataset.make_one_shot_iterator().get_next()

    with self.cached_session() as sess:
      for _ in range(10):
        self.assertAllEqual([element for _ in range(10)], sess.run(get_next))
Example #7
 def testBatchAndMapDatasetFails(self):
   """Test a dataset that maps a TF function across its input elements."""
   dataset = dataset_ops.Dataset.from_tensors(
       array_ops.check_numerics(
           constant_op.constant(1.0) / constant_op.constant(0.0), "oops"))
   batch_size = array_ops.placeholder(dtypes.int64, shape=[])
   iterator = (dataset.apply(batching.map_and_batch(lambda x: x, batch_size))
               .make_initializable_iterator())
   init_op = iterator.initializer
   with self.test_session() as sess:
     with self.assertRaisesRegexp(errors.InvalidArgumentError, "oops"):
       sess.run(init_op, feed_dict={batch_size: 14})
Example #9
    def build_ds(range_start, drop_remainder=False):

      def _map_fn(x):
        return math_ops.square(x)

      return dataset_ops.Dataset.range(
          range_start, range_start + range_size).repeat(num_repeats).apply(
              batching.map_and_batch(
                  map_func=_map_fn,
                  batch_size=batch_size,
                  num_parallel_calls=num_parallel_calls,
                  drop_remainder=drop_remainder))
Example #10
  def testMapAndBatchTypes(self, element, dtype):
    def gen():
      yield element

    dataset = dataset_ops.Dataset.from_generator(gen, dtype).repeat(100).apply(
        batching.map_and_batch(lambda x: x, batch_size=10))

    get_next = dataset.make_one_shot_iterator().get_next()

    with self.test_session() as sess:
      for _ in range(10):
        self.assertAllEqual([element for _ in range(10)], sess.run(get_next))
Example #11
 def testMapAndBatchYieldsPartialBatch(self):
   iterator = (dataset_ops.Dataset.range(10)
               .apply(batching.map_and_batch(
                   lambda x: array_ops.reshape(x * x, [1]), 4))
               .make_one_shot_iterator())
   self.assertEqual([None, 1], iterator.output_shapes.as_list())
   next_element = iterator.get_next()
   with self.test_session() as sess:
     self.assertAllEqual([[0], [1], [4], [9]], sess.run(next_element))
     self.assertAllEqual([[16], [25], [36], [49]], sess.run(next_element))
     self.assertAllEqual([[64], [81]], sess.run(next_element))
     with self.assertRaises(errors.OutOfRangeError):
       sess.run(next_element)
 def testMapAndBatchYieldsPartialBatch(self):
   iterator = (dataset_ops.Dataset.range(10)
               .apply(batching.map_and_batch(
                   lambda x: array_ops.reshape(x * x, [1]), 4))
               .make_one_shot_iterator())
   self.assertEqual([None, 1], iterator.output_shapes.as_list())
   next_element = iterator.get_next()
   with self.cached_session() as sess:
     self.assertAllEqual([[0], [1], [4], [9]], sess.run(next_element))
     self.assertAllEqual([[16], [25], [36], [49]], sess.run(next_element))
     self.assertAllEqual([[64], [81]], sess.run(next_element))
     with self.assertRaises(errors.OutOfRangeError):
       sess.run(next_element)
Example #13
def create_iterator(
        params,  #hsj
        batch_size,
        num_splits,
        batch_size_per_split,
        preprocess_fn,
        dataset,
        subset,
        train,
        cache_data,
        num_threads=None):
    """Creates a dataset iterator for the benchmark."""
    glob_pattern = dataset.tf_record_pattern(subset)
    file_names = gfile.Glob(glob_pattern)
    if not file_names:
        raise ValueError(
            'Found no files in --data_dir matching: {}'.format(glob_pattern))
    ds = tf.data.TFRecordDataset.list_files(file_names)
    ds = ds.apply(
        interleave_ops.parallel_interleave(tf.data.TFRecordDataset,
                                           cycle_length=10))
    if cache_data:
        ds = ds.take(1).cache().repeat()

    ##hsj
    if train:
        ds = ds.shard(params.num_shards, params.shard_idx)
    ##
    counter = tf.data.Dataset.range(batch_size)
    counter = counter.repeat()
    ds = tf.data.Dataset.zip((ds, counter))
    ds = ds.prefetch(buffer_size=batch_size)
    if train:
        ds = ds.shuffle(buffer_size=10000)
    ds = ds.repeat()
    ds = ds.apply(
        batching.map_and_batch(map_func=preprocess_fn,
                               batch_size=batch_size_per_split,
                               num_parallel_batches=num_splits))
    ds = ds.prefetch(buffer_size=num_splits)
    if num_threads:
        ds = threadpool.override_threadpool(
            ds,
            threadpool.PrivateThreadPool(
                num_threads, display_name='input_pipeline_thread_pool'))
        ds_iterator = ds.make_initializable_iterator()
        tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
                             ds_iterator.initializer)
    else:
        ds_iterator = ds.make_one_shot_iterator()
    return ds_iterator
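The threadpool.override_threadpool / PrivateThreadPool pair used here was a contrib-only way to give the input pipeline its own worker threads, and it forces an initializable iterator whose initializer is parked in TABLE_INITIALIZERS so it runs alongside the other initializers. A rough TF 2.x equivalent is sketched below; it assumes a build where tf.data.Options exposes threading.private_threadpool_size (older 2.x releases spell it experimental_threading instead).

# Sketch: dedicating a private thread pool to a dataset via tf.data.Options
# instead of the contrib threadpool helper. The pipeline itself is a toy.
import tensorflow as tf

def with_private_threads(ds, num_threads):
  options = tf.data.Options()
  # Older TF 2.x builds expose this as options.experimental_threading.
  options.threading.private_threadpool_size = num_threads
  return ds.with_options(options)

ds = tf.data.Dataset.range(1000).map(lambda x: x * x).batch(32)
ds = with_private_threads(ds, num_threads=10)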
 def create_dataset(self,
                    batch_size,
                    num_splits,
                    batch_size_per_split,
                    dataset,
                    subset,
                    train,
                    datasets_repeat_cached_sample,
                    num_threads=None,
                    datasets_use_caching=False,
                    datasets_parallel_interleave_cycle_length=None,
                    datasets_sloppy_parallel_interleave=False):
   """Creates a dataset for the benchmark."""
   assert self.supports_datasets()
   glob_pattern = dataset.tf_record_pattern(subset)
   file_names = gfile.Glob(glob_pattern)
   if not file_names:
     raise ValueError('Found no files in --data_dir matching: {}'
                      .format(glob_pattern))
   ds = tf.data.TFRecordDataset.list_files(file_names)
   ds = ds.apply(
       interleave_ops.parallel_interleave(
           tf.data.TFRecordDataset,
           cycle_length=datasets_parallel_interleave_cycle_length or 10,
           sloppy=datasets_sloppy_parallel_interleave))
   if datasets_repeat_cached_sample:
     # Repeat a single sample element indefinitely to emulate memory-speed IO.
     ds = ds.take(1).cache().repeat()
   counter = tf.data.Dataset.range(batch_size)
   counter = counter.repeat()
   ds = tf.data.Dataset.zip((ds, counter))
   ds = ds.prefetch(buffer_size=batch_size)
   if datasets_use_caching:
     ds = ds.cache()
   if train:
     ds = ds.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=10000))
   else:
     ds = ds.repeat()
   ds = ds.apply(
       batching.map_and_batch(
           map_func=self.parse_and_preprocess,
           batch_size=batch_size_per_split,
           num_parallel_batches=num_splits))
   ds = ds.prefetch(buffer_size=num_splits)
   if num_threads:
     ds = threadpool.override_threadpool(
         ds,
         threadpool.PrivateThreadPool(
             num_threads, display_name='input_pipeline_thread_pool'))
   return ds
Example #15
 def testMapAndBatchParallelGetNext(self):
   iterator = (dataset_ops.Dataset.range(500000)
               .apply(batching.map_and_batch(lambda x: x, batch_size=100))
               .make_one_shot_iterator())
   elements = []
   for _ in range(100):
     elements.append(iterator.get_next())
   with self.test_session() as sess:
     for i in range(50):
       got = sess.run(elements)
       got.sort(key=lambda x: x[0])
       expected = []
       for j in range(100):
         expected.append(range(i*10000+j*100, i*10000+(j+1)*100))
       self.assertAllEqual(got, expected)
     with self.assertRaises(errors.OutOfRangeError):
       sess.run(elements)
 def testMapAndBatchParallelGetNext(self):
   iterator = (dataset_ops.Dataset.range(50000)
               .apply(batching.map_and_batch(lambda x: x, batch_size=100))
               .make_one_shot_iterator())
   elements = []
   for _ in range(100):
     elements.append(iterator.get_next())
   with self.cached_session() as sess:
     for i in range(5):
       got = sess.run(elements)
       got.sort(key=lambda x: x[0])
       expected = []
       for j in range(100):
         expected.append(range(i*10000+j*100, i*10000+(j+1)*100))
       self.assertAllEqual(got, expected)
     with self.assertRaises(errors.OutOfRangeError):
       sess.run(elements)
def image_set_new(filenames,
                  batch_size,
                  height,
                  width,
                  training=False,
                  distort_color=False,
                  deterministic=False,
                  num_threads=10,
                  nsummary=10,
                  cache_data=False,
                  num_splits=1):
    ds = tf.data.TFRecordDataset.list_files(filenames)
    if training:
        ds = ds.shard(hvd.size(), hvd.rank())  # HACK TESTING
    ds = ds.shuffle(buffer_size=10000,
                    seed=5 * (1 + hvd.rank()) if deterministic else None)
    ds = ds.apply(
        interleave_ops.parallel_interleave(tf.data.TFRecordDataset,
                                           cycle_length=10))
    if cache_data:
        ds = ds.take(1).cache().repeat()
    counter = tf.data.Dataset.range(batch_size)
    counter = counter.repeat()
    ds = tf.data.Dataset.zip((ds, counter))
    ds = ds.prefetch(buffer_size=batch_size)
    if training:
        ds = ds.shuffle(buffer_size=10000,
                        seed=13 * (1 + hvd.rank()) if deterministic else None)
    ds = ds.repeat()
    preproc_func = lambda record, counter_: _parse_and_preprocess_image_record(
        record,
        counter_,
        height,
        width,
        deterministic,
        random_crop=training,
        distort_color=distort_color,
        nsummary=nsummary if training else 0)
    assert (batch_size % num_splits == 0)
    ds = ds.apply(
        batching.map_and_batch(map_func=preproc_func,
                               batch_size=batch_size // num_splits,
                               num_parallel_batches=num_splits))
    ds = ds.prefetch(buffer_size=num_splits)
    return ds
Example #18
def create_iterator(batch_size,
                    num_threads,
                    parallel_interleave_cycle_length=0,
                    input_file_spec=None,
                    input_filenames=None,
                    dataset_buffer_size=None,
                    prefetch_records=None):
    if input_filenames:
        ds = tf.data.Dataset.from_tensor_slices(
            tf.convert_to_tensor(input_filenames))
    elif input_file_spec:
        ds = tf.data.TFRecordDataset.list_files(input_file_spec)
    else:
        raise ValueError('You must specify input_file_spec or input_filenames')

    if parallel_interleave_cycle_length:
        ds = ds.apply(
            interleave_ops.parallel_interleave(
                lambda f: tf.data.TFRecordDataset(
                    f, buffer_size=dataset_buffer_size),
                cycle_length=parallel_interleave_cycle_length))
    else:
        ds = ds.apply(tf.data.TFRecordDataset)

    ds = ds.prefetch(buffer_size=prefetch_records)
    ds = ds.repeat()
    num_splits = 1
    ds = ds.apply(
        batching.map_and_batch(map_func=process_record,
                               batch_size=batch_size,
                               num_parallel_batches=num_splits))
    ds = ds.prefetch(buffer_size=num_splits)

    if num_threads:
        ds = threadpool.override_threadpool(
            ds,
            threadpool.PrivateThreadPool(
                num_threads, display_name='input_pipeline_thread_pool'))
        ds_iterator = ds.make_initializable_iterator()
        tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
                             ds_iterator.initializer)
    else:
        ds_iterator = ds.make_one_shot_iterator()
    return ds_iterator
Example #19
 def _testMapAndBatchPartialBatchHelper(self, drop_remainder=False):
   iterator = (
       dataset_ops.Dataset.range(10).apply(
           batching.map_and_batch(
               lambda x: array_ops.reshape(x * x, [1]),
               batch_size=4,
               drop_remainder=drop_remainder)).make_one_shot_iterator())
   if drop_remainder:
     self.assertEqual([4, 1], iterator.output_shapes.as_list())
   else:
     self.assertEqual([None, 1], iterator.output_shapes.as_list())
   next_element = iterator.get_next()
   with self.test_session() as sess:
     self.assertAllEqual([[0], [1], [4], [9]], sess.run(next_element))
     self.assertAllEqual([[16], [25], [36], [49]], sess.run(next_element))
     if not drop_remainder:
       self.assertAllEqual([[64], [81]], sess.run(next_element))
     with self.assertRaises(errors.OutOfRangeError):
       sess.run(next_element)
Example #20
    def testBatchAndMapDatasetShapeMismatch(self):
        """Test a dataset that maps a TF function across its input elements."""
        def generator():
            yield [1]
            yield [2]
            yield [3]
            yield [[4, 5, 6]]

        dataset = dataset_ops.Dataset.from_generator(generator,
                                                     output_types=dtypes.int32)
        batch_size = 4
        iterator = (dataset.apply(
            batching.map_and_batch(lambda x: x,
                                   batch_size)).make_initializable_iterator())
        init_op = iterator.initializer
        get_next = iterator.get_next()
        with self.test_session() as sess:
            sess.run(init_op)
            with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                         "number of elements does not match"):
                sess.run(get_next)
Example #21
    def testMapAndBatchImplicitDispose(self):
        # Tests whether a map and batch dataset will be cleaned up correctly when
        # the pipeline does not run it until exhaustion.
        # The pipeline is TensorSliceDataset -> RepeatDataset(1000) ->
        # MapAndBatchDataset(f=square_3, batch_size=100).
        components = (np.arange(1000),
                      np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
                      np.array(37.0) * np.arange(1000))

        def _map_fn(x, y, z):
            return math_ops.square(x), math_ops.square(y), math_ops.square(z)

        dataset = dataset_ops.Dataset.from_tensor_slices(components).repeat(
            1000).apply(batching.map_and_batch(_map_fn, batch_size=100))
        dataset = dataset.prefetch(5)
        iterator = dataset.make_one_shot_iterator()
        get_next = iterator.get_next()

        with self.test_session() as sess:
            for _ in range(3):
                sess.run(get_next)
  def testMapAndBatchImplicitDispose(self):
    # Tests whether a map and batch dataset will be cleaned up correctly when
    # the pipeline does not run it until exhaustion.
    # The pipeline is TensorSliceDataset -> RepeatDataset(1000) ->
    # MapAndBatchDataset(f=square_3, batch_size=100).
    components = (np.arange(1000),
                  np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
                  np.array(37.0) * np.arange(1000))

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    dataset = dataset_ops.Dataset.from_tensor_slices(components).repeat(
        1000).apply(batching.map_and_batch(_map_fn, batch_size=100))
    dataset = dataset.prefetch(5)
    iterator = dataset.make_one_shot_iterator()
    get_next = iterator.get_next()

    with self.cached_session() as sess:
      for _ in range(3):
        sess.run(get_next)
  def testBatchAndMapDatasetShapeMismatch(self):
    """Test a dataset that maps a TF function across its input elements."""
    def generator():
      yield [1]
      yield [2]
      yield [3]
      yield [[4, 5, 6]]

    dataset = dataset_ops.Dataset.from_generator(
        generator, output_types=dtypes.int32)
    batch_size = 4
    iterator = (
        dataset.apply(batching.map_and_batch(lambda x: x, batch_size))
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   "number of elements does not match"):
        sess.run(get_next)
Example #24
def create_dataset(batch_size,
                   num_splits,
                   batch_size_per_split,
                   preprocess_fn,
                   dataset,
                   subset,
                   train,
                   cache_data,
                   num_threads=None):
  """Creates a dataset for the benchmark."""
  glob_pattern = dataset.tf_record_pattern(subset)
  file_names = gfile.Glob(glob_pattern)
  if not file_names:
    raise ValueError('Found no files in --data_dir matching: {}'
                     .format(glob_pattern))
  ds = tf.data.TFRecordDataset.list_files(file_names)
  ds = ds.apply(
      interleave_ops.parallel_interleave(
          tf.data.TFRecordDataset, cycle_length=10))
  if cache_data:
    ds = ds.take(1).cache().repeat()
  counter = tf.data.Dataset.range(batch_size)
  counter = counter.repeat()
  ds = tf.data.Dataset.zip((ds, counter))
  ds = ds.prefetch(buffer_size=batch_size)
  if train:
    ds = ds.shuffle(buffer_size=10000)
  ds = ds.repeat()
  ds = ds.apply(
      batching.map_and_batch(
          map_func=preprocess_fn,
          batch_size=batch_size_per_split,
          num_parallel_batches=num_splits))
  ds = ds.prefetch(buffer_size=num_splits)
  if num_threads:
    ds = threadpool.override_threadpool(
        ds,
        threadpool.PrivateThreadPool(
            num_threads, display_name='input_pipeline_thread_pool'))
  return ds
Example #25
    def testComplexPipeline(self):
        # Setup a complex input pipeline.
        batch_size = 2
        num_epochs = 5
        dataset = dataset_ops.Dataset.from_tensor_slices(
            self._createTFRecordFiles())
        dataset = dataset.shuffle(buffer_size=self._num_files)
        dataset = dataset.flat_map(readers.TFRecordDataset)
        dataset = dataset.prefetch(buffer_size=batch_size)
        dataset = dataset.shuffle(2 * self._num_files * self._num_records)
        dataset = dataset.repeat(num_epochs)
        dataset = dataset.apply(
            batching.map_and_batch(lambda x: x, batch_size=batch_size))
        dataset = dataset.prefetch(buffer_size=None)

        # Auto shard.
        dataset = input_ops.auto_shard_dataset(dataset, self._num_shards,
                                               self._shard_index)

        # Verify output.
        iterator = dataset.make_one_shot_iterator()
        next_element = iterator.get_next()
        with self.cached_session() as sess:
            actual = []
            num_iterations = (self._num_files * self._num_records *
                              num_epochs) // (self._num_shards * batch_size)
            for _ in range(num_iterations):
                actual.extend(sess.run(next_element))
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(next_element)

            expected = []
            for f in range(0, self._num_files, self._num_shards):
                for r in range(self._num_records):
                    expected.append(self._record(r, f))
            expected *= num_epochs

            self.assertAllEqual(sorted(expected), sorted(actual))
Example #26
  def testComplexPipeline(self):
    # Setup a complex input pipeline.
    batch_size = 2
    num_epochs = 5
    dataset = dataset_ops.Dataset.from_tensor_slices(
        self._createTFRecordFiles())
    dataset = dataset.shuffle(buffer_size=self._num_files)
    dataset = dataset.flat_map(readers.TFRecordDataset)
    dataset = dataset.prefetch(buffer_size=batch_size)
    dataset = dataset.shuffle(2 * self._num_files * self._num_records)
    dataset = dataset.repeat(num_epochs)
    dataset = dataset.apply(batching.map_and_batch(
        lambda x: x, batch_size=batch_size))
    dataset = dataset.prefetch(buffer_size=None)

    # Auto shard.
    dataset = input_ops.auto_shard_dataset(
        dataset, self._num_shards, self._shard_index)

    # Verify output.
    iterator = dataset.make_one_shot_iterator()
    next_element = iterator.get_next()
    with self.cached_session() as sess:
      actual = []
      num_iterations = (self._num_files * self._num_records * num_epochs) // (
          self._num_shards * batch_size)
      for _ in range(num_iterations):
        actual.extend(sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)

      expected = []
      for f in range(0, self._num_files, self._num_shards):
        for r in range(self._num_records):
          expected.append(self._record(r, f))
      expected *= num_epochs

      self.assertAllEqual(sorted(expected), sorted(actual))
Example #27
  def testMapAndBatchSparse(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    iterator = dataset_ops.Dataset.range(10).apply(
        batching.map_and_batch(_sparse, 5)).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      for i in range(2):
        actual = sess.run(get_next)
        expected = sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
            values=[i * 5, i * 5 + 1, i * 5 + 2, i * 5 + 3, i * 5 + 4],
            dense_shape=[5, 1])
        self.assertTrue(sparse_tensor.is_sparse(actual))
        self.assertSparseValuesEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
Example #28
    def testMapAndBatchOutOfRangeError(self, threshold):
        def raising_py_fn(i):
            if i >= threshold:
                raise StopIteration()
            else:
                return i

        iterator = (dataset_ops.Dataset.range(100).apply(
            batching.map_and_batch(
                lambda x: script_ops.py_func(raising_py_fn, [x], dtypes.int64),
                batch_size=10)).make_one_shot_iterator())
        get_next = iterator.get_next()

        with self.test_session() as sess:
            for i in range(threshold // 10):
                self.assertAllEqual([i * 10 + j for j in range(10)],
                                    sess.run(get_next))
            if threshold % 10 != 0:
                self.assertAllEqual(
                    [threshold // 10 * 10 + j for j in range(threshold % 10)],
                    sess.run(get_next))
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)
  def testMapAndBatchSparse(self):

    def _sparse(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0]], values=(i * [1]), dense_shape=[1])

    iterator = dataset_ops.Dataset.range(10).apply(
        batching.map_and_batch(_sparse, 5)).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.cached_session() as sess:
      sess.run(init_op)
      for i in range(2):
        actual = sess.run(get_next)
        expected = sparse_tensor.SparseTensorValue(
            indices=[[0, 0], [1, 0], [2, 0], [3, 0], [4, 0]],
            values=[i * 5, i * 5 + 1, i * 5 + 2, i * 5 + 3, i * 5 + 4],
            dense_shape=[5, 1])
        self.assertTrue(sparse_tensor.is_sparse(actual))
        self.assertSparseValuesEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testMapAndBatchOutOfRangeError(self, threshold):

    def raising_py_fn(i):
      if i >= threshold:
        raise StopIteration()
      else:
        return i

    iterator = (
        dataset_ops.Dataset.range(100).apply(
            batching.map_and_batch(
                lambda x: script_ops.py_func(raising_py_fn, [x], dtypes.int64),
                batch_size=10)).make_one_shot_iterator())
    get_next = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(threshold // 10):
        self.assertAllEqual([i * 10 + j for j in range(10)], sess.run(get_next))
      if threshold % 10 != 0:
        self.assertAllEqual(
            [threshold // 10 * 10 + j for j in range(threshold % 10)],
            sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
Example #31
  def __call__(self, params):
    bs = params["batch_size"]

    dataset = get_split(
        "train" if self._is_training else "validation",
        dataset_dir=self._data_dir)

    if self._is_training:
      dataset = dataset.shuffle(buffer_size=1024)
      dataset = dataset.repeat()

    def _load_records(filename):
      return tf.data.TFRecordDataset(filename, buffer_size=16 * 1000 * 1000)

    dataset = dataset.apply(
        tf.contrib.data.parallel_interleave(
            _load_records, sloppy=True, cycle_length=8))

    dataset = dataset.prefetch(bs * 4)
    if self._is_training:
      dataset = dataset.apply(
          batching.map_and_batch(
              self._parse_record,
              batch_size=bs, num_parallel_batches=4))
    else:
      # Our test set might run out of data before our last batch.  Drop
      # any truncated batches in this case.
      dataset = dataset.map(self._parse_record, num_parallel_calls=64)
      dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(bs))

    dataset = dataset.prefetch(1)
    features, labels = dataset.make_one_shot_iterator().get_next()
    labels = tf.cast(labels, tf.int32)
    features.set_shape([bs, 224, 224, 3])
    labels.set_shape([bs])
    return features, labels
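Two contrib calls in this input_fn have direct core replacements: tf.contrib.data.parallel_interleave(..., sloppy=True) maps onto Dataset.interleave with num_parallel_calls and deterministic=False, and tf.contrib.data.batch_and_drop_remainder(bs) is simply batch(bs, drop_remainder=True). A hedged sketch, assuming TF 2.2+ for the deterministic argument; the shard path is a placeholder, not taken from this example:

# Sketch: core-API replacements for the contrib calls used above.
import tensorflow as tf

filenames = ["/tmp/train-00000-of-00008"]  # placeholder shard path
files = tf.data.Dataset.from_tensor_slices(filenames)

dataset = files.interleave(
    lambda f: tf.data.TFRecordDataset(f, buffer_size=16 * 1000 * 1000),
    cycle_length=8,
    num_parallel_calls=tf.data.AUTOTUNE,
    deterministic=False)  # deterministic=False plays the role of sloppy=True

# batch_and_drop_remainder(bs) became a plain batch() argument.
dataset = dataset.batch(64, drop_remainder=True)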
Example #32
  def __call__(self, params):
    example_decoder = tf_example_decoder.TfExampleDecoder()

    def _parse_example(data):
      with tf.name_scope('augmentation'):
        source_id = tf.string_to_number(data['source_id'])
        image = tf.image.convert_image_dtype(data['image'], dtype=tf.float32)
        raw_shape = tf.shape(image)
        boxes = data['groundtruth_boxes']
        classes = tf.reshape(data['groundtruth_classes'], [-1, 1])

        # Only 80 of the 90 COCO classes are used.
        class_map = tf.convert_to_tensor(ssd_constants.CLASS_MAP)
        classes = tf.gather(class_map, classes)
        classes = tf.cast(classes, dtype=tf.float32)

        if self._is_training:
          image, boxes, classes = ssd_crop(image, boxes, classes)
          image, boxes = preprocessor.random_horizontal_flip(
              image=image, boxes=boxes)

          # TODO(someone in object detection): Color Jitter
          image = normalize_image(image)

          encoded_classes, _, encoded_boxes, _, _ = encode_labels(boxes,
                                                                  classes)

          # TODO(haoyuzhang): measure or remove this overhead of concat
          # Encode labels (number of boxes, coordinates of bounding boxes, and
          # classes) into a single tensor, in order to be compatible with the
          # staging area in the data input pipeline.
          # Step 1: pack box coordinates and classes together
          #     [nboxes, 4] concat [nboxes, 1] ==> [nboxes, 5]
          labels = tf.concat([encoded_boxes, encoded_classes], 1)
          # Step 2 (HACK): repeat number of boxes (a scalar tensor) five times,
          #                and pack result from Step 1 with it.
          #     [nboxes, 5] concat [1, 5] ==> [nboxes + 1, 5]
          nboxes = tf.shape(boxes)[0]                   # scalar, shape (,)
          nboxes_1d = tf.tile(tf.expand_dims(nboxes, 0), [5])
                                                        # 1D tensor, shape (5)
          nboxes_2d = tf.expand_dims(nboxes_1d, 0)      # 2D tensor, shape (1,5)
          labels = tf.concat([labels, tf.cast(nboxes_2d, tf.float32)], 0)
          return image, labels

        else:
          image = tf.image.resize_images(
              image[tf.newaxis, :, :, :],
              size=(ssd_constants.IMAGE_SIZE, ssd_constants.IMAGE_SIZE)
          )[0, :, :, :]

          # TODO(someone in object detection): Color Jitter
          image = normalize_image(image)

          def trim_and_pad(inp_tensor):
            """Limit the number of boxes, and pad if necessary."""
            inp_tensor = inp_tensor[:ssd_constants.MAX_NUM_EVAL_BOXES]
            num_pad = ssd_constants.MAX_NUM_EVAL_BOXES - tf.shape(inp_tensor)[0]
            inp_tensor = tf.pad(inp_tensor, [[0, num_pad], [0, 0]])
            return tf.reshape(inp_tensor, [ssd_constants.MAX_NUM_EVAL_BOXES,
                                           inp_tensor.get_shape()[1]])

          boxes, classes = trim_and_pad(boxes), trim_and_pad(classes)

          # TODO(haoyuzhang): measure or remove this overhead of concat
          # Encode labels into a single tensor, in order to be compatible with
          # staging area in data input pipeline.
          # Shape of boxes: [MAX_NUM_EVAL_BOXES, 4]
          # Shape of classes: [MAX_NUM_EVAL_BOXES, 1]
          # Shape of source_id: [] (scalar tensor)
          # Shape of raw_shape: [3]
          boxes_classes = tf.concat([boxes, classes], 1)
          id_shape = tf.concat([tf.expand_dims(source_id, 0),
                                tf.cast(raw_shape, tf.float32),
                                tf.constant([0.])], 0)
          # id_shape: [source_id, raw_shape_H, raw_shape_W, raw_shape_C, 0]
          labels = tf.concat([boxes_classes, tf.expand_dims(id_shape, 0)], 0)
          # Shape of labels: [MAX_NUM_EVAL_BOXES+1, 5]
          return image, labels

    batch_size_per_split = params['batch_size_per_split']
    num_splits = params['num_splits']
    dataset = tf.data.Dataset.list_files(
        self._file_pattern, shuffle=self._is_training)
    # Repeat for both training and validation datasets because the number of
    # validation examples is typically not a multiple of batch size, and COCO
    # metric requires all results to be collected before calculating metrics.
    dataset = dataset.repeat()

    # Prefetch data from files.
    def _prefetch_dataset(filename):
      dataset = tf.data.TFRecordDataset(filename).prefetch(batch_size_per_split)
      return dataset
    dataset = dataset.apply(
        tf.contrib.data.parallel_interleave(
            _prefetch_dataset, cycle_length=32, sloppy=self._is_training))

    if self._is_training:
      dataset = dataset.shuffle(64)

    # Parse the fetched records to input tensors for model function.
    dataset = dataset.map(example_decoder.decode, num_parallel_calls=64)

    # TODO(taylorrobie): Confirm that this is MLPerf rules compliant.
    dataset = dataset.filter(
        lambda data: tf.greater(tf.shape(data['groundtruth_boxes'])[0], 0))
    dataset = dataset.apply(batching.map_and_batch(
        map_func=_parse_example,
        batch_size=batch_size_per_split,
        num_parallel_batches=num_splits,
        drop_remainder=True))
    dataset = dataset.prefetch(buffer_size=num_splits)
    return dataset
Example #33
    def __init__(self, sess, batch_size, shuffle, is_training, config, dataset_path, input_size):
        self.ds_handle_ph = tf.placeholder(tf.string, shape=[])
        self.sess = sess
        self.config = config
        self.input_size = input_size
        self.dataset_path = dataset_path
        self.training_handle = None
        self.validation_handle = None

        if is_training:
            conf_key = "train_name"
            dataset_map = self.train_dataset_map
        else:
            conf_key = "validation_name"
            dataset_map = self.test_dataset_map

        # files = tf.data.Dataset.list_files(dataset_path)
        files = tf.data.Dataset.list_files(
            os.path.join(config.dataset_dir, "%s_%s*tfrecord" % (config.dataset_name, getattr(config, conf_key))))

        # if hasattr(tf.contrib.data, "parallel_interleave"):
        #     ds = files.apply(tf.contrib.data.parallel_interleave(
        #         tf.data.TFRecordDataset, cycle_length=config.num_parallel_readers))
        # else:
        ds = files.interleave(tf.data.TFRecordDataset, cycle_length=config.num_parallel_readers)

        if config.cache_data:
            ds = ds.take(1).cache().repeat()

        counter = tf.data.Dataset.range(batch_size)
        counter = counter.repeat()
        ds = tf.data.Dataset.zip((ds, counter))
        ds = ds.prefetch(buffer_size=batch_size)
        # ds = ds.repeat()
        if shuffle:
            ds = ds.shuffle(buffer_size=config.buffer_size)
        if True:  # config.num_gpus > 1:
            batch_size_per_split = batch_size // config.num_gpus
            images = []
            labels = []

            ds = ds.apply(
                batching.map_and_batch(
                    map_func=dataset_map,
                    batch_size=batch_size_per_split,
                    num_parallel_batches=config.num_gpus))
            ds = ds.prefetch(buffer_size=config.num_gpus)
            # ds = ds.map(dataset_map, num_parallel_calls=batch_size)
            # ds = ds.batch(batch_size)
            # ds = ds.prefetch(buffer_size=batch_size)

            iterator = tf.data.Iterator.from_string_handle(
                self.ds_handle_ph, ds.output_types, ds.output_shapes)

            if config.datasets_num_private_threads:

                ds = threadpool.override_threadpool(
                    ds,
                    threadpool.PrivateThreadPool(
                        config.datasets_num_private_threads, display_name='input_pipeline_thread_pool'))
                self.training_iterator = ds.make_initializable_iterator()
                tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
                                     self.training_iterator.initializer)

            else:
                self.training_iterator = ds.make_one_shot_iterator()

            # self.training_iterator = ds.make_one_shot_iterator()
            for d in range(config.num_gpus):
                image, label = iterator.get_next()
                size = image.get_shape()[1]
                depth = image.get_shape()[3]
                image = tf.reshape(
                    image, shape=[batch_size_per_split, size, size, depth])
                label = tf.reshape(label, [batch_size_per_split, config.num_class])
                labels.append(label)
                images.append(image)
                # labels[d], images[d] = iterator.get_next()

            # for split_index in range(config.num_gpus):
            #     images[split_index] = tf.reshape(
            #         images[split_index],
            #         shape=[batch_size_per_split, config.input_size, config.input_size,
            #                config.num_channel])
            #     labels[split_index] = tf.reshape(labels[split_index],
            #                                      [batch_size_per_split])
            self.images = images
            self.labels = labels

        else:
            if hasattr(tf.contrib.data, "map_and_batch"):
                ds = ds.apply(tf.contrib.data.map_and_batch(map_func=dataset_map, batch_size=batch_size))
            else:
                ds = ds.map(map_func=dataset_map, num_parallel_calls=config.num_parallel_calls)
                ds = ds.batch(batch_size)
            ds = ds.prefetch(buffer_size=batch_size)

            self.iterator = ds.make_initializable_iterator()
            self.next_batch = self.iterator.get_next()
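The class above wires a feedable iterator (tf.data.Iterator.from_string_handle) to a string placeholder, but the snippet never shows how training_handle / validation_handle get produced and fed. The usual TF 1.x graph-mode handshake it relies on looks like the sketch below; the dataset contents and names (handle_ph, train_iter, val_iter) are illustrative, not taken from this class.

# Sketch of the TF 1.x feedable-iterator pattern assumed by the class above.
import tensorflow as tf

handle_ph = tf.placeholder(tf.string, shape=[])
train_ds = tf.data.Dataset.range(100).batch(10).repeat()
val_ds = tf.data.Dataset.range(100, 200).batch(10)

train_iter = train_ds.make_one_shot_iterator()
val_iter = val_ds.make_initializable_iterator()

# A generic iterator driven by whichever handle is fed at run time.
iterator = tf.data.Iterator.from_string_handle(
    handle_ph, train_ds.output_types, train_ds.output_shapes)
next_batch = iterator.get_next()

with tf.Session() as sess:
  train_handle = sess.run(train_iter.string_handle())
  val_handle = sess.run(val_iter.string_handle())
  sess.run(val_iter.initializer)
  print(sess.run(next_batch, feed_dict={handle_ph: train_handle}))
  print(sess.run(next_batch, feed_dict={handle_ph: val_handle}))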
  def testMapAndBatch(self, num_parallel_calls, num_parallel_batches):
    """Test a dataset that maps a TF function across its input elements."""
    # The pipeline is TensorSliceDataset ->
    # RepeatDataset(count) -> MapAndBatchDataset(square_3, batch_size).
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    count = array_ops.placeholder(dtypes.int64, shape=[])
    batch_size = array_ops.placeholder(dtypes.int64, shape=[])

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    iterator = (
        dataset_ops.Dataset.from_tensor_slices(components).repeat(count).apply(
            batching.map_and_batch(
                map_func=_map_fn,
                batch_size=batch_size,
                num_parallel_calls=num_parallel_calls,
                num_parallel_batches=num_parallel_batches))
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    self.assertEqual([[None] + list(c.shape[1:]) for c in components],
                     [t.shape.as_list() for t in get_next])

    with self.cached_session() as sess:
      # Batch of a finite input, where the batch_size divides the
      # total number of elements.
      sess.run(init_op, feed_dict={count: 28, batch_size: 14})
      num_batches = (28 * 7) // 14
      for i in range(num_batches):
        result = sess.run(get_next)
        for component, result_component in zip(components, result):
          for j in range(14):
            self.assertAllEqual(component[(i * 14 + j) % 7]**2,
                                result_component[j])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Batch of a finite input, where the batch_size does not
      # divide the total number of elements.
      sess.run(init_op, feed_dict={count: 14, batch_size: 8})

      # We expect (num_batches - 1) full-sized batches.
      num_batches = int(math.ceil((14 * 7) / 8))
      for i in range(num_batches - 1):
        result = sess.run(get_next)
        for component, result_component in zip(components, result):
          for j in range(8):
            self.assertAllEqual(component[(i * 8 + j) % 7]**2,
                                result_component[j])
      result = sess.run(get_next)
      for component, result_component in zip(components, result):
        for j in range((14 * 7) % 8):
          self.assertAllEqual(component[((num_batches - 1) * 8 + j) % 7]**2,
                              result_component[j])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Batch of an empty input should fail straight away.
      sess.run(init_op, feed_dict={count: 0, batch_size: 8})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Empty batch should be an initialization time error.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(init_op, feed_dict={count: 14, batch_size: 0})
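The feed values in this test pin down the remainder arithmetic: count=28 with batch_size=14 gives exactly (28 * 7) / 14 = 14 full batches, while count=14 with batch_size=8 yields 98 elements, i.e. ceil(98 / 8) = 13 batches with the last one holding 98 % 8 = 2 elements. A tiny helper capturing that bookkeeping (not part of the test, just a worked check):

# Sketch: the batch-count arithmetic the assertions above rely on.
import math

def batch_layout(num_elements, batch_size, drop_remainder=False):
  """Returns (number_of_batches, size_of_last_batch)."""
  if drop_remainder:
    return num_elements // batch_size, batch_size
  num_batches = int(math.ceil(num_elements / float(batch_size)))
  last = num_elements - (num_batches - 1) * batch_size
  return num_batches, last

print(batch_layout(28 * 7, 14))  # (14, 14): batch size divides evenly
print(batch_layout(14 * 7, 8))   # (13, 2): 12 full batches plus a partial one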
Example #35
def get_input_fn(filenames,
                 batch_size=1,
                 num_threads=2,
                 perform_shuffle=False,
                 perform_augmentation=False,
                 per_image_standardization=False,
                 enable_cutout=False,
                 repeat_count=1):
    """
    Input pipeline for ImageNet tfrecord
    """
    def parse(example_proto):
        features = {
            'image/class/label':
            tf.FixedLenFeature([], dtype=tf.int64, default_value=-1),
            'image/encoded':
            tf.FixedLenFeature((), tf.string, default_value=''),
            "image/height":
            tf.FixedLenFeature((), tf.int64, default_value=0),
            "image/width":
            tf.FixedLenFeature((), tf.int64, default_value=0),
        }

        parsed_features = tf.parse_single_example(example_proto, features)

        encoded_image = parsed_features['image/encoded']

        height = tf.cast(parsed_features['image/height'], tf.int32)
        width = tf.cast(parsed_features['image/width'], tf.int32)

        label = tf.cast(parsed_features['image/class/label'], tf.int32)
        label = tf.reshape(label, [])

        image = tf.image.decode_image(encoded_image, channels=3)
        image = tf.reshape(image, [height, width, 3])

        return image, label

    def resize(image):
        # resize_bilinear needs a 4-D tensor
        image = tf.expand_dims(image, 0)
        # resize to target dimensions. output image's type is float
        image = tf.image.resize_bilinear(image, [TARGET_HEIGHT, TARGET_WIDTH])
        # remove extra dimension introduced for resize_bilinear
        image = tf.squeeze(image, [0])

        return image

    def distort_image(image):
        bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
                           dtype=tf.float32,
                           shape=[1, 1, 4])

        bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
            tf.shape(image),
            bounding_boxes=bbox,
            min_object_covered=min_object_covered,
            aspect_ratio_range=aspect_ratio_range,
            area_range=area_range,
            max_attempts=max_attempts,
            use_image_if_no_bounding_boxes=True)

        # Crop the image to the specified bounding box.
        image = tf.slice(image, bbox_begin, bbox_size)
        image.set_shape([None, None, 3])

        return image

    def augment(image):

        # distort image
        image = distort_image(image)

        # resize_bilinear
        image = resize(image)

        # Randomly flip the image horizontally.
        image = tf.image.random_flip_left_right(image)

        image = tf.image.random_brightness(image, max_delta=32. / 255.)
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)

        image = tf.clip_by_value(image, 0.0, 1.0)

        if enable_cutout:
            image = cutout(image,
                           p=0.5,
                           s_l=0.02,
                           s_h=0.4,
                           r_1=0.3,
                           r_2=3.3,
                           v_l=0,
                           v_h=1.0)

        return image

    def preprocess_fn(example_proto):

        # decode example from proto
        image, label = parse(example_proto)

        image = tf.image.convert_image_dtype(image, dtype=tf.float32)

        if perform_augmentation:
            # data augmentation and resize
            image = augment(image)
        else:
            # central crop like slim
            image = tf.image.central_crop(image,
                                          central_fraction=central_fraction)
            # resize
            image = resize(image)

        if per_image_standardization:
            # Subtract off the mean and divide by the variance of the pixels
            image = tf.image.per_image_standardization(image)
        else:
            # Convert from [0.0, 1.0] -> [-1.0, 1.0] floats.
            image = tf.subtract(image, 0.5)
            image = tf.multiply(image, 2.0)

        # convert from HWC to CHW
        image = tf.transpose(image, [2, 0, 1])

        # Ensure the label is an int32 scalar.
        label = tf.cast(label, tf.int32)

        return image, label

    ds = tf.data.TFRecordDataset.list_files(filenames)

    ds = ds.apply(
        interleave_ops.parallel_interleave(tf.data.TFRecordDataset,
                                           cycle_length=10))

    ds = ds.prefetch(buffer_size=batch_size)

    if perform_shuffle:
        ds = ds.shuffle(buffer_size=10000)

    ds = ds.repeat(repeat_count)
    ds = ds.apply(
        batching.map_and_batch(map_func=preprocess_fn,
                               batch_size=batch_size,
                               num_parallel_batches=2))

    ds = ds.prefetch(buffer_size=1)

    if num_threads:
        ds = threadpool.override_threadpool(
            ds,
            threadpool.PrivateThreadPool(
                num_threads, display_name='input_pipeline_thread_pool'))

    return ds
 def dataset_fn():
   dataset = dataset_ops.Dataset.from_tensors([[1.]]).repeat()
   # TODO(isaprykin): map_and_batch with drop_remainder causes shapes to be
   # fully defined for TPU.  Remove this when XLA supports dynamic shapes.
   return dataset.apply(
       batching.map_and_batch(lambda x: x, batch_size=1, drop_remainder=True))
  def minibatch(self, dataset, subset, use_datasets, cache_data,
                shift_ratio=-1):
    if shift_ratio < 0:
      shift_ratio = self.shift_ratio
    with tf.name_scope('batch_processing'):
      # Build final results per split.
      images = [[] for _ in range(self.num_splits)]
      labels = [[] for _ in range(self.num_splits)]
      if use_datasets:
        glob_pattern = dataset.tf_record_pattern(subset)
        file_names = gfile.Glob(glob_pattern)
        if not file_names:
          raise ValueError('Found no files in --data_dir matching: {}'
                           .format(glob_pattern))
        ds = tf.data.TFRecordDataset.list_files(file_names)
        ds = ds.apply(
            interleave_ops.parallel_interleave(
                tf.data.TFRecordDataset, cycle_length=10))
        if cache_data:
          ds = ds.take(1).cache().repeat()
        counter = tf.data.Dataset.range(self.batch_size)
        counter = counter.repeat()
        ds = tf.data.Dataset.zip((ds, counter))
        ds = ds.prefetch(buffer_size=self.batch_size)
        ds = ds.shuffle(buffer_size=10000)
        ds = ds.repeat()
        ds = ds.apply(
            batching.map_and_batch(
                map_func=self.parse_and_preprocess,
                batch_size=self.batch_size_per_split,
                num_parallel_batches=self.num_splits))
        ds = ds.prefetch(buffer_size=self.num_splits)
        ds_iterator = ds.make_one_shot_iterator()
        for d in xrange(self.num_splits):
          labels[d], images[d] = ds_iterator.get_next()

      else:
        record_input = data_flow_ops.RecordInput(
            file_pattern=dataset.tf_record_pattern(subset),
            seed=301,
            parallelism=64,
            buffer_size=10000,
            batch_size=self.batch_size,
            shift_ratio=shift_ratio,
            name='record_input')
        records = record_input.get_yield_op()
        records = tf.split(records, self.batch_size, 0)
        records = [tf.reshape(record, []) for record in records]
        for idx in xrange(self.batch_size):
          value = records[idx]
          (label, image) = self.parse_and_preprocess(value, idx)
          split_index = idx % self.num_splits
          labels[split_index].append(label)
          images[split_index].append(image)

      for split_index in xrange(self.num_splits):
        if not use_datasets:
          images[split_index] = tf.parallel_stack(images[split_index])
          labels[split_index] = tf.concat(labels[split_index], 0)
        images[split_index] = tf.cast(images[split_index], self.dtype)
        depth = 3
        images[split_index] = tf.reshape(
            images[split_index],
            shape=[self.batch_size_per_split, self.height, self.width, depth])
        labels[split_index] = tf.reshape(labels[split_index],
                                         [self.batch_size_per_split])
      return images, labels
    def benchmark(label, series):

      print("%s:" % label)
      for num_calls, inter_op, element_size, batch_size in series:

        num_iters = 1024 // (
            (element_size * batch_size) // min(num_calls, inter_op))
        k = 1024 * 1024
        dataset = dataset_ops.Dataset.from_tensors((np.random.rand(
            element_size, 4 * k), np.random.rand(4 * k, 1))).repeat()

        chained_dataset = dataset.map(
            math_ops.matmul,
            num_parallel_calls=num_calls).batch(batch_size=batch_size)
        chained_iterator = chained_dataset.make_one_shot_iterator()
        chained_get_next = chained_iterator.get_next()

        chained_deltas = []
        with session.Session(
            config=config_pb2.ConfigProto(
                inter_op_parallelism_threads=inter_op,
                use_per_session_threads=True)) as sess:
          for _ in range(5):
            sess.run(chained_get_next.op)
          for _ in range(num_iters):
            start = time.time()
            sess.run(chained_get_next.op)
            end = time.time()
            chained_deltas.append(end - start)

        fused_dataset = dataset = dataset.apply(
            batching.map_and_batch(
                math_ops.matmul,
                num_parallel_calls=num_calls,
                batch_size=batch_size))
        fused_iterator = fused_dataset.make_one_shot_iterator()
        fused_get_next = fused_iterator.get_next()

        fused_deltas = []
        with session.Session(
            config=config_pb2.ConfigProto(
                inter_op_parallelism_threads=inter_op,
                use_per_session_threads=True)) as sess:

          for _ in range(5):
            sess.run(fused_get_next.op)
          for _ in range(num_iters):
            start = time.time()
            sess.run(fused_get_next.op)
            end = time.time()
            fused_deltas.append(end - start)

        print(
            "batch size: %d, num parallel calls: %d, inter-op parallelism: %d, "
            "element size: %d, num iters: %d\nchained wall time: %f (median), "
            "%f (mean), %f (stddev), %f (min), %f (max)\n  fused wall time: "
            "%f (median), %f (mean), %f (stddev), %f (min), %f (max)\n    "
            "chained/fused:    %.2fx (median),    %.2fx (mean)" %
            (batch_size, num_calls, inter_op, element_size, num_iters,
             np.median(chained_deltas), np.mean(chained_deltas),
             np.std(chained_deltas), np.min(chained_deltas),
             np.max(chained_deltas), np.median(fused_deltas),
             np.mean(fused_deltas), np.std(fused_deltas), np.min(fused_deltas),
             np.max(fused_deltas),
             np.median(chained_deltas) / np.median(fused_deltas),
             np.mean(chained_deltas) / np.mean(fused_deltas)))

        self.report_benchmark(
            iters=num_iters,
            wall_time=np.median(chained_deltas),
            name=name("chained", label, num_calls, inter_op, element_size,
                      batch_size))

        self.report_benchmark(
            iters=num_iters,
            wall_time=np.median(fused_deltas),
            name=name("fused", label, num_calls, inter_op, element_size,
                      batch_size))

      print("")
Example #39
  def _testMapAndBatchDatasetHelper(self,
                                    num_parallel_calls=None,
                                    num_parallel_batches=None):
    """Test a dataset that maps a TF function across its input elements."""
    # The pipeline is TensorSliceDataset ->
    # RepeatDataset(count) -> MapAndBatchDataset(square_3, batch_size).
    components = (np.arange(7),
                  np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
                  np.array(37.0) * np.arange(7))

    count = array_ops.placeholder(dtypes.int64, shape=[])
    batch_size = array_ops.placeholder(dtypes.int64, shape=[])

    def _map_fn(x, y, z):
      return math_ops.square(x), math_ops.square(y), math_ops.square(z)

    iterator = (
        dataset_ops.Dataset.from_tensor_slices(components).repeat(count).apply(
            batching.map_and_batch(
                map_func=_map_fn,
                batch_size=batch_size,
                num_parallel_calls=num_parallel_calls,
                num_parallel_batches=num_parallel_batches))
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    self.assertEqual([[None] + list(c.shape[1:]) for c in components],
                     [t.shape.as_list() for t in get_next])

    with self.test_session() as sess:
      # Batch of a finite input, where the batch_size divides the
      # total number of elements.
      sess.run(init_op, feed_dict={count: 28, batch_size: 14})
      num_batches = (28 * 7) // 14
      for i in range(num_batches):
        result = sess.run(get_next)
        for component, result_component in zip(components, result):
          for j in range(14):
            self.assertAllEqual(component[(i * 14 + j) % 7]**2,
                                result_component[j])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Batch of a finite input, where the batch_size does not
      # divide the total number of elements.
      sess.run(init_op, feed_dict={count: 14, batch_size: 8})

      # We expect (num_batches - 1) full-sized batches.
      num_batches = int(math.ceil((14 * 7) / 8))
      for i in range(num_batches - 1):
        result = sess.run(get_next)
        for component, result_component in zip(components, result):
          for j in range(8):
            self.assertAllEqual(component[(i * 8 + j) % 7]**2,
                                result_component[j])
      result = sess.run(get_next)
      for component, result_component in zip(components, result):
        for j in range((14 * 7) % 8):
          self.assertAllEqual(component[((num_batches - 1) * 8 + j) % 7]**2,
                              result_component[j])
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Batch of an empty input should fail straight away.
      sess.run(init_op, feed_dict={count: 0, batch_size: 8})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)

      # Empty batch should be an initialization time error.
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(init_op, feed_dict={count: 14, batch_size: 0})
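
  # A hedged sketch, not part of the original test file: wrappers like these
  # might drive the helper above under different map_and_batch parallelism
  # settings. The method names and argument values are illustrative only.
  def testMapAndBatchDataset(self):
    return self._testMapAndBatchDatasetHelper()

  def testMapAndBatchDatasetWithParallelCalls(self):
    return self._testMapAndBatchDatasetHelper(num_parallel_calls=2)

  def testMapAndBatchDatasetWithParallelBatches(self):
    return self._testMapAndBatchDatasetHelper(num_parallel_batches=10)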
Example #40
        def benchmark(label, series):

            print("%s:" % label)
            for num_calls, inter_op, element_size, batch_size in series:

                num_iters = 1024 // (
                    (element_size * batch_size) // min(num_calls, inter_op))
                k = 1024 * 1024
                dataset = dataset_ops.Dataset.from_tensors(
                    (np.random.rand(element_size, 4 * k),
                     np.random.rand(4 * k, 1))).repeat()

                chained_dataset = dataset.map(
                    math_ops.matmul,
                    num_parallel_calls=num_calls).batch(batch_size=batch_size)
                chained_iterator = chained_dataset.make_one_shot_iterator()
                chained_get_next = chained_iterator.get_next()

                chained_deltas = []
                with session.Session(config=config_pb2.ConfigProto(
                        inter_op_parallelism_threads=inter_op,
                        use_per_session_threads=True)) as sess:
                    for _ in range(5):
                        sess.run(chained_get_next.op)
                    for _ in range(num_iters):
                        start = time.time()
                        sess.run(chained_get_next.op)
                        end = time.time()
                        chained_deltas.append(end - start)

                fused_dataset = dataset.apply(
                    batching.map_and_batch(math_ops.matmul,
                                           num_parallel_calls=num_calls,
                                           batch_size=batch_size))
                fused_iterator = fused_dataset.make_one_shot_iterator()
                fused_get_next = fused_iterator.get_next()

                fused_deltas = []
                with session.Session(config=config_pb2.ConfigProto(
                        inter_op_parallelism_threads=inter_op,
                        use_per_session_threads=True)) as sess:

                    for _ in range(5):
                        sess.run(fused_get_next.op)
                    for _ in range(num_iters):
                        start = time.time()
                        sess.run(fused_get_next.op)
                        end = time.time()
                        fused_deltas.append(end - start)

                print(
                    "batch size: %d, num parallel calls: %d, inter-op parallelism: %d, "
                    "element size: %d, num iters: %d\nchained wall time: %f (median), "
                    "%f (mean), %f (stddev), %f (min), %f (max)\n  fused wall time: "
                    "%f (median), %f (mean), %f (stddev), %f (min), %f (max)\n    "
                    "chained/fused:    %.2fx (median),    %.2fx (mean)" %
                    (batch_size, num_calls, inter_op, element_size, num_iters,
                     np.median(chained_deltas), np.mean(chained_deltas),
                     np.std(chained_deltas), np.min(chained_deltas),
                     np.max(chained_deltas), np.median(fused_deltas),
                     np.mean(fused_deltas), np.std(fused_deltas),
                     np.min(fused_deltas), np.max(fused_deltas),
                     np.median(chained_deltas) / np.median(fused_deltas),
                     np.mean(chained_deltas) / np.mean(fused_deltas)))

                self.report_benchmark(iters=num_iters,
                                      wall_time=np.median(chained_deltas),
                                      name=name("chained", label, num_calls,
                                                inter_op, element_size,
                                                batch_size))

                self.report_benchmark(iters=num_iters,
                                      wall_time=np.median(fused_deltas),
                                      name=name("fused", label, num_calls,
                                                inter_op, element_size,
                                                batch_size))

            print("")
Example #41
def make_csv_dataset(
    file_pattern,
    batch_size,
    column_names=None,
    column_defaults=None,
    label_name=None,
    select_columns=None,
    field_delim=",",
    use_quote_delim=True,
    na_value="",
    header=True,
    comment=None,
    num_epochs=None,
    shuffle=True,
    shuffle_buffer_size=10000,
    shuffle_seed=None,
    prefetch_buffer_size=1,
    num_parallel_reads=1,
    num_parallel_parser_calls=2,
    sloppy=False,
    default_float_type=dtypes.float32,
    num_rows_for_inference=100,
):
    """Reads CSV files into a dataset.

  Reads CSV files into a dataset, where each element is a (features, labels)
  tuple that corresponds to a batch of CSV rows. The features dictionary
  maps feature column names to `Tensor`s containing the corresponding
  feature data, and labels is a `Tensor` containing the batch's label data.

  Args:
    file_pattern: List of files or patterns of file paths containing CSV
      records. See @{tf.gfile.Glob} for pattern rules.
    batch_size: An int representing the number of consecutive elements of this
      dataset to combine in a single batch.
    column_names: An optional list of strings that corresponds to the CSV
      columns, in order. One per column of the input record. If this is not
      provided, infers the column names from the first row of the records.
      These names will be the keys of the features dict of each dataset element.
    column_defaults: An optional list of default values for the CSV fields. One
      item per selected column of the input record. Each item in the list is
      either a valid CSV dtype (float32, float64, int32, int64, or string), or a
      `Tensor` with one of the aforementioned types. The tensor can either be
      a scalar default value (if the column is optional), or an empty tensor (if
      the column is required). If a dtype is provided instead of a tensor, the
      column is also treated as required. If this list is not provided, tries
      to infer types based on reading the first num_rows_for_inference rows of
      files specified, and assumes all columns are optional, defaulting to `0`
      for numeric values and `""` for string values. If both this and
      `select_columns` are specified, these must have the same lengths, and
      `column_defaults` is assumed to be sorted in order of increasing column
      index.
    label_name: An optional string corresponding to the label column. If
      provided, the data for this column is returned as a separate `Tensor` from
      the features dictionary, so that the dataset complies with the format
      expected by a `tf.Estimator.train` or `tf.Estimator.evaluate` input
      function.
    select_columns: An optional list of integer indices or string column
      names, that specifies a subset of columns of CSV data to select. If
      column names are provided, these must correspond to names provided in
      `column_names` or inferred from the file header lines. When this argument
      is specified, only a subset of CSV columns will be parsed and returned,
      corresponding to the columns specified. Using this results in faster
      parsing and lower memory usage. If both this and `column_defaults` are
      specified, these must have the same lengths, and `column_defaults` is
      assumed to be sorted in order of increasing column index.
    field_delim: An optional `string`. Defaults to `","`. Char delimiter to
      separate fields in a record.
    use_quote_delim: An optional bool. Defaults to `True`. If false, treats
      double quotation marks as regular characters inside of the string fields.
    na_value: Additional string to recognize as NA/NaN.
    header: A bool that indicates whether the first rows of provided CSV files
      correspond to header lines with column names, and should not be included
      in the data.
    comment: An optional character string that marks lines that should not be
      parsed as csv records. If this is provided, all lines that start with
      this character will not be parsed.
    num_epochs: An int specifying the number of times this dataset is repeated.
      If None, cycles through the dataset forever.
    shuffle: A bool that indicates whether the input should be shuffled.
    shuffle_buffer_size: Buffer size to use for shuffling. A large buffer size
      ensures better shuffling, but would increase memory usage and startup
      time.
    shuffle_seed: Randomization seed to use for shuffling.
    prefetch_buffer_size: An int specifying the number of feature batches to
      prefetch for performance improvement. Recommended value is the number of
      batches consumed per training step.
    num_parallel_reads: Number of threads used to read CSV records from files.
      If >1, the results will be interleaved.
    num_parallel_parser_calls: Number of parallel invocations of the CSV parsing
      function on CSV records.
    sloppy: If `True`, reading performance will be improved at
      the cost of non-deterministic ordering. If `False`, the order of elements
      produced is deterministic prior to shuffling (elements are still
      randomized if `shuffle=True`. Note that if the seed is set, then order
      of elements after shuffling is deterministic). Defaults to `False`.
    default_float_type: Either `tf.float32` or `tf.float64`. If defaults are
      not provided, float-like strings are interpreted to be this type.
    num_rows_for_inference: Number of rows of a file to use for type inference
      if record_defaults is not provided. If None, reads all the rows of all
      the files. Defaults to 100.

  Returns:
    A dataset, where each element is a (features, labels) tuple that corresponds
    to a batch of `batch_size` CSV rows. The features dictionary maps feature
    column names to `Tensor`s containing the corresponding column data, and
    labels is a `Tensor` containing the column data for the label column
    specified by `label_name`.

  Raises:
    ValueError: If any of the arguments is malformed.
  """
    # Create dataset of all matching filenames
    filenames = _get_file_names(file_pattern, False)
    dataset = dataset_ops.Dataset.from_tensor_slices(filenames)
    if shuffle:
        dataset = dataset.shuffle(len(filenames), shuffle_seed)

    # Clean arguments; figure out column names and defaults
    if comment is not None and len(comment) != 1:
        raise ValueError(
            "`comment` arg must be a single-character string or None")

    if column_names is None:
        if not header:
            raise ValueError(
                "Cannot infer column names without a header line.")
        # If column names are not provided, infer from the header lines
        column_names = _infer_column_names(filenames, field_delim,
                                           use_quote_delim)
    if len(column_names) != len(set(column_names)):
        raise ValueError("Cannot have duplicate column names.")

    if select_columns is not None:
        select_columns = _get_sorted_col_indices(select_columns, column_names)

    if column_defaults is not None:
        column_defaults = [
            constant_op.constant([], dtype=x)
            if x in _ACCEPTABLE_CSV_TYPES else x for x in column_defaults
        ]
    else:
        # If column defaults are not provided, infer from records at graph
        # construction time
        column_defaults = _infer_column_defaults(filenames, len(column_names),
                                                 field_delim, use_quote_delim,
                                                 na_value, header, comment,
                                                 default_float_type,
                                                 num_rows_for_inference,
                                                 select_columns)

    if select_columns is not None and len(column_defaults) != len(
            select_columns):
        raise ValueError(
            "If specified, column_defaults and select_columns must have same "
            "length.")
    if select_columns is not None and len(column_names) > len(select_columns):
        # Pick the relevant subset of column names
        column_names = [column_names[i] for i in select_columns]

    if label_name is not None and label_name not in column_names:
        raise ValueError("`label_name` provided must be one of the columns.")

    # Define map and filter functions
    def filter_fn(line):
        return math_ops.not_equal(string_ops.substr(line, 0, 1), comment)

    def filename_to_dataset(filename):
        ds = core_readers.TextLineDataset(filename)
        if header:
            ds = ds.skip(1)
        if comment is not None:
            ds = ds.filter(filter_fn)
        return ds

    def decode_csv(line):
        """Decodes CSV line into features.

    Args:
      line: String tensor corresponding to one csv record.
    Returns:
      A dictionary of feature names to values for that particular record. If
      label_name is provided, extracts the label feature to be returned as the
      second element of the tuple.
    """
        columns = parsing_ops.decode_csv(
            line,
            column_defaults,
            field_delim=field_delim,
            use_quote_delim=use_quote_delim,
            na_value=na_value,
            select_cols=select_columns,
        )
        features = dict(zip(column_names, columns))
        if label_name is not None:
            label = features.pop(label_name)
            return features, label
        return features

    # Read files sequentially or in parallel
    dataset = dataset.apply(
        interleave_ops.parallel_interleave(filename_to_dataset,
                                           cycle_length=num_parallel_reads,
                                           sloppy=sloppy))

    if num_epochs != 1 and shuffle:
        # Use shuffle_and_repeat for perf
        dataset = dataset.apply(
            shuffle_ops.shuffle_and_repeat(shuffle_buffer_size, num_epochs,
                                           shuffle_seed))
    elif shuffle:
        dataset = dataset.shuffle(shuffle_buffer_size, shuffle_seed)
    elif num_epochs != 1:
        dataset = dataset.repeat(num_epochs)

    # Use map_and_batch for perf
    # TODO(b/76425672): use num_parallel_calls for better performance tuning when
    # that is added
    dataset = dataset.apply(
        batching.map_and_batch(map_func=decode_csv,
                               batch_size=batch_size,
                               num_parallel_batches=int(
                                   ceil(num_parallel_parser_calls /
                                        batch_size))))

    dataset = dataset.prefetch(prefetch_buffer_size)
    return dataset
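

# A hedged usage sketch, not part of the function above. The glob pattern
# "train-*.csv" and the column name "label" are hypothetical; any CSV whose
# header contains the chosen label column would work the same way.
def _csv_input_fn():
    dataset = make_csv_dataset(
        file_pattern="train-*.csv",  # hypothetical file glob
        batch_size=32,
        label_name="label",  # hypothetical label column
        num_epochs=1,
        shuffle=True)
    # Each element is a (features, labels) batch, directly usable as an
    # Estimator input_fn return value.
    return dataset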
Example #42
def make_tf_record_dataset(file_pattern,
                           batch_size,
                           parser_fn=None,
                           num_epochs=None,
                           shuffle=True,
                           shuffle_buffer_size=None,
                           shuffle_seed=None,
                           prefetch_buffer_size=None,
                           num_parallel_reads=None,
                           num_parallel_parser_calls=None,
                           drop_final_batch=False):
    """Reads and optionally parses TFRecord files into a dataset.

  Provides common functionality such as batching, optional parsing, shuffling,
  and performant defaults.

  Args:
    file_pattern: List of files or patterns of TFRecord file paths.
      See `tf.gfile.Glob` for pattern rules.
    batch_size: An int representing the number of records to combine
      in a single batch.
    parser_fn: (Optional.) A function accepting string input to parse
      and process the record contents. This function must map records
      to components of a fixed shape, so they may be batched. By
      default, uses the record contents unmodified.
    num_epochs: (Optional.) An int specifying the number of times this
      dataset is repeated.  If None (the default), cycles through the
      dataset forever.
    shuffle: (Optional.) A bool that indicates whether the input
      should be shuffled. Defaults to `True`.
    shuffle_buffer_size: (Optional.) Buffer size to use for
      shuffling. A large buffer size ensures better shuffling, but
      increases memory usage and startup time.
    shuffle_seed: (Optional.) Randomization seed to use for shuffling.
    prefetch_buffer_size: (Optional.) An int specifying the number of
      feature batches to prefetch for performance improvement.
      Defaults to auto-tune. Set to 0 to disable prefetching.
    num_parallel_reads: (Optional.) Number of threads used to read
      records from files. By default or if set to a value >1, the
      results will be interleaved.
    num_parallel_parser_calls: (Optional.) Number of records to parse in
      parallel. Defaults to an automatic selection.
    drop_final_batch: (Optional.) Whether the last batch should be
      dropped in case its size is smaller than `batch_size`; the
      default behavior is not to drop the smaller batch.

  Returns:
    A dataset, where each element matches the output of `parser_fn`
    except it will have an additional leading `batch-size` dimension,
    or a `batch_size`-length 1-D tensor of strings if `parser_fn` is
    unspecified.
  """
    files = dataset_ops.Dataset.list_files(file_pattern,
                                           shuffle=shuffle,
                                           seed=shuffle_seed)

    if num_parallel_reads is None:
        # Note: We considered auto-tuning this value, but there is a concern
        # that this affects the mixing of records from different files, which
        # could affect training convergence/accuracy, so we are defaulting to
        # a constant for now.
        num_parallel_reads = 24
    dataset = core_readers.TFRecordDataset(
        files, num_parallel_reads=num_parallel_reads)

    if shuffle_buffer_size is None:
        # TODO(josh11b): Auto-tune this value when not specified
        shuffle_buffer_size = 10000
    dataset = _maybe_shuffle_and_repeat(dataset, num_epochs, shuffle,
                                        shuffle_buffer_size, shuffle_seed)

    # NOTE(mrry): We set `drop_final_batch=True` when `num_epochs is None` to
    # improve the shape inference, because it makes the batch dimension static.
    # It is safe to do this because in that case we are repeating the input
    # indefinitely, and all batches will be full-sized.
    drop_final_batch = drop_final_batch or num_epochs is None

    if parser_fn is None:
        dataset = dataset.batch(batch_size, drop_remainder=drop_final_batch)
    else:
        # TODO(josh11b): if num_parallel_parser_calls is None, use some function
        # of num cores instead of map_and_batch's default behavior of one batch.
        dataset = dataset.apply(
            batching.map_and_batch(
                parser_fn,
                batch_size,
                num_parallel_calls=num_parallel_parser_calls,
                drop_remainder=drop_final_batch))

    if prefetch_buffer_size is None:
        prefetch_buffer_size = -1  # tf.data.experimental.AUTOTUNE
    if prefetch_buffer_size == 0:
        return dataset
    else:
        return dataset.prefetch(buffer_size=prefetch_buffer_size)
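

# A hedged usage sketch, not part of the function above. The feature keys
# "image/encoded" and "image/class/label" are illustrative tf.Example fields,
# and the shard pattern is hypothetical.
def _parse_record(serialized):
    features = parsing_ops.parse_single_example(
        serialized,
        features={
            "image/encoded": parsing_ops.FixedLenFeature([], dtypes.string),
            "image/class/label": parsing_ops.FixedLenFeature([], dtypes.int64),
        })
    return features["image/encoded"], features["image/class/label"]


def _tf_record_input_fn():
    # Returns batches of (encoded_image, label) pairs, repeated indefinitely
    # because num_epochs defaults to None.
    return make_tf_record_dataset(
        file_pattern="train-*-of-*",  # hypothetical TFRecord shard pattern
        batch_size=64,
        parser_fn=_parse_record)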
Example #43
def make_csv_dataset(
    file_pattern,
    batch_size,
    column_names=None,
    column_defaults=None,
    label_name=None,
    field_delim=",",
    use_quote_delim=True,
    na_value="",
    header=True,
    comment=None,
    num_epochs=None,
    shuffle=True,
    shuffle_buffer_size=10000,
    shuffle_seed=None,
    prefetch_buffer_size=1,
    num_parallel_reads=1,
    num_parallel_parser_calls=2,
    sloppy=False,
    default_float_type=dtypes.float32,
    num_rows_for_inference=100,
):
  """Reads CSV files into a dataset.

  Reads CSV files into a dataset, where each element is a (features, labels)
  tuple that corresponds to a batch of CSV rows. The features dictionary
  maps feature column names to `Tensor`s containing the corresponding
  feature data, and labels is a `Tensor` containing the batch's label data.

  Args:
    file_pattern: List of files or patterns of file paths containing CSV
      records. See @{tf.gfile.Glob} for pattern rules.
    batch_size: An int representing the number of consecutive elements of this
      dataset to combine in a single batch.
    column_names: An optional list of strings that corresponds to the CSV
      columns, in order. One per column of the input record. If this is not
      provided, infers the column names from the first row of the records.
      These names will be the keys of the features dict of each dataset element.
    column_defaults: An optional list of default values for the CSV fields. One
      item per column of the input record. Each item in the list is either a
      valid CSV dtype (float32, float64, int32, int64, or string), or a
      `Tensor` with one of the aforementioned types. The tensor can either be
      a scalar default value (if the column is optional), or an empty tensor (if
      the column is required). If a dtype is provided instead of a tensor, the
      column is also treated as required. If this list is not provided, tries
      to infer types based on reading the first num_rows_for_inference rows of
      files specified, and assumes all columns are optional, defaulting to `0`
      for numeric values and `""` for string values.
    label_name: An optional string corresponding to the label column. If
      provided, the data for this column is returned as a separate `Tensor` from
      the features dictionary, so that the dataset complies with the format
      expected by a `tf.Estimator.train` or `tf.Estimator.evaluate` input
      function.
    field_delim: An optional `string`. Defaults to `","`. Char delimiter to
      separate fields in a record.
    use_quote_delim: An optional bool. Defaults to `True`. If false, treats
      double quotation marks as regular characters inside of the string fields.
    na_value: Additional string to recognize as NA/NaN.
    header: A bool that indicates whether the first rows of provided CSV files
      correspond to header lines with column names, and should not be included
      in the data.
    comment: An optional character string that marks lines that should not be
      parsed as csv records. If this is provided, all lines that start with
      this character will not be parsed.
    num_epochs: An int specifying the number of times this dataset is repeated.
      If None, cycles through the dataset forever.
    shuffle: A bool that indicates whether the input should be shuffled.
    shuffle_buffer_size: Buffer size to use for shuffling. A large buffer size
      ensures better shuffling, but would increase memory usage and startup
      time.
    shuffle_seed: Randomization seed to use for shuffling.
    prefetch_buffer_size: An int specifying the number of feature batches to
      prefetch for performance improvement. Recommended value is the number of
      batches consumed per training step.
    num_parallel_reads: Number of threads used to read CSV records from files.
      If >1, the results will be interleaved.
    num_parallel_parser_calls: Number of parallel invocations of the CSV parsing
      function on CSV records.
    sloppy: If `True`, reading performance will be improved at
      the cost of non-deterministic ordering. If `False`, the order of elements
      produced is deterministic prior to shuffling (elements are still
      randomized if `shuffle=True`. Note that if the seed is set, then order
      of elements after shuffling is deterministic). Defaults to `False`.
    default_float_type: Either `tf.float32` or `tf.float64`. If defaults are
      not provided, float-like strings are interpreted to be this type.
    num_rows_for_inference: Number of rows of a file to use for type inference
      if record_defaults is not provided. If None, reads all the rows of all
      the files. Defaults to 100.

  Returns:
    A dataset, where each element is a (features, labels) tuple that corresponds
    to a batch of `batch_size` CSV rows. The features dictionary maps feature
    column names to `Tensor`s containing the corresponding column data, and
    labels is a `Tensor` containing the column data for the label column
    specified by `label_name`.

  Raises:
    ValueError: If any of the arguments is malformed.
  """
  # Create dataset of all matching filenames
  filenames = _get_file_names(file_pattern, False)
  dataset = dataset_ops.Dataset.from_tensor_slices(filenames)
  if shuffle:
    dataset = dataset.shuffle(len(filenames), shuffle_seed)

  # Clean arguments; figure out column names and defaults
  if comment is not None and len(comment) != 1:
    raise ValueError("`comment` arg must be a single-character string or None")

  if column_names is None:
    if not header:
      raise ValueError("Cannot infer column names without a header line.")
    # If column names are not provided, infer from the header lines
    column_names = _infer_column_names(filenames, field_delim, use_quote_delim)
  if len(column_names) != len(set(column_names)):
    raise ValueError("Cannot have duplicate column names.")

  if column_defaults is not None:
    column_defaults = [
        constant_op.constant([], dtype=x) if x in _ACCEPTABLE_CSV_TYPES else x
        for x in column_defaults
    ]
  else:
    # If column defaults are not provided, infer from records at graph
    # construction time
    column_defaults = _infer_column_defaults(
        filenames, len(column_names), field_delim, use_quote_delim, na_value,
        header, comment, default_float_type, num_rows_for_inference)

  if label_name is not None and label_name not in column_names:
    raise ValueError("`label_name` provided must be one of the columns.")

  # Define map and filter functions
  def filter_fn(line):
    return math_ops.not_equal(string_ops.substr(line, 0, 1), comment)

  def filename_to_dataset(filename):
    ds = core_readers.TextLineDataset(filename)
    if header:
      ds = ds.skip(1)
    if comment is not None:
      ds = ds.filter(filter_fn)
    return ds

  def decode_csv(line):
    """Decodes CSV line into features.

    Args:
      line: String tensor corresponding to one csv record.
    Returns:
      A dictionary of feature names to values for that particular record. If
      label_name is provided, extracts the label feature to be returned as the
      second element of the tuple.
    """
    columns = parsing_ops.decode_csv(
        line,
        column_defaults,
        field_delim=field_delim,
        use_quote_delim=use_quote_delim,
        na_value=na_value,
    )
    features = dict(zip(column_names, columns))
    if label_name is not None:
      label = features.pop(label_name)
      return features, label
    return features

  # Read files sequentially or in parallel
  dataset = dataset.apply(
      interleave_ops.parallel_interleave(
          filename_to_dataset, cycle_length=num_parallel_reads, sloppy=sloppy))

  if num_epochs != 1 and shuffle:
    # Use shuffle_and_repeat for perf
    dataset = dataset.apply(
        shuffle_ops.shuffle_and_repeat(shuffle_buffer_size, num_epochs,
                                       shuffle_seed))
  elif shuffle:
    dataset = dataset.shuffle(shuffle_buffer_size, shuffle_seed)
  elif num_epochs != 1:
    dataset = dataset.repeat(num_epochs)

  # Use map_and_batch for perf
  # TODO(b/76425672): use num_parallel_calls for better performance tuning when
  # that is added
  dataset = dataset.apply(
      batching.map_and_batch(
          map_func=decode_csv,
          batch_size=batch_size,
          num_parallel_batches=int(
              ceil(num_parallel_parser_calls / batch_size))))

  dataset = dataset.prefetch(prefetch_buffer_size)
  return dataset
Example #44
def make_tf_record_dataset(
    file_pattern,
    batch_size,
    parser_fn=None,
    num_epochs=None,
    shuffle=True,
    shuffle_buffer_size=None,
    shuffle_seed=None,
    prefetch_buffer_size=None,
    num_parallel_reads=None,
    num_parallel_parser_calls=None,
    drop_final_batch=False):
  """Reads and optionally parses TFRecord files into a dataset.

  Provides common functionality such as batching, optional parsing, shuffling,
  and performant defaults.

  Args:
    file_pattern: List of files or patterns of TFRecord file paths.
      See @{tf.gfile.Glob} for pattern rules.
    batch_size: An int representing the number of records to combine
      in a single batch.
    parser_fn: (Optional.) A function accepting string input to parse
      and process the record contents. This function must map records
      to components of a fixed shape, so they may be batched. By
      default, uses the record contents unmodified.
    num_epochs: (Optional.) An int specifying the number of times this
      dataset is repeated.  If None (the default), cycles through the
      dataset forever.
    shuffle: (Optional.) A bool that indicates whether the input
      should be shuffled. Defaults to `True`.
    shuffle_buffer_size: (Optional.) Buffer size to use for
      shuffling. A large buffer size ensures better shuffling, but
      increases memory usage and startup time.
    shuffle_seed: (Optional.) Randomization seed to use for shuffling.
    prefetch_buffer_size: (Optional.) An int specifying the number of
      feature batches to prefetch for performance improvement.
      Defaults to auto-tune. Set to 0 to disable prefetching.
    num_parallel_reads: (Optional.) Number of threads used to read
      records from files. By default or if set to a value >1, the
      results will be interleaved.
    num_parallel_parser_calls: (Optional.) Number of records to parse in
      parallel. Defaults to an automatic selection.
    drop_final_batch: (Optional.) Whether the last batch should be
      dropped in case its size is smaller than `batch_size`; the
      default behavior is not to drop the smaller batch.

  Returns:
    A dataset, where each element matches the output of `parser_fn`
    except it will have an additional leading `batch-size` dimension,
    or a `batch_size`-length 1-D tensor of strings if `parser_fn` is
    unspecified.
  """
  files = dataset_ops.Dataset.list_files(
      file_pattern, shuffle=shuffle, seed=shuffle_seed)

  if num_parallel_reads is None:
    # Note: We considered auto-tuning this value, but there is a concern
    # that this affects the mixing of records from different files, which
    # could affect training convergence/accuracy, so we are defaulting to
    # a constant for now.
    num_parallel_reads = 24
  dataset = core_readers.TFRecordDataset(
      files, num_parallel_reads=num_parallel_reads)

  if shuffle_buffer_size is None:
    # TODO(josh11b): Auto-tune this value when not specified
    shuffle_buffer_size = 10000
  dataset = _maybe_shuffle_and_repeat(
      dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed)

  if parser_fn is None:
    if drop_final_batch:
      dataset = dataset.apply(batching.batch_and_drop_remainder(batch_size))
    else:
      dataset = dataset.batch(batch_size)
  else:
    # TODO(josh11b): if num_parallel_parser_calls is None, use some function
    # of num cores instead of map_and_batch's default behavior of one batch.
    dataset = dataset.apply(batching.map_and_batch(
        parser_fn, batch_size, num_parallel_calls=num_parallel_parser_calls,
        drop_remainder=drop_final_batch))

  if prefetch_buffer_size is None:
    prefetch_buffer_size = -1  # tf.data.experimental.AUTOTUNE
  if prefetch_buffer_size == 0:
    return dataset
  else:
    return dataset.prefetch(buffer_size=prefetch_buffer_size)
Example #45
    def minibatch(self,
                  dataset,
                  subset,
                  use_datasets,
                  cache_data,
                  shift_ratio=-1):
        if shift_ratio < 0:
            shift_ratio = self.shift_ratio
        with tf.name_scope('batch_processing'):
            # Build final results per split.
            images = [[] for _ in range(self.num_splits)]
            labels = [[] for _ in range(self.num_splits)]
            if use_datasets:
                glob_pattern = dataset.tf_record_pattern(subset)
                file_names = gfile.Glob(glob_pattern)
                if not file_names:
                    raise ValueError(
                        'Found no files in --data_dir matching: {}'.format(
                            glob_pattern))
                ds = tf.data.TFRecordDataset.list_files(file_names)
                ds = ds.apply(
                    interleave_ops.parallel_interleave(tf.data.TFRecordDataset,
                                                       cycle_length=10))
                if cache_data:
                    ds = ds.take(1).cache().repeat()
                counter = tf.data.Dataset.range(self.batch_size)
                counter = counter.repeat()
                ds = tf.data.Dataset.zip((ds, counter))
                ds = ds.prefetch(buffer_size=self.batch_size)
                ds = ds.shuffle(buffer_size=10000)
                ds = ds.repeat()
                ds = ds.apply(
                    batching.map_and_batch(
                        map_func=self.parse_and_preprocess,
                        batch_size=self.batch_size_per_split,
                        num_parallel_batches=self.num_splits))
                ds = ds.prefetch(buffer_size=self.num_splits)
                ds_iterator = ds.make_one_shot_iterator()
                for d in xrange(self.num_splits):
                    labels[d], images[d] = ds_iterator.get_next()

            else:
                record_input = data_flow_ops.RecordInput(
                    file_pattern=dataset.tf_record_pattern(subset),
                    seed=301,
                    parallelism=64,
                    buffer_size=10000,
                    batch_size=self.batch_size,
                    shift_ratio=shift_ratio,
                    name='record_input')
                records = record_input.get_yield_op()
                records = tf.split(records, self.batch_size, 0)
                records = [tf.reshape(record, []) for record in records]
                for idx in xrange(self.batch_size):
                    value = records[idx]
                    (label, image) = self.parse_and_preprocess(value, idx)
                    split_index = idx % self.num_splits
                    labels[split_index].append(label)
                    images[split_index].append(image)

            for split_index in xrange(self.num_splits):
                if not use_datasets:
                    images[split_index] = tf.parallel_stack(
                        images[split_index])
                    labels[split_index] = tf.concat(labels[split_index], 0)
                images[split_index] = tf.cast(images[split_index], self.dtype)
                depth = 3
                images[split_index] = tf.reshape(images[split_index],
                                                 shape=[
                                                     self.batch_size_per_split,
                                                     self.height, self.width,
                                                     depth
                                                 ])
                labels[split_index] = tf.reshape(labels[split_index],
                                                 [self.batch_size_per_split])
            return images, labels
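
# A hedged usage sketch, not part of the class above: each (images, labels)
# split returned by minibatch() typically feeds one model tower.
# `preprocessor`, `imagenet_dataset` and `build_tower` are hypothetical
# stand-ins for objects defined elsewhere in the benchmark harness.
def _build_towers(preprocessor, imagenet_dataset, build_tower):
    images, labels = preprocessor.minibatch(
        imagenet_dataset, subset='train', use_datasets=True, cache_data=False)
    return [build_tower(images[i], labels[i]) for i in range(len(images))]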
Example #46
    def __init__(self, split, batchsize, idx, num_workers, rescale=1):
        IMAGENET_NUM_TRAIN_IMAGES = 1281167
        IMAGENET_NUM_VAL_IMAGES = 50000

        self.rescale = rescale

        if split == "train":
            im_length = IMAGENET_NUM_TRAIN_IMAGES
            records_to_skip = im_length * idx // num_workers
            records_to_read = (im_length * (idx + 1) // num_workers -
                               records_to_skip)
        else:
            im_length = IMAGENET_NUM_VAL_IMAGES

        self.curr_sample = 0

        index_path = osp.join(FLAGS.imagenet_datadir, 'index.json')
        with open(index_path) as f:
            metadata = json.load(f)
            counts = metadata['record_counts']

        if split == 'train':
            file_names = list(
                sorted([x for x in counts.keys() if x.startswith('train')]))

            result_records_to_skip = None
            files = []
            for filename in file_names:
                records_in_file = counts[filename]
                if records_to_skip >= records_in_file:
                    records_to_skip -= records_in_file
                    continue
                elif records_to_read > 0:
                    if result_records_to_skip is None:
                        # Record the number to skip in the first file
                        result_records_to_skip = records_to_skip
                    files.append(filename)
                    records_to_read -= (records_in_file - records_to_skip)
                    records_to_skip = 0
                else:
                    break
        else:
            files = list(
                sorted(
                    [x for x in counts.keys() if x.startswith('validation')]))

        files = [osp.join(FLAGS.imagenet_datadir, x) for x in files]
        preprocess_function = ImagenetPreprocessor(
            128, dtype=tf.float32, train=False).parse_and_preprocess

        ds = tf.data.TFRecordDataset.from_generator(lambda: files,
                                                    output_types=tf.string)
        ds = ds.apply(tf.data.TFRecordDataset)
        ds = ds.take(im_length)
        ds = ds.prefetch(buffer_size=FLAGS.batch_size)
        ds = ds.apply(tf.contrib.data.shuffle_and_repeat(buffer_size=10000))
        ds = ds.apply(
            batching.map_and_batch(map_func=preprocess_function,
                                   batch_size=FLAGS.batch_size,
                                   num_parallel_batches=4))
        ds = ds.prefetch(buffer_size=2)

        ds_iterator = ds.make_initializable_iterator()
        labels, images = ds_iterator.get_next()
        self.images = tf.clip_by_value(
            images / 256 + tf.random_uniform(tf.shape(images), 0, 1. / 256),
            0.0, 1.0)
        self.labels = labels

        config = tf.ConfigProto(device_count={'GPU': 0})
        sess = tf.Session(config=config)
        sess.run(ds_iterator.initializer)

        self.im_length = im_length // batchsize

        self.sess = sess
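
    # A hedged sketch, not part of the original class: one way the loader
    # above might be consumed, pulling numpy batches through the session
    # created in __init__. The method name is illustrative.
    def iterate_batches(self):
        for _ in range(self.im_length):
            images, labels = self.sess.run([self.images, self.labels])
            yield images, labels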