# Imports assumed from the surrounding TensorFlow test module
# (tensorflow/contrib/data kernel tests); the methods below belong to a
# tf.test.TestCase subclass.
import numpy as np

from tensorflow.contrib.data.python.ops import batching
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops


def testPaddedBatchAndDropRemainder(self):
    # Build seven variable-length elements: (length, [1..length], 2 * length).
    els = []
    for length in [3, 6, 9, 4, 12, 10, 2]:
      els.append((np.array(length), np.arange(length) + 1,
                  np.array(length * 2)))

    dataset = dataset_ops.Dataset.from_tensors(els[0])
    for el in els[1:]:
      dataset = dataset.concatenate(dataset_ops.Dataset.from_tensors(el))

    batch_size = array_ops.placeholder(dtypes.int64, shape=[])
    iterator = (
        dataset.apply(
            batching.padded_batch_and_drop_remainder(
                batch_size, ([], [None], []))).make_initializable_iterator())

    next_element = iterator.get_next()

    with self.cached_session() as sess:
      for test_batch_size in [1, 3, 7, 10]:
        sess.run(iterator.initializer, feed_dict={batch_size: test_batch_size})
        # With drop-remainder semantics, only full batches are produced:
        # 7 elements yield 7 // test_batch_size batches.
        num_batches = 7 // test_batch_size
        for i in range(num_batches):
          result = sess.run(next_element)
          for component_idx, result_component in enumerate(result):
            for j in range(test_batch_size):
              data_idx = i * test_batch_size + j
              comp = result_component[j]
              unpadded = comp[comp > 0]
              if np.isscalar(comp):
                # The boolean-mask indexing above adds a dimension back;
                # remove it so scalars compare against scalars.
                unpadded = unpadded[0]
              self.assertAllEqual(els[data_idx][component_idx], unpadded)
        with self.assertRaises(errors.OutOfRangeError):
          sess.run(next_element)
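
For comparison, the behavior exercised above was later folded into the core
API: Dataset.padded_batch gained a drop_remainder argument (TensorFlow 1.10+)
and this contrib wrapper was deprecated in its favor. A minimal sketch,
assuming that newer API (the `lengths` name is illustrative, not from the
test):

import numpy as np
import tensorflow as tf

# Seven variable-length vectors, mirroring the test data above.
lengths = [3, 6, 9, 4, 12, 10, 2]
dataset = tf.data.Dataset.from_generator(
    lambda: (np.arange(n) + 1 for n in lengths),
    output_types=tf.int64,
    output_shapes=[None])

# Pad each element to the longest in its batch and drop the short final
# batch: 7 elements with batch size 3 yields exactly two batches.
batched = dataset.padded_batch(3, padded_shapes=[None],
                               drop_remainder=True)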
def testPaddedBatchAndDropRemainderSparseError(self):

    def _map_fn(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0, 0]], values=(i * [1]), dense_shape=[1, 1]), i

    with self.assertRaises(TypeError):
      _ = dataset_ops.Dataset.range(10).map(_map_fn).apply(
          batching.padded_batch_and_drop_remainder(5))
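
The TypeError above is expected: padded batching only pads dense tensors, so
sparse components are rejected up front. A common workaround, sketched here
rather than taken from the original test (assumes tf.sparse.to_dense, called
tf.sparse_tensor_to_dense in older 1.x releases), is to densify before
batching:

import tensorflow as tf

def _sparse_map_fn(i):
  # Same shape of data as the test's _map_fn, built with the core API.
  return tf.SparseTensor(indices=[[0, 0]], values=[i],
                         dense_shape=[1, 1]), i

def _to_dense(sp, i):
  # Densify the sparse component so padded_batch can pad it.
  return tf.sparse.to_dense(sp), i

dataset = (tf.data.Dataset.range(10)
           .map(_sparse_map_fn)
           .map(_to_dense)
           .padded_batch(5, padded_shapes=([None, None], []),
                         drop_remainder=True))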