def testCopyToDevicePingPongCPUGPU(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  with compat.forward_compatibility_horizon(2018, 8, 4):
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/gpu:0", source_device="/cpu:0"))
    back_to_cpu_dataset = device_dataset.apply(
        prefetching_ops.copy_to_device("/cpu:0", source_device="/gpu:0"))

    with ops.device("/cpu:0"):
      iterator = dataset_ops.make_initializable_iterator(back_to_cpu_dataset)
      next_element = iterator.get_next()

    with self.cached_session(
        config=config_pb2.ConfigProto(allow_soft_placement=False)):
      self.evaluate(iterator.initializer)
      for i in range(10):
        self.assertEqual(i, self.evaluate(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_element)

def __init__(self, dataset):
  """Creates a new iterator over the given dataset.

  For example:
  ```python
  dataset = tf.data.Dataset.range(4)
  for x in Iterator(dataset):
    print(x)
  ```

  Tensors produced will be placed on the device on which this iterator object
  was created.

  Args:
    dataset: A `tf.data.Dataset` object.

  Raises:
    TypeError: If `dataset` is an unsupported type.
    RuntimeError: When invoked without eager execution enabled.
  """
  if not context.context().device_spec.device_type:
    is_remote_device = False
  else:
    is_remote_device = context.context().device_spec.device_type != "CPU"
  if is_remote_device:
    with ops.device(None):
      # Let the placer figure out where to place the various functions etc.
      # created by the CopyToDeviceDataset.
      dataset = dataset.apply(
          prefetching_ops.copy_to_device(context.context().device_name))
      dataset = dataset.prefetch(1)
  super(Iterator, self).__init__(dataset)

def testInsideFunction(self):
  if test_util.is_gpu_available():
    self.skipTest(
        "b/123899495: Colocation errors for critical sections in map on GPU")
  cs = critical_section_ops.CriticalSection()
  with ops.device("/gpu:0" if test_util.is_gpu_available() else "/cpu:0"):
    v = resource_variable_ops.ResourceVariable(1)

  def fn():
    return v.read_value()

  # map() creates a TensorFlow function.
  ds = dataset_ops.Dataset.range(1)
  if test_util.is_gpu_available():
    ds = (ds.apply(prefetching_ops.copy_to_device("/gpu:0"))
          .apply(prefetching_ops.map_on_gpu(lambda _: cs.execute(fn))))
  else:
    ds = ds.map(lambda _: cs.execute(fn))

  def get_first():
    if context.executing_eagerly():
      return self.evaluate(ds.make_one_shot_iterator().get_next())
    itr = ds.make_initializable_iterator()
    self.evaluate([v.initializer, itr.initializer])
    return self.evaluate(itr.get_next())

  self.assertEqual(1, get_first())

def testCopyToSameDevice(self):
  host_dataset = dataset_ops.Dataset.range(10)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:0"))

  with ops.device("/cpu:0"):
    iterator = device_dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

  self.assertEqual(host_dataset.output_types, device_dataset.output_types)
  self.assertEqual(host_dataset.output_types, iterator.output_types)
  self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
  self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
  self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
  self.assertEqual(host_dataset.output_classes, iterator.output_classes)

  self.assertEqual(dtypes.int64, next_element.dtype)
  self.assertEqual([], next_element.shape)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config) as sess:
    for i in range(10):
      self.assertEqual(i, self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)

def testCopyDictToDeviceWithPrefetch(self):
  host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)

  with ops.device("/cpu:1"):
    iterator = dataset_ops.make_one_shot_iterator(device_dataset)
    next_element = iterator.get_next()

  self.assertTrue(
      dataset_ops.get_structure(host_dataset).is_compatible_with(
          dataset_ops.get_structure(device_dataset)))
  self.assertTrue(
      dataset_ops.get_structure(host_dataset).is_compatible_with(
          dataset_ops.get_structure(iterator)))

  self.assertEqual(dtypes.int64, next_element["a"].dtype)
  self.assertEqual([], next_element["a"].shape)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config):
    for i in range(10):
      self.assertEqual({"a": i}, self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)

def __init__(self, dataset):
  """Creates a new iterator over the given dataset.

  For example:
  ```python
  dataset = tf.data.Dataset.range(4)
  for x in Iterator(dataset):
    print(x)
  ```

  Tensors produced will be placed on the device on which this iterator object
  was created.

  Args:
    dataset: A `tf.data.Dataset` object.

  Raises:
    TypeError: If `dataset` is an unsupported type.
    RuntimeError: When invoked without eager execution enabled.
  """
  if not context.context().device_spec.device_type:
    is_remote_device = False
  else:
    is_remote_device = context.context().device_spec.device_type != "CPU"
  if is_remote_device:
    with ops.device(None):
      # Let the placer figure out where to place the various functions etc.
      # created by the CopyToDeviceDataset.
      dataset = dataset.apply(
          prefetching_ops.copy_to_device(context.context().device_name))
      dataset = dataset.prefetch(1)
  super(Iterator, self).__init__(dataset)

def testCopySparseTensorsToDeviceWithPrefetch(self):

  def make_tensor(i):
    return sparse_tensor.SparseTensorValue(
        indices=[[0, 0]], values=(i * [1]), dense_shape=[2, 2])

  host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)

  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)

  with ops.device("/cpu:1"):
    iterator = dataset_ops.make_one_shot_iterator(device_dataset)
    next_element = iterator.get_next()

  self.assertTrue(
      structure.are_compatible(
          dataset_ops.get_structure(host_dataset),
          dataset_ops.get_structure(device_dataset)))

  self.assertEqual(dtypes.int64, next_element.dtype)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config):
    for i in range(10):
      actual = self.evaluate(next_element)
      self.assertAllEqual([i], actual.values)
      self.assertAllEqual([[0, 0]], actual.indices)
      self.assertAllEqual([2, 2], actual.dense_shape)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)

def testIteratorGetNextAsOptionalOnGPU(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  host_dataset = dataset_ops.Dataset.range(3)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0"))
  with ops.device("/gpu:0"):
    iterator = device_dataset.make_initializable_iterator()
    next_elem = iterator_ops.get_next_as_optional(iterator)
    elem_has_value_t = next_elem.has_value()
    elem_value_t = next_elem.get_value()

  with self.cached_session() as sess:
    # Before initializing the iterator, evaluating the optional fails with
    # a FailedPreconditionError.
    with self.assertRaises(errors.FailedPreconditionError):
      sess.run(elem_has_value_t)
    with self.assertRaises(errors.FailedPreconditionError):
      sess.run(elem_value_t)

    # For each element of the dataset, assert that the optional evaluates to
    # the expected value.
    sess.run(iterator.initializer)
    for i in range(3):
      elem_has_value, elem_value = sess.run([elem_has_value_t, elem_value_t])
      self.assertTrue(elem_has_value)
      self.assertEqual(i, elem_value)

    # After exhausting the iterator, `next_elem.has_value()` will evaluate to
    # false, and attempting to get the value will fail.
    for _ in range(2):
      self.assertFalse(sess.run(elem_has_value_t))
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(elem_value_t)

def testCopyToDeviceGpuWithMap(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  def generator():
    for i in range(10):
      yield i, float(i), str(i)

  host_dataset = dataset_ops.Dataset.from_generator(
      generator, output_types=(dtypes.int32, dtypes.float32, dtypes.string))
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0"))

  def gpu_map_func(x, y, z):
    return math_ops.square(x), math_ops.square(y), z

  device_dataset = device_dataset.apply(
      prefetching_ops.map_on_gpu(gpu_map_func))
  options = dataset_ops.Options()
  options.experimental_autotune = False
  device_dataset = device_dataset.with_options(options)

  with ops.device("/gpu:0"):
    iterator = device_dataset.make_initializable_iterator()
    next_element = iterator.get_next()

  with self.cached_session() as sess:
    sess.run(iterator.initializer)
    for i in range(10):
      x, y, z = sess.run(next_element)
      self.assertEqual(i**2, x)
      self.assertEqual(float(i**2), y)
      self.assertEqual(util_compat.as_bytes(str(i)), z)
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(next_element)

def testCopySparseTensorsToDeviceWithPrefetch(self):

  def make_tensor(i):
    return sparse_tensor.SparseTensorValue(
        indices=[[0, 0]], values=(i * [1]), dense_shape=[2, 2])

  host_dataset = dataset_ops.Dataset.range(10).map(make_tensor)

  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)

  with ops.device("/cpu:1"):
    iterator = device_dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

  self.assertEqual(host_dataset.output_types, device_dataset.output_types)
  self.assertEqual(host_dataset.output_types, iterator.output_types)
  self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
  self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
  self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
  self.assertEqual(host_dataset.output_classes, iterator.output_classes)

  self.assertEqual(dtypes.int64, next_element.dtype)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config) as sess:
    for i in range(10):
      actual = sess.run(next_element)
      self.assertAllEqual([i], actual.values)
      self.assertAllEqual([[0, 0]], actual.indices)
      self.assertAllEqual([2, 2], actual.dense_shape)
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(next_element)

def testCopyToDeviceWithReInitAndPrefetch(self):
  host_dataset = dataset_ops.Dataset.range(10)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)

  with ops.device("/cpu:1"):
    iterator = dataset_ops.make_initializable_iterator(device_dataset)
    next_element = iterator.get_next()

  self.assertEqual(host_dataset.output_types, device_dataset.output_types)
  self.assertEqual(host_dataset.output_types, iterator.output_types)
  self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
  self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
  self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
  self.assertEqual(host_dataset.output_classes, iterator.output_classes)

  self.assertEqual(dtypes.int64, next_element.dtype)
  self.assertEqual([], next_element.shape)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config):
    self.evaluate(iterator.initializer)
    for i in range(5):
      self.assertEqual(i, self.evaluate(next_element))
    self.evaluate(iterator.initializer)
    for i in range(10):
      self.assertEqual(i, self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)

def testInsideFunction(self):
  if test_util.is_gpu_available():
    self.skipTest(
        "b/123899495: Colocation errors for critical sections in map on GPU")
  cs = critical_section_ops.CriticalSection()
  with ops.device("/gpu:0" if test_util.is_gpu_available() else "/cpu:0"):
    v = resource_variable_ops.ResourceVariable(1)

  def fn():
    return v.read_value()

  # map() creates a TensorFlow function.
  ds = dataset_ops.Dataset.range(1)
  if test_util.is_gpu_available():
    ds = (ds.apply(prefetching_ops.copy_to_device("/gpu:0")).apply(
        prefetching_ops.map_on_gpu(lambda _: cs.execute(fn))))
  else:
    ds = ds.map(lambda _: cs.execute(fn))

  def get_first():
    if context.executing_eagerly():
      return self.evaluate(
          dataset_ops.make_one_shot_iterator(ds).get_next())
    itr = dataset_ops.make_initializable_iterator(ds)
    self.evaluate([v.initializer, itr.initializer])
    return self.evaluate(itr.get_next())

  self.assertEqual(1, get_first())

def testCopyToDeviceWithReInitAndPrefetch(self):
  host_dataset = dataset_ops.Dataset.range(10)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)

  with ops.device("/cpu:1"):
    iterator = dataset_ops.make_initializable_iterator(device_dataset)
    next_element = iterator.get_next()

  self.assertTrue(
      structure.are_compatible(
          dataset_ops.get_structure(host_dataset),
          dataset_ops.get_structure(device_dataset)))

  self.assertEqual(dtypes.int64, next_element.dtype)
  self.assertEqual([], next_element.shape)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config):
    self.evaluate(iterator.initializer)
    for i in range(5):
      self.assertEqual(i, self.evaluate(next_element))
    self.evaluate(iterator.initializer)
    for i in range(10):
      self.assertEqual(i, self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)

def testCopyDictToDeviceWithPrefetch(self):
  host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)

  with ops.device("/cpu:1"):
    iterator = dataset_ops.make_one_shot_iterator(device_dataset)
    next_element = iterator.get_next()

  self.assertEqual(host_dataset.output_types, device_dataset.output_types)
  self.assertEqual(host_dataset.output_types, iterator.output_types)
  self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
  self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
  self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
  self.assertEqual(host_dataset.output_classes, iterator.output_classes)

  self.assertEqual(dtypes.int64, next_element["a"].dtype)
  self.assertEqual([], next_element["a"].shape)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config):
    for i in range(10):
      self.assertEqual({"a": i}, self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)

def testCopyDictToDeviceWithPrefetch(self):
  host_dataset = dataset_ops.Dataset.range(10).map(lambda x: {"a": x})
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)

  with ops.device("/cpu:1"):
    iterator = device_dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

  self.assertEqual(host_dataset.output_types, device_dataset.output_types)
  self.assertEqual(host_dataset.output_types, iterator.output_types)
  self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
  self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
  self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
  self.assertEqual(host_dataset.output_classes, iterator.output_classes)

  self.assertEqual(dtypes.int64, next_element["a"].dtype)
  self.assertEqual([], next_element["a"].shape)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config) as sess:
    for i in range(10):
      self.assertEqual({"a": i}, sess.run(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(next_element)

def testCopyToDeviceInt32(self):
  host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:1"))

  with ops.device("/cpu:1"):
    iterator = device_dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

  self.assertEqual(host_dataset.output_types, device_dataset.output_types)
  self.assertEqual(host_dataset.output_types, iterator.output_types)
  self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
  self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
  self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
  self.assertEqual(host_dataset.output_classes, iterator.output_classes)

  self.assertEqual(dtypes.int32, next_element.dtype)
  self.assertEqual((4,), next_element.shape)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config) as sess:
    self.assertAllEqual([0, 1, 2, 3], self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)

def testCopyToSameDevice(self):
  host_dataset = dataset_ops.Dataset.range(10)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:0"))

  with ops.device("/cpu:0"):
    iterator = device_dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

  self.assertEqual(host_dataset.output_types, device_dataset.output_types)
  self.assertEqual(host_dataset.output_types, iterator.output_types)
  self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
  self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
  self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
  self.assertEqual(host_dataset.output_classes, iterator.output_classes)

  self.assertEqual(dtypes.int64, next_element.dtype)
  self.assertEqual([], next_element.shape)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config) as sess:
    for i in range(10):
      self.assertEqual(i, sess.run(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(next_element)

def testCopyToDeviceWithReInitAndPrefetch(self):
  host_dataset = dataset_ops.Dataset.range(10)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:1")).prefetch(1)

  with ops.device("/cpu:1"):
    iterator = device_dataset.make_initializable_iterator()
    next_element = iterator.get_next()

  self.assertEqual(host_dataset.output_types, device_dataset.output_types)
  self.assertEqual(host_dataset.output_types, iterator.output_types)
  self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
  self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
  self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
  self.assertEqual(host_dataset.output_classes, iterator.output_classes)

  self.assertEqual(dtypes.int64, next_element.dtype)
  self.assertEqual([], next_element.shape)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config) as sess:
    sess.run(iterator.initializer)
    for i in range(5):
      self.assertEqual(i, sess.run(next_element))
    sess.run(iterator.initializer)
    for i in range(10):
      self.assertEqual(i, sess.run(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(next_element)

def testCopyToDeviceGpuWithPrefetch(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  host_dataset = dataset_ops.Dataset.range(10)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)

  with ops.device("/gpu:0"):
    self.assertDatasetProduces(device_dataset, list(range(10)))

def copy_to_device(target_device, source_device="/cpu:0"):
  """A transformation that copies dataset elements to the given `target_device`.

  Args:
    target_device: The name of a device to which elements will be copied.
    source_device: The original device on which `input_dataset` will be placed.

  Returns:
    A `Dataset` transformation function, which can be passed to
    `tf.data.Dataset.apply`.
  """
  return prefetching_ops.copy_to_device(target_device, source_device)

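# --- Illustrative usage sketch (not part of the original sources) ---
# A minimal example of how a transformation like the `copy_to_device` wrapper
# above is typically applied in a pipeline, assuming the public
# `tf.data.experimental.copy_to_device` API and a machine that actually has a
# "/gpu:0" device; the tests in this file follow the same pattern.
import tensorflow as tf

dataset = tf.data.Dataset.range(10)
# Copy each element from the host CPU to the GPU, then prefetch one element
# on the GPU so the host-to-device copy overlaps with consumption.
dataset = dataset.apply(
    tf.data.experimental.copy_to_device("/gpu:0", source_device="/cpu:0"))
with tf.device("/gpu:0"):
  dataset = dataset.prefetch(1)
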
def testCopyToDevicePingPongCPUGPU(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  with compat.forward_compatibility_horizon(2018, 8, 4):
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops.copy_to_device("/gpu:0", source_device="/cpu:0"))
    back_to_cpu_dataset = device_dataset.apply(
        prefetching_ops.copy_to_device("/cpu:0", source_device="/gpu:0"))

    with ops.device("/cpu:0"):
      iterator = back_to_cpu_dataset.make_initializable_iterator()
      next_element = iterator.get_next()

    with self.cached_session() as sess:
      sess.run(iterator.initializer)
      for i in range(10):
        self.assertEqual(i, sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)

def testCopyToDeviceGpuInt32AndPrefetch(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)

  with ops.device("/gpu:0"):
    iterator = device_dataset.make_initializable_iterator()
    next_element = iterator.get_next()

  with self.cached_session() as sess:
    self.evaluate(iterator.initializer)
    self.assertAllEqual([0, 1, 2, 3], self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)

def testCopyToDeviceGpuStringsAndPrefetch(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"])
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0"))

  with ops.device("/gpu:0"):
    iterator = device_dataset.make_initializable_iterator()
    next_element = iterator.get_next()

  with self.cached_session() as sess:
    sess.run(iterator.initializer)
    self.assertAllEqual([b"a", b"b", b"c"], sess.run(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(next_element)

def testCopyToDeviceHostOptimizations(self):
  host_dataset = dataset_ops.Dataset.range(10)
  host_dataset = host_dataset.apply(testing.assert_next(["MapAndBatch"]))
  host_dataset = host_dataset.map(lambda x: x * x).batch(10)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:1"))

  with ops.device("/cpu:1"):
    iterator = dataset_ops.make_one_shot_iterator(device_dataset)
    next_element = iterator.get_next()

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config):
    self.assertAllEqual([x * x for x in range(10)],
                        self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)

def testCopyToDeviceGpuStrings(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"])
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0"))

  with ops.device("/gpu:0"):
    iterator = dataset_ops.make_initializable_iterator(device_dataset)
    next_element = iterator.get_next()

  with self.cached_session(
      config=config_pb2.ConfigProto(allow_soft_placement=False)):
    self.evaluate(iterator.initializer)
    self.assertAllEqual([b"a", b"b", b"c"], self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)

def testCopyToDeviceGpuWithPrefetch(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  host_dataset = dataset_ops.Dataset.range(10)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)

  with ops.device("/gpu:0"):
    iterator = device_dataset.make_initializable_iterator()
    next_element = iterator.get_next()

  with self.cached_session() as sess:
    sess.run(iterator.initializer)
    for i in range(10):
      self.assertEqual(i, sess.run(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(next_element)

def testCopyToDeviceGpu(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  host_dataset = dataset_ops.Dataset.range(10)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0"))

  with ops.device("/gpu:0"):
    iterator = device_dataset.make_initializable_iterator()
    next_element = iterator.get_next()

  with self.cached_session() as sess:
    self.evaluate(iterator.initializer)
    for i in range(10):
      self.assertEqual(i, self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)

def testCopyToDeviceGpuStringsAndPrefetch(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  host_dataset = dataset_ops.Dataset.from_tensors(["a", "b", "c"])
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0"))

  with ops.device("/gpu:0"):
    iterator = dataset_ops.make_initializable_iterator(device_dataset)
    next_element = iterator.get_next()

  with self.cached_session(
      config=config_pb2.ConfigProto(allow_soft_placement=False)):
    self.evaluate(iterator.initializer)
    self.assertAllEqual([b"a", b"b", b"c"], self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)

def testCopyToDeviceGpuWithPrefetch(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  host_dataset = dataset_ops.Dataset.range(10)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)

  with ops.device("/gpu:0"):
    iterator = dataset_ops.make_initializable_iterator(device_dataset)
    next_element = iterator.get_next()

  with self.cached_session(
      config=config_pb2.ConfigProto(allow_soft_placement=False)):
    self.evaluate(iterator.initializer)
    for i in range(10):
      self.assertEqual(i, self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)

def testCopyToDeviceGpuWithPrefetch(self):
  if not test_util.is_gpu_available():
    self.skipTest("No GPU available")

  host_dataset = dataset_ops.Dataset.range(10)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/gpu:0")).prefetch(1)

  with ops.device("/gpu:0"):
    iterator = dataset_ops.make_initializable_iterator(device_dataset)
    next_element = iterator.get_next()

  with self.cached_session(
      config=config_pb2.ConfigProto(allow_soft_placement=False)):
    self.evaluate(iterator.initializer)
    for i in range(10):
      self.assertEqual(i, self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)

def __init__(self, dataset):
  """Creates a new iterator over the given dataset.

  For example:
  ```python
  dataset = tf.data.Dataset.range(4)
  for x in Iterator(dataset):
    print(x)
  ```

  Tensors produced will be placed on the device on which this iterator object
  was created.

  Args:
    dataset: A `tf.data.Dataset` object.

  Raises:
    TypeError: If `dataset` is an unsupported type.
    RuntimeError: When invoked without eager execution enabled.
  """
  # pylint: disable=protected-access
  if (isinstance(dataset, prefetching_ops._PrefetchToDeviceDataset) or
      (isinstance(dataset, dataset_ops.DatasetV1Adapter) and
       isinstance(dataset._dataset,
                  prefetching_ops._PrefetchToDeviceDataset))):
    raise TypeError(
        "`tf.data.experimental.prefetch_to_device()` is not compatible with "
        "`tf.contrib.eager.Iterator`. Use `for ... in dataset:` to iterate "
        "over the dataset instead.")
  # pylint: enable=protected-access
  if not context.context().device_spec.device_type:
    is_remote_device = False
  else:
    is_remote_device = context.context().device_spec.device_type != "CPU"
  if is_remote_device:
    with ops.device(None):
      # Let the placer figure out where to place the various functions etc.
      # created by the CopyToDeviceDataset.
      dataset = dataset.apply(
          prefetching_ops.copy_to_device(context.context().device_name))
      dataset = dataset.prefetch(1)
  super(Iterator, self).__init__(dataset)

def testCopyToDeviceInt32(self):
  host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:1"))

  with ops.device("/cpu:1"):
    iterator = dataset_ops.make_one_shot_iterator(device_dataset)
    next_element = iterator.get_next()

  self.assertTrue(
      dataset_ops.get_structure(host_dataset).is_compatible_with(
          dataset_ops.get_structure(device_dataset)))
  self.assertTrue(
      dataset_ops.get_structure(host_dataset).is_compatible_with(
          dataset_ops.get_structure(iterator)))

  self.assertEqual(dtypes.int32, next_element.dtype)
  self.assertEqual((4,), next_element.shape)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config):
    self.assertAllEqual([0, 1, 2, 3], self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)

def testCopyToSameDevice(self):
  host_dataset = dataset_ops.Dataset.range(10)
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:0"))

  with ops.device("/cpu:0"):
    iterator = dataset_ops.make_one_shot_iterator(device_dataset)
    next_element = iterator.get_next()

  self.assertTrue(
      dataset_ops.get_structure(host_dataset).is_compatible_with(
          dataset_ops.get_structure(device_dataset)))
  self.assertTrue(
      dataset_ops.get_structure(host_dataset).is_compatible_with(
          dataset_ops.get_structure(iterator)))

  self.assertEqual(dtypes.int64, next_element.dtype)
  self.assertEqual([], next_element.shape)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config):
    for i in range(10):
      self.assertEqual(i, self.evaluate(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(next_element)

def testCopyToDeviceInt32(self):
  host_dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3])
  device_dataset = host_dataset.apply(
      prefetching_ops.copy_to_device("/cpu:1"))

  with ops.device("/cpu:1"):
    iterator = device_dataset.make_one_shot_iterator()
    next_element = iterator.get_next()

  self.assertEqual(host_dataset.output_types, device_dataset.output_types)
  self.assertEqual(host_dataset.output_types, iterator.output_types)
  self.assertEqual(host_dataset.output_shapes, device_dataset.output_shapes)
  self.assertEqual(host_dataset.output_shapes, iterator.output_shapes)
  self.assertEqual(host_dataset.output_classes, device_dataset.output_classes)
  self.assertEqual(host_dataset.output_classes, iterator.output_classes)

  self.assertEqual(dtypes.int32, next_element.dtype)
  self.assertEqual((4,), next_element.shape)

  worker_config = config_pb2.ConfigProto(device_count={"CPU": 2})
  with self.test_session(config=worker_config) as sess:
    self.assertAllEqual([0, 1, 2, 3], sess.run(next_element))
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(next_element)