def _next_internal(self):
  """Returns a nested structure of `tf.Tensor`s containing the next element."""
  with ops.device(self._device):
    if self._buffer_resource_handle is not None:
      ret = prefetching_ops.function_buffering_resource_get_next(
          function_buffer_resource=self._buffer_resource_handle,
          output_types=self._flat_output_types)
    else:
      # TODO(ashankar): Consider removing this ops.device() contextmanager
      # and instead mimic ops placement in graphs: Operations on resource
      # handles execute on the same device as where the resource is placed.
      # NOTE(mrry): Here we use the "_sync" variant of `iterator_get_next`
      # because in eager mode this code will run synchronously on the calling
      # thread. Therefore we do not need to make a defensive context switch
      # to a background thread, and can achieve a small constant performance
      # boost by invoking the iterator synchronously.
      ret = gen_dataset_ops.iterator_get_next_sync(
          self._resource,
          output_types=self._flat_output_types,
          output_shapes=self._flat_output_shapes)
    return sparse.deserialize_sparse_tensors(
        nest.pack_sequence_as(self._output_types, ret), self._output_types,
        self._output_shapes, self._output_classes)
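# A minimal usage sketch (an assumption for illustration, not part of the
# source above): the `function_buffering_resource_get_next` branch serves
# elements from a device-side prefetch buffer, such as one created with
# `tf.data.experimental.prefetch_to_device`. The "/gpu:0" device string is
# hypothetical.
import tensorflow as tf

ds = tf.data.Dataset.range(10)
ds = ds.apply(tf.data.experimental.prefetch_to_device("/gpu:0"))
for element in ds:  # Eager iteration ultimately drives _next_internal().
  print(element.numpy())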
def _next_internal(self):
  """Returns a nested structure of `tf.Tensor`s containing the next element."""
  if not context.executing_eagerly():
    with ops.device(self._device):
      ret = gen_dataset_ops.iterator_get_next(
          self._iterator_resource,
          output_types=self._flat_output_types,
          output_shapes=self._flat_output_shapes)
    return structure.from_compatible_tensor_list(self._element_spec, ret)

  # This runs in sync mode as iterators use an error status to communicate
  # that there is no more data to iterate over.
  # TODO(b/77291417): Fix
  with context.execution_mode(context.SYNC):
    with ops.device(self._device):
      # TODO(ashankar): Consider removing this ops.device() contextmanager
      # and instead mimic ops placement in graphs: Operations on resource
      # handles execute on the same device as where the resource is placed.
      # NOTE(mrry): Here we use the "_sync" variant of `iterator_get_next`
      # because in eager mode this code will run synchronously on the calling
      # thread. Therefore we do not need to make a defensive context switch
      # to a background thread, and can achieve a small constant performance
      # boost by invoking the iterator synchronously.
      ret = gen_dataset_ops.iterator_get_next_sync(
          self._iterator_resource,
          output_types=self._flat_output_types,
          output_shapes=self._flat_output_shapes)

    try:
      # Fast path for the case `self._structure` is not a nested structure.
      return self._element_spec._from_compatible_tensor_list(ret)  # pylint: disable=protected-access
    except AttributeError:
      return structure.from_compatible_tensor_list(self._element_spec, ret)
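# A short, hedged example of the eager path above: iterating a dataset with a
# nested (dict) element spec raises AttributeError on the single-spec fast
# path and falls back to `structure.from_compatible_tensor_list`, while a
# flat spec (e.g. `Dataset.range`) takes the fast path directly.
import tensorflow as tf

ds = tf.data.Dataset.from_tensor_slices({"x": [1, 2], "y": [3.0, 4.0]})
for element in ds:  # Each element is a dict of tensors.
  print(element["x"].numpy(), element["y"].numpy())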
def _next_internal(self):
  """Returns a nested structure of `tf.Tensor`s containing the next element."""
  if not context.executing_eagerly():
    with ops.device(self._device):
      ret = gen_dataset_ops.iterator_get_next(
          self._iterator_resource,
          output_types=self._flat_output_types,
          output_shapes=self._flat_output_shapes)
    return self._structure._from_compatible_tensor_list(ret)  # pylint: disable=protected-access

  # This runs in sync mode as iterators use an error status to communicate
  # that there is no more data to iterate over.
  # TODO(b/77291417): Fix
  with context.execution_mode(context.SYNC):
    with ops.device(self._device):
      # TODO(ashankar): Consider removing this ops.device() contextmanager
      # and instead mimic ops placement in graphs: Operations on resource
      # handles execute on the same device as where the resource is placed.
      # NOTE(mrry): Here we use the "_sync" variant of `iterator_get_next`
      # because in eager mode this code will run synchronously on the calling
      # thread. Therefore we do not need to make a defensive context switch
      # to a background thread, and can achieve a small constant performance
      # boost by invoking the iterator synchronously.
      ret = gen_dataset_ops.iterator_get_next_sync(
          self._iterator_resource,
          output_types=self._flat_output_types,
          output_shapes=self._flat_output_shapes)

    return self._structure._from_compatible_tensor_list(ret)  # pylint: disable=protected-access
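# Design note (an observation, not from the source): unlike the variant with
# the try/except fast path, this version calls
# `self._structure._from_compatible_tensor_list(ret)` unconditionally, which
# assumes the element structure is a single object exposing that method
# rather than a nest of per-component specs.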
def _next_internal(self):
  """Returns a nested structure of `tf.Tensor`s containing the next element."""
  # This runs in sync mode as iterators use an error status to communicate
  # that there is no more data to iterate over.
  # TODO(b/77291417): Fix
  with context.execution_mode(context.SYNC):
    with ops.device(self._device):
      # TODO(ashankar): Consider removing this ops.device() contextmanager
      # and instead mimic ops placement in graphs: Operations on resource
      # handles execute on the same device as where the resource is placed.
      # NOTE(mrry): Here we use the "_sync" variant of `iterator_get_next`
      # because in eager mode this code will run synchronously on the calling
      # thread. Therefore we do not need to make a defensive context switch
      # to a background thread, and can achieve a small constant performance
      # boost by invoking the iterator synchronously.
      ret = gen_dataset_ops.iterator_get_next_sync(
          self._resource,
          output_types=self._flat_output_types,
          output_shapes=self._flat_output_shapes)
    return sparse.deserialize_sparse_tensors(
        nest.pack_sequence_as(self._output_types, ret), self._output_types,
        self._output_shapes, self._output_classes)
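# A minimal sketch of the assumed caller side: the error status mentioned in
# the comment above surfaces in Python as `errors.OutOfRangeError`, which the
# iterator protocol wrapper translates into `StopIteration`, roughly:
from tensorflow.python.framework import errors

def __next__(self):  # Sketch of the method wrapping _next_internal().
  try:
    return self._next_internal()
  except errors.OutOfRangeError:
    raise StopIteration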