Example #1
    def _next_internal(self):
        autograph_status = autograph_ctx.control_status_ctx().status
        autograph_disabled = autograph_status == autograph_ctx.Status.DISABLED
        if not context.executing_eagerly() and autograph_disabled:
            self._get_next_call_count += 1
            if self._get_next_call_count > GET_NEXT_CALL_ERROR_THRESHOLD:
                raise ValueError(GET_NEXT_CALL_ERROR_MESSAGE)

        if not context.executing_eagerly():
            # TODO(b/169442955): Investigate the need for this colocation constraint.
            with ops.colocate_with(self._iterator_resource):
                ret = gen_dataset_ops.iterator_get_next(
                    self._iterator_resource,
                    output_types=self._flat_output_types,
                    output_shapes=self._flat_output_shapes)
            return structure.from_compatible_tensor_list(
                self._element_spec, ret)

        # TODO(b/77291417): This runs in sync mode as iterators use an error status
        # to communicate that there is no more data to iterate over.
        with context.execution_mode(context.SYNC):
            ret = gen_dataset_ops.iterator_get_next(
                self._iterator_resource,
                output_types=self._flat_output_types,
                output_shapes=self._flat_output_shapes)

            try:
                # Fast path for the case `self._structure` is not a nested structure.
                return self._element_spec._from_compatible_tensor_list(ret)  # pylint: disable=protected-access
            except AttributeError:
                return structure.from_compatible_tensor_list(
                    self._element_spec, ret)
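The sync-mode TODO above exists because the iterator reports end-of-sequence through an error status rather than a sentinel value. As a hedged sketch of how this method is typically consumed (the `__next__` wrapper below is an assumption modeled on the `next()` variants later in this section, not part of this snippet):

    def __next__(self):
        # Hypothetical wrapper (assumes `errors` is
        # tensorflow.python.framework.errors): translate the end-of-sequence
        # error status into StopIteration so plain Python `for` loops over
        # the iterator terminate cleanly.
        try:
            return self._next_internal()
        except errors.OutOfRangeError:
            raise StopIteration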
Example #2
    def _next_internal(self):
        if not context.executing_eagerly():
            # TODO(b/169442955): Investigate the need for this colocation constraint.
            with ops.colocate_with(self._iterator_resource):
                ret = gen_dataset_ops.iterator_get_next(
                    self._iterator_resource,
                    output_types=self._flat_output_types,
                    output_shapes=self._flat_output_shapes)
            return structure.from_compatible_tensor_list(
                self._element_spec, ret)

        # TODO(b/77291417): This runs in sync mode as iterators use an error status
        # to communicate that there is no more data to iterate over.
        with context.execution_mode(context.SYNC):
            ret = gen_dataset_ops.iterator_get_next(
                self._iterator_resource,
                output_types=self._flat_output_types,
                output_shapes=self._flat_output_shapes)

            try:
                # Fast path for the case `self._structure` is not a nested structure.
                return self._element_spec._from_compatible_tensor_list(ret)  # pylint: disable=protected-access
            except AttributeError:
                return structure.from_compatible_tensor_list(
                    self._element_spec, ret)
Example #3
    def _next_internal(self):
        """Returns a nested structure of `tf.Tensor`s containing the next element.
    """
        if not context.executing_eagerly():
            with ops.device(self._device):
                ret = gen_dataset_ops.iterator_get_next(
                    self._iterator_resource,
                    output_types=self._flat_output_types,
                    output_shapes=self._flat_output_shapes)
            return structure.from_compatible_tensor_list(
                self._element_spec, ret)

        # This runs in sync mode as iterators use an error status to communicate
        # that there is no more data to iterate over.
        # TODO(b/77291417): Fix
        with context.execution_mode(context.SYNC):
            with ops.device(self._device):
                # TODO(ashankar): Consider removing this ops.device() contextmanager
                # and instead mimic ops placement in graphs: Operations on resource
                # handles execute on the same device as where the resource is placed.
                ret = gen_dataset_ops.iterator_get_next(
                    self._iterator_resource,
                    output_types=self._flat_output_types,
                    output_shapes=self._flat_output_shapes)

            try:
                # Fast path for the case `self._structure` is not a nested structure.
                return self._element_spec._from_compatible_tensor_list(ret)  # pylint: disable=protected-access
            except AttributeError:
                return structure.from_compatible_tensor_list(
                    self._element_spec, ret)
Example #4
  def _next_internal(self):
    """Returns a nested structure of `tf.Tensor`s containing the next element.
    """
    if not context.executing_eagerly():
      with ops.device(self._device):
        ret = gen_dataset_ops.iterator_get_next(
            self._iterator_resource,
            output_types=self._flat_output_types,
            output_shapes=self._flat_output_shapes)
      return structure.from_compatible_tensor_list(self._element_spec, ret)

    # This runs in sync mode as iterators use an error status to communicate
    # that there is no more data to iterate over.
    # TODO(b/77291417): Fix
    with context.execution_mode(context.SYNC):
      with ops.device(self._device):
        # TODO(ashankar): Consider removing this ops.device() contextmanager
        # and instead mimic ops placement in graphs: Operations on resource
        # handles execute on the same device as where the resource is placed.
        # NOTE(mrry): Here we use the "_sync" variant of `iterator_get_next`
        # because in eager mode this code will run synchronously on the calling
        # thread. Therefore we do not need to make a defensive context switch
        # to a background thread, and can achieve a small constant performance
        # boost by invoking the iterator synchronously.
        ret = gen_dataset_ops.iterator_get_next_sync(
            self._iterator_resource,
            output_types=self._flat_output_types,
            output_shapes=self._flat_output_shapes)

      try:
        # Fast path for the case `self._structure` is not a nested structure.
        return self._element_spec._from_compatible_tensor_list(ret)  # pylint: disable=protected-access
      except AttributeError:
        return structure.from_compatible_tensor_list(self._element_spec, ret)
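The NOTE above hinges on eager execution running the op on the calling thread. A minimal usage sketch under that assumption (modern TF spelling; the toy dataset is an assumption):

import tensorflow as tf  # sketch assumes eager execution is enabled

ds = tf.data.Dataset.range(3)  # assumed toy dataset
it = iter(ds)
first = next(it)  # blocks the calling thread until the element is ready,
                  # matching the synchronous dispatch described in the NOTE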
Example #5
    def get_next(self, name=None):
        """Returns a nested structure of `tf.Tensor`s containing the next element.

        Args:
          name: (Optional.) A name for the created operation.

        Returns:
          A nested structure of `tf.Tensor` objects.
        """
        self._get_next_call_count += 1
        if self._get_next_call_count > GET_NEXT_CALL_WARNING_THRESHOLD:
            warnings.warn(GET_NEXT_CALL_WARNING_MESSAGE)

        return sparse.deserialize_sparse_tensors(
            nest.pack_sequence_as(
                self._output_types,
                gen_dataset_ops.iterator_get_next(
                    self._iterator_resource,
                    output_types=nest.flatten(
                        sparse.as_dense_types(self._output_types,
                                              self._output_classes)),
                    output_shapes=nest.flatten(
                        sparse.as_dense_shapes(self._output_shapes,
                                               self._output_classes)),
                    name=name)), self._output_types, self._output_shapes,
            self._output_classes)
Example #6
  def get_next(self, name=None):
    """Returns a nested structure of `tf.Tensor`s containing the next element.

    Args:
      name: (Optional.) A name for the created operation.

    Returns:
      A nested structure of `tf.Tensor` objects.
    """
    self._get_next_call_count += 1
    if self._get_next_call_count > GET_NEXT_CALL_WARNING_THRESHOLD:
      warnings.warn(GET_NEXT_CALL_WARNING_MESSAGE)

    return sparse.deserialize_sparse_tensors(
        nest.pack_sequence_as(self._output_types,
                              gen_dataset_ops.iterator_get_next(
                                  self._iterator_resource,
                                  output_types=nest.flatten(
                                      sparse.as_dense_types(
                                          self._output_types,
                                          self._output_classes)),
                                  output_shapes=nest.flatten(
                                      sparse.as_dense_shapes(
                                          self._output_shapes,
                                          self._output_classes)),
                                  name=name)), self._output_types,
        self._output_shapes, self._output_classes)
Example #7
  def _next_internal(self):
    """Returns a nested structure of `tf.Tensor`s containing the next element.
    """
    if not context.executing_eagerly():
      with ops.device(self._device):
        ret = gen_dataset_ops.iterator_get_next(
            self._iterator_resource,
            output_types=self._flat_output_types,
            output_shapes=self._flat_output_shapes)
      return self._structure._from_compatible_tensor_list(ret)  # pylint: disable=protected-access

    # This runs in sync mode as iterators use an error status to communicate
    # that there is no more data to iterate over.
    # TODO(b/77291417): Fix
    with context.execution_mode(context.SYNC):
      with ops.device(self._device):
        # TODO(ashankar): Consider removing this ops.device() contextmanager
        # and instead mimic ops placement in graphs: Operations on resource
        # handles execute on the same device as where the resource is placed.
        # NOTE(mrry): Here we use the "_sync" variant of `iterator_get_next`
        # because in eager mode this code will run synchronously on the calling
        # thread. Therefore we do not need to make a defensive context switch
        # to a background thread, and can achieve a small constant performance
        # boost by invoking the iterator synchronously.
        ret = gen_dataset_ops.iterator_get_next_sync(
            self._iterator_resource,
            output_types=self._flat_output_types,
            output_shapes=self._flat_output_shapes)

      return self._structure._from_compatible_tensor_list(ret)  # pylint: disable=protected-access
Example #8
    def get_next(self, name=None):
        """Returns the next element.

        In graph mode, you should typically call this method *once* and use its
        result as the input to another computation. A typical loop will then call
        `tf.Session.run` on the result of that computation. The loop will terminate
        when the `Iterator.get_next()` operation raises
        `tf.errors.OutOfRangeError`. The following skeleton shows how to use
        this method when building a training loop:

        ```python
        dataset = ...  # A `tf.data.Dataset` object.
        iterator = dataset.make_initializable_iterator()
        next_element = iterator.get_next()

        # Build a TensorFlow graph that does something with each element.
        loss = model_function(next_element)
        optimizer = ...  # A `tf.compat.v1.train.Optimizer` object.
        train_op = optimizer.minimize(loss)

        with tf.compat.v1.Session() as sess:
          try:
            while True:
              sess.run(train_op)
          except tf.errors.OutOfRangeError:
            pass
        ```

        NOTE: It is legitimate to call `Iterator.get_next()` multiple times, e.g.
        when you are distributing different elements to multiple devices in a single
        step. However, a common pitfall arises when users call `Iterator.get_next()`
        in each iteration of their training loop. `Iterator.get_next()` adds ops to
        the graph, and executing each op allocates resources (including threads); as
        a consequence, invoking it in every iteration of a training loop causes
        slowdown and eventual resource exhaustion. To guard against this outcome, we
        log a warning when the number of uses crosses a fixed threshold of
        suspiciousness.

        Args:
          name: (Optional.) A name for the created operation.

        Returns:
          A (nested) structure of values matching `tf.data.Iterator.element_spec`.
        """
        self._get_next_call_count += 1
        if self._get_next_call_count > GET_NEXT_CALL_WARNING_THRESHOLD:
            warnings.warn(GET_NEXT_CALL_WARNING_MESSAGE)

        # TODO(b/169442955): Investigate the need for this colocation constraint.
        with ops.colocate_with(self._iterator_resource):
            # pylint: disable=protected-access
            flat_ret = gen_dataset_ops.iterator_get_next(
                self._iterator_resource,
                output_types=self._flat_tensor_types,
                output_shapes=self._flat_tensor_shapes,
                name=name)
            return structure.from_tensor_list(self._element_spec, flat_ret)
Example #9
    def next(self):
        """Return the next tf.Tensor from the dataset."""
        try:
            ret = gen_dataset_ops.iterator_get_next(
                self._resource,
                output_types=self._flat_output_types,
                output_shapes=self._flat_output_shapes)
            return nest.pack_sequence_as(self._output_types, ret)
        except errors.OutOfRangeError:
            raise StopIteration
Example #10
  def next(self):
    """Return the next tf.Tensor from the dataset."""
    try:
      ret = gen_dataset_ops.iterator_get_next(
          self._resource,
          output_types=self._flat_output_types,
          output_shapes=self._flat_output_shapes)
      return nest.pack_sequence_as(self._output_types, ret)
    except errors.OutOfRangeError:
      raise StopIteration
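Raising StopIteration here is what hooks the iterator into Python's native iteration protocol. A hedged usage sketch (the iterator construction is assumed, not shown in the snippet):

# Hypothetical sketch: StopIteration ends the loop naturally.
iterator = ...  # an eager Iterator over some dataset (assumption)
while True:
  try:
    element = iterator.next()
  except StopIteration:
    break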
Example #11
  def get_next(self, name=None):
    """Returns a nested structure of `tf.Tensor`s representing the next element.

    In graph mode, you should typically call this method *once* and use its
    result as the input to another computation. A typical loop will then call
    `tf.Session.run` on the result of that computation. The loop will terminate
    when the `Iterator.get_next()` operation raises
    `tf.errors.OutOfRangeError`. The following skeleton shows how to use
    this method when building a training loop:

    ```python
    dataset = ...  # A `tf.data.Dataset` object.
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()

    # Build a TensorFlow graph that does something with each element.
    loss = model_function(next_element)
    optimizer = ...  # A `tf.train.Optimizer` object.
    train_op = optimizer.minimize(loss)

    with tf.Session() as sess:
      try:
        while True:
          sess.run(train_op)
      except tf.errors.OutOfRangeError:
        pass
    ```

    NOTE: It is legitimate to call `Iterator.get_next()` multiple times, e.g.
    when you are distributing different elements to multiple devices in a single
    step. However, a common pitfall arises when users call `Iterator.get_next()`
    in each iteration of their training loop. `Iterator.get_next()` adds ops to
    the graph, and executing each op allocates resources (including threads); as
    a consequence, invoking it in every iteration of a training loop causes
    slowdown and eventual resource exhaustion. To guard against this outcome, we
    log a warning when the number of uses crosses a fixed threshold of
    suspiciousness.

    Args:
      name: (Optional.) A name for the created operation.

    Returns:
      A nested structure of `tf.Tensor` objects.
    """
    self._get_next_call_count += 1
    if self._get_next_call_count > GET_NEXT_CALL_WARNING_THRESHOLD:
      warnings.warn(GET_NEXT_CALL_WARNING_MESSAGE)

    # pylint: disable=protected-access
    flat_ret = gen_dataset_ops.iterator_get_next(
        self._iterator_resource,
        output_types=self._structure._flat_types,
        output_shapes=self._structure._flat_shapes, name=name)
    return self._structure._from_tensor_list(flat_ret)
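The pitfall described in the docstring is easiest to see in code. A sketch of the anti-pattern it warns against, for contrast with the docstring's own skeleton (the toy dataset is an assumption; TF1-style API):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # run this sketch in graph mode

dataset = tf.data.Dataset.range(10)  # assumed toy dataset
iterator = dataset.make_one_shot_iterator()
with tf.Session() as sess:
  try:
    while True:
      # Anti-pattern: each call adds a fresh get_next op to the graph,
      # which is exactly what the warning above guards against.
      sess.run(iterator.get_next())
  except tf.errors.OutOfRangeError:
    pass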
Example #12
  def next(self):
    """Return the next tf.Tensor from the dataset."""
    try:
      # TODO(ashankar): Consider removing this ops.device() contextmanager
      # and instead mimic ops placement in graphs: Operations on resource
      # handles execute on the same device as where the resource is placed.
      with ops.device("/device:CPU:0"):
        ret = gen_dataset_ops.iterator_get_next(
            self._resource,
            output_types=self._flat_output_types,
            output_shapes=self._flat_output_shapes)
        return nest.pack_sequence_as(self._output_types, ret)
    except errors.OutOfRangeError:
      raise StopIteration
Example #13
    def next(self):
        """Return the next tf.Tensor from the dataset."""
        try:
            # TODO(ashankar): Consider removing this ops.device() contextmanager
            # and instead mimic ops placement in graphs: Operations on resource
            # handles execute on the same device as where the resource is placed.
            with ops.device("/device:CPU:0"):
                ret = gen_dataset_ops.iterator_get_next(
                    self._resource,
                    output_types=self._flat_output_types,
                    output_shapes=self._flat_output_shapes)
                return nest.pack_sequence_as(self._output_types, ret)
        except errors.OutOfRangeError:
            raise StopIteration
Example #14
    def get_next(self, name=None):
        """Returns a nested structure of `tf.Tensor`s containing the next element.

        Args:
          name: (Optional.) A name for the created operation.

        Returns:
          A nested structure of `tf.Tensor` objects.
        """
        return nest.pack_sequence_as(
            self._output_types,
            gen_dataset_ops.iterator_get_next(
                self._iterator_resource,
                output_types=nest.flatten(self._output_types),
                output_shapes=nest.flatten(self._output_shapes),
                name=name))
Example #15
  def get_next(self, name=None):
    """Returns a nested structure of `tf.Tensor`s containing the next element.

    Args:
      name: (Optional.) A name for the created operation.

    Returns:
      A nested structure of `tf.Tensor` objects.
    """
    return nest.pack_sequence_as(
        self._output_types,
        gen_dataset_ops.iterator_get_next(
            self._iterator_resource,
            output_types=nest.flatten(self._output_types),
            output_shapes=nest.flatten(self._output_shapes),
            name=name))
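Because the flat tensors are re-packed with `nest.pack_sequence_as`, the value returned by `get_next()` mirrors the dataset's element structure. A hedged sketch (the toy dataset and TF1-style API are assumptions):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # graph-mode sketch

dataset = tf.data.Dataset.from_tensors({"x": [1, 2], "y": 3.0})
iterator = dataset.make_one_shot_iterator()
element = iterator.get_next()  # a dict with keys "x" and "y", like the input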
Example #16
    def next(self):
        """Return the next tf.Tensor from the dataset."""
        with ops.device(self._device):
            try:
                if self._buffer_resource_handle is not None:
                    ret = prefetching_ops.function_buffering_resource_get_next(
                        function_buffer_resource=self._buffer_resource_handle,
                        output_types=self._flat_output_types)
                else:
                    # TODO(ashankar): Consider removing this ops.device() contextmanager
                    # and instead mimic ops placement in graphs: Operations on resource
                    # handles execute on the same device as where the resource is placed.
                    ret = gen_dataset_ops.iterator_get_next(
                        self._resource,
                        output_types=self._flat_output_types,
                        output_shapes=self._flat_output_shapes)
            except errors.OutOfRangeError:
                raise StopIteration
            return nest.pack_sequence_as(self._output_types, ret)
Example #17
  def next(self):
    """Return the next tf.Tensor from the dataset."""
    try:
      # TODO(ashankar): Consider removing this ops.device() contextmanager
      # and instead mimic ops placement in graphs: Operations on resource
      # handles execute on the same device as where the resource is placed.
      with ops.device("/device:CPU:0"):
        ret = gen_dataset_ops.iterator_get_next(
            self._resource,
            output_types=self._flat_output_types,
            output_shapes=self._flat_output_shapes)
    except errors.OutOfRangeError:
      raise StopIteration
    # Copies tensors from CPU to the current device if necessary.
    # TODO(rohanj): This should be replaced by the mechanism to have the
    # runtime's threads copy tensors to the destination device.
    with ops.device(self._device):
      ret = [array_ops.identity(x) for x in ret]
      return nest.pack_sequence_as(self._output_types, ret)
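The `array_ops.identity` loop above relies on a general TensorFlow behavior: running an identity op under a device scope materializes its input on that device. A minimal sketch (the device string and `cpu_value` tensor are assumptions):

# Hypothetical sketch: identity under a device scope copies across devices.
with ops.device("/device:GPU:0"):  # assumed destination device
  gpu_value = array_ops.identity(cpu_value)  # cpu_value: a CPU-resident tensor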
Example #18
  def next(self):
    """Return the next tf.Tensor from the dataset."""
    with ops.device(self._device):
      try:
        if self._buffer_resource_handle is not None:
          ret = prefetching_ops.function_buffering_resource_get_next(
              function_buffer_resource=self._buffer_resource_handle,
              output_types=self._flat_output_types)
        else:
          # TODO(ashankar): Consider removing this ops.device() contextmanager
          # and instead mimic ops placement in graphs: Operations on resource
          # handles execute on the same device as where the resource is placed.
          ret = gen_dataset_ops.iterator_get_next(
              self._resource,
              output_types=self._flat_output_types,
              output_shapes=self._flat_output_shapes)
      except errors.OutOfRangeError:
        raise StopIteration
      return nest.pack_sequence_as(self._output_types, ret)
Example #19
    def _next_internal(self):
        """Returns a nested structure of `tf.Tensor`s containing the next element.
    """
        with ops.device(self._device):
            if self._buffer_resource_handle is not None:
                ret = prefetching_ops.function_buffering_resource_get_next(
                    function_buffer_resource=self._buffer_resource_handle,
                    output_types=self._flat_output_types)
            else:
                # TODO(ashankar): Consider removing this ops.device() contextmanager
                # and instead mimic ops placement in graphs: Operations on resource
                # handles execute on the same device as where the resource is placed.
                ret = gen_dataset_ops.iterator_get_next(
                    self._resource,
                    output_types=self._flat_output_types,
                    output_shapes=self._flat_output_shapes)

        return sparse.deserialize_sparse_tensors(
            nest.pack_sequence_as(self._output_types, ret), self._output_types,
            self._output_shapes, self._output_classes)
Example #20
  def _next_internal(self):
    """Returns a nested structure of `tf.Tensor`s containing the next element.
    """
    with ops.device(self._device):
      if self._buffer_resource_handle is not None:
        ret = prefetching_ops.function_buffering_resource_get_next(
            function_buffer_resource=self._buffer_resource_handle,
            output_types=self._flat_output_types)
      else:
        # TODO(ashankar): Consider removing this ops.device() contextmanager
        # and instead mimic ops placement in graphs: Operations on resource
        # handles execute on the same device as where the resource is placed.
        ret = gen_dataset_ops.iterator_get_next(
            self._resource,
            output_types=self._flat_output_types,
            output_shapes=self._flat_output_shapes)

    return sparse.deserialize_sparse_tensors(
        nest.pack_sequence_as(self._output_types, ret), self._output_types,
        self._output_shapes, self._output_classes)
Example #21
  def get_next(self, name=None):
    """Returns a nested structure of `tf.Tensor`s representing the next element.

    In graph mode, you should typically call this method *once* and use its
    result as the input to another computation. A typical loop will then call
    @{tf.Session.run} on the result of that computation. The loop will terminate
    when the `Iterator.get_next()` operation raises
    @{tf.errors.OutOfRangeError}. The following skeleton shows how to use
    this method when building a training loop:

    ```python
    dataset = ...  # A `tf.data.Dataset` object.
    iterator = dataset.make_initializable_iterator()
    next_element = iterator.get_next()

    # Build a TensorFlow graph that does something with each element.
    loss = model_function(next_element)
    optimizer = ...  # A `tf.train.Optimizer` object.
    train_op = optimizer.minimize(loss)

    with tf.Session() as sess:
      try:
        while True:
          sess.run(train_op)
      except tf.errors.OutOfRangeError:
        pass
    ```

    NOTE: It is legitimate to call `Iterator.get_next()` multiple times, e.g.
    when you are distributing different elements to multiple devices in a single
    step. However, a common pitfall arises when users call `Iterator.get_next()`
    in each iteration of their training loop. `Iterator.get_next()` adds ops to
    the graph, and executing each op allocates resources (including threads); as
    a consequence, invoking it in every iteration of a training loop causes
    slowdown and eventual resource exhaustion. To guard against this outcome, we
    log a warning when the number of uses crosses a fixed threshold of
    suspiciousness.

    Args:
      name: (Optional.) A name for the created operation.

    Returns:
      A nested structure of `tf.Tensor` objects.
    """
    self._get_next_call_count += 1
    if self._get_next_call_count > GET_NEXT_CALL_WARNING_THRESHOLD:
      warnings.warn(GET_NEXT_CALL_WARNING_MESSAGE)

    return sparse.deserialize_sparse_tensors(
        nest.pack_sequence_as(self._output_types,
                              gen_dataset_ops.iterator_get_next(
                                  self._iterator_resource,
                                  output_types=nest.flatten(
                                      sparse.as_dense_types(
                                          self._output_types,
                                          self._output_classes)),
                                  output_shapes=nest.flatten(
                                      sparse.as_dense_shapes(
                                          self._output_shapes,
                                          self._output_classes)),
                                  name=name)), self._output_types,
        self._output_shapes, self._output_classes)
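The deeply nested call above is a flatten / transform / re-pack pipeline. A small standalone sketch of the `nest` round trip it relies on (the toy structure is an assumption; the public `tf.nest` mirrors the internal helper used here):

import tensorflow as tf

structure = {"a": 1, "b": (2, 3)}                    # toy nested structure
flat = tf.nest.flatten(structure)                    # [1, 2, 3] (dict keys sorted)
rebuilt = tf.nest.pack_sequence_as(structure, flat)  # original nesting restored
assert rebuilt == structure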