Example No. 1
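An eager-mode `_next_internal` from TensorFlow's prefetching iterator. When a `FunctionBufferingResource` handle is present (elements are being prefetched, e.g. onto a GPU), the next element is pulled from the buffer via `function_buffering_resource_get_next`; otherwise the iterator is read directly with the synchronous `iterator_get_next_sync` kernel, for the reasons the inline comments explain. Either way, any serialized sparse tensors in the result are deserialized before being returned.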
  def _next_internal(self):
    """Returns a nested structure of `tf.Tensor`s containing the next element.
    """
    with ops.device(self._device):
      if self._buffer_resource_handle is not None:
        ret = prefetching_ops.function_buffering_resource_get_next(
            function_buffer_resource=self._buffer_resource_handle,
            output_types=self._flat_output_types)
      else:
        # TODO(ashankar): Consider removing this ops.device() contextmanager
        # and instead mimic ops placement in graphs: Operations on resource
        # handles execute on the same device as where the resource is placed.
        # NOTE(mrry): Here we use the "_sync" variant of `iterator_get_next`
        # because in eager mode this code will run synchronously on the calling
        # thread. Therefore we do not need to make a defensive context switch
        # to a background thread, and can achieve a small constant performance
        # boost by invoking the iterator synchronously.
        ret = gen_dataset_ops.iterator_get_next_sync(
            self._resource,
            output_types=self._flat_output_types,
            output_shapes=self._flat_output_shapes)

    return sparse.deserialize_sparse_tensors(
        nest.pack_sequence_as(self._output_types, ret), self._output_types,
        self._output_shapes, self._output_classes)
Example No. 2
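A test helper that builds a `FunctionBufferingResource` on `device1`, backed by a function that pulls elements from an iterator living on `device0`, and returns the three ops needed to exercise it: getting the next buffered element, resetting the buffer, and destroying the resource. A usage sketch follows the code.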
  def _create_ops(self, ds, ds_iterator, buffer_name, device0, device1):
    ds_iterator_handle = ds_iterator.string_handle()

    @function.Defun(dtypes.string)
    def _remote_fn(h):
      remote_iterator = iterator_ops.Iterator.from_string_handle(
          h, ds.output_types, ds.output_shapes)
      return remote_iterator.get_next()

    target = constant_op.constant(device0)
    with ops.device(device1):
      buffer_resource_handle = prefetching_ops.function_buffering_resource(
          f=_remote_fn,
          output_types=[dtypes.float32],
          target_device=target,
          string_arg=ds_iterator_handle,
          buffer_size=3,
          shared_name=buffer_name)

    with ops.device(device1):
      prefetch_op = prefetching_ops.function_buffering_resource_get_next(
          function_buffer_resource=buffer_resource_handle,
          output_types=[dtypes.float32])
      reset_op = prefetching_ops.function_buffering_resource_reset(
          function_buffer_resource=buffer_resource_handle)
      destroy_op = resource_variable_ops.destroy_resource_op(
          buffer_resource_handle, ignore_lookup_error=True)

    return (prefetch_op, reset_op, destroy_op)
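A minimal sketch of driving these ops from a session, assuming `ds` is a float32 dataset yielding 1.0, 2.0, ... and that `device0` and `device1` name two available devices (everything in this sketch is hypothetical):

    prefetch_op, reset_op, destroy_op = self._create_ops(
        ds, ds.make_one_shot_iterator(), "hypothetical_buffer", device0, device1)
    with self.test_session() as sess:
      self.assertEqual([1.0], sess.run(prefetch_op))  # next element from the buffer
      self.assertEqual([2.0], sess.run(prefetch_op))
      sess.run(reset_op)    # discard buffered elements and restart prefetching
      sess.run(destroy_op)  # release the underlying resource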
Example No. 4
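A test helper built around a generator-backed dataset on `device0` that is prefetched into a buffer on `device1` with a two-thread pool. The generator sets a `threading.Event` once the prefetcher has pulled six elements; the test waits on it after consuming only four, confirming that the buffer ran ahead of the consumer before pulling the fifth element and destroying the resource.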
    def _prefetch_fn_helper(self, buffer_name, device0, device1):
        worker_config = config_pb2.ConfigProto()
        worker_config.device_count["CPU"] = 2

        def gen():
            for i in itertools.count(start=1, step=1):
                yield [i + 0.0]
                if i == 6:
                    self._event.set()

        with ops.device(device0):
            dataset_3 = dataset_ops.Dataset.from_generator(
                gen, (dtypes.float32))
            iterator_3 = dataset_3.make_one_shot_iterator()
            iterator_3_handle = iterator_3.string_handle()

        @function.Defun(dtypes.string)
        def _remote_fn(h):
            remote_iterator = iterator_ops.Iterator.from_string_handle(
                h, dataset_3.output_types, dataset_3.output_shapes)
            return remote_iterator.get_next()

        target = constant_op.constant(device0)
        with ops.device(device1):
            buffer_resource_handle = prefetching_ops.function_buffering_resource(
                f=_remote_fn,
                target_device=target,
                string_arg=iterator_3_handle,
                buffer_size=3,
                thread_pool_size=2,
                shared_name=buffer_name)

        with ops.device(device1):
            prefetch_op = prefetching_ops.function_buffering_resource_get_next(
                function_buffer_resource=buffer_resource_handle,
                output_types=[dtypes.float32])

        with self.test_session(config=worker_config) as sess:
            elem = sess.run(prefetch_op)
            self.assertEqual(elem, [1.0])
            elem = sess.run(prefetch_op)
            self.assertEqual(elem, [2.0])
            elem = sess.run(prefetch_op)
            self.assertEqual(elem, [3.0])
            elem = sess.run(prefetch_op)
            self.assertEqual(elem, [4.0])
            self._event.wait()
            elem = sess.run(prefetch_op)
            self.assertEqual(elem, [5.0])
            sess.run(
                resource_variable_ops.destroy_resource_op(
                    buffer_resource_handle, ignore_lookup_error=True))
Example No. 5
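The same buffer-or-fallback dispatch as Example No. 1, except that when no buffering resource exists it delegates to the base class's `_next_internal` instead of invoking the kernel directly.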
  def _next_internal(self):
    """Returns a nested structure of `tf.Tensor`s containing the next element.
    """
    if self._buffer_resource_handle is not None:
      with ops.device(self._device):
        ret = prefetching_ops.function_buffering_resource_get_next(
            function_buffer_resource=self._buffer_resource_handle,
            output_types=self._flat_output_types)
      return sparse.deserialize_sparse_tensors(
          nest.pack_sequence_as(self._output_types, ret),
          self._output_types, self._output_shapes, self._output_classes)
    else:
      return super(Iterator, self)._next_internal()
Example No. 8
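A later revision of `_next_internal` that pins execution to synchronous mode: iterators signal end-of-sequence through an error status, which asynchronous eager execution does not yet handle (the TODO tracks this as b/77291417).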
 def _next_internal(self):
   """Returns a nested structure of `tf.Tensor`s containing the next element.
   """
   # This runs in sync mode as iterators use an error status to communicate
   # that there is no more data to iterate over.
   # TODO(b/77291417): Fix
   with context.execution_mode(context.SYNC):
     if self._buffer_resource_handle is not None:
       with ops.device(self._device):
         ret = prefetching_ops.function_buffering_resource_get_next(
             function_buffer_resource=self._buffer_resource_handle,
             output_types=self._flat_output_types)
       return sparse.deserialize_sparse_tensors(
           nest.pack_sequence_as(self._output_types, ret), self._output_types,
           self._output_shapes, self._output_classes)
     else:
       return super(Iterator, self)._next_internal()
Example No. 10
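The `next()` method of an early eager iterator. An `OutOfRangeError` raised by the underlying kernel is translated into Python's `StopIteration`, so the iterator terminates `for` loops cleanly; unlike Example No. 1, this version does not deserialize sparse tensors.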
 def next(self):
   """Return the next tf.Tensor from the dataset."""
   with ops.device(self._device):
     try:
       if self._buffer_resource_handle is not None:
         ret = prefetching_ops.function_buffering_resource_get_next(
             function_buffer_resource=self._buffer_resource_handle,
             output_types=self._flat_output_types)
       else:
         # TODO(ashankar): Consider removing this ops.device() contextmanager
         # and instead mimic ops placement in graphs: Operations on resource
         # handles execute on the same device as where the resource is placed.
         ret = gen_dataset_ops.iterator_get_next(
             self._resource,
             output_types=self._flat_output_types,
             output_shapes=self._flat_output_shapes)
     except errors.OutOfRangeError:
       raise StopIteration
     return nest.pack_sequence_as(self._output_types, ret)
Example No. 11
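A complete GPU test: a string dataset's iterator lives on the CPU (`device0`) and its elements are prefetched into a buffer placed on the GPU (`device1`). The test drains all three elements, verifies that one more read raises `OutOfRangeError`, and then destroys the resource.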
  def testStringsGPU(self):
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")

    device0 = "/job:localhost/replica:0/task:0/cpu:0"
    device1 = "/job:localhost/replica:0/task:0/gpu:0"

    ds = dataset_ops.Dataset.from_tensor_slices(["a", "b", "c"])
    ds_iterator = ds.make_one_shot_iterator()
    ds_iterator_handle = ds_iterator.string_handle()

    @function.Defun(dtypes.string)
    def _remote_fn(h):
      remote_iterator = iterator_ops.Iterator.from_string_handle(
          h, ds.output_types, ds.output_shapes)
      return remote_iterator.get_next()

    target = constant_op.constant(device0)
    with ops.device(device1):
      buffer_resource_handle = prefetching_ops.function_buffering_resource(
          f=_remote_fn,
          output_types=[dtypes.string],
          target_device=target,
          string_arg=ds_iterator_handle,
          buffer_size=3,
          shared_name="strings")

    with ops.device(device1):
      prefetch_op = prefetching_ops.function_buffering_resource_get_next(
          function_buffer_resource=buffer_resource_handle,
          output_types=[dtypes.string])
      destroy_op = resource_variable_ops.destroy_resource_op(
          buffer_resource_handle, ignore_lookup_error=True)

    with self.cached_session() as sess:
      self.assertEqual([b"a"], sess.run(prefetch_op))
      self.assertEqual([b"b"], sess.run(prefetch_op))
      self.assertEqual([b"c"], sess.run(prefetch_op))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(prefetch_op)

      sess.run(destroy_op)
Example No. 13
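Structurally identical to Example No. 1, but reading the fallback path with the plain `iterator_get_next` kernel rather than its `_sync` variant.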
  def _next_internal(self):
    """Returns a nested structure of `tf.Tensor`s containing the next element.
    """
    with ops.device(self._device):
      if self._buffer_resource_handle is not None:
        ret = prefetching_ops.function_buffering_resource_get_next(
            function_buffer_resource=self._buffer_resource_handle,
            output_types=self._flat_output_types)
      else:
        # TODO(ashankar): Consider removing this ops.device() contextmanager
        # and instead mimic ops placement in graphs: Operations on resource
        # handles execute on the same device as where the resource is placed.
        ret = gen_dataset_ops.iterator_get_next(
            self._resource,
            output_types=self._flat_output_types,
            output_shapes=self._flat_output_shapes)

    return sparse.deserialize_sparse_tensors(
        nest.pack_sequence_as(self._output_types, ret), self._output_types,
        self._output_shapes, self._output_classes)
Example No. 14
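A helper from an image input pipeline: given a `FunctionBufferingResource` whose function produces (image, label) pairs, it fetches the next pair from the buffer. A usage sketch follows the code.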
def get_images_and_labels(function_buffering_resource, data_type):
    """Given a FunctionBufferingResource obtains images and labels from it."""
    return prefetching_ops.function_buffering_resource_get_next(
        function_buffer_resource=function_buffering_resource,
        output_types=[data_type, tf.int32])
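A minimal usage sketch, assuming `buffer_resource` is a handle created with `prefetching_ops.function_buffering_resource` (as in Examples No. 2 and No. 11) from a function that returns an (image, label) pair; the names here are hypothetical:

    images, labels = get_images_and_labels(buffer_resource, tf.float32)
    with tf.Session() as sess:
      # Each run pulls one prefetched (image, label) pair from the buffer.
      image_batch, label_batch = sess.run([images, labels])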