Example #1
    def test_generated_rpc_ops(self):
        @eager_def_function.function(input_signature=[
            tensor_spec.TensorSpec([], dtypes.int32),
            tensor_spec.TensorSpec([], dtypes.int32)
        ])
        def remote_fn(a, b):
            return math_ops.multiply(a, b)

        concrete_remote_fn = remote_fn.get_concrete_function()

        a = variables.Variable(2, dtype=dtypes.int32)
        b = variables.Variable(3, dtype=dtypes.int32)

        port = portpicker.pick_unused_port()
        address = "localhost:{}".format(port)
        server_resource = rpc_ops.gen_rpc_ops.rpc_server(
            server_address=address)

        rpc_ops.gen_rpc_ops.rpc_server_register(
            server_resource,
            f=concrete_remote_fn,
            captured_inputs=concrete_remote_fn.captured_inputs,
            output_specs=rpc_ops.get_output_specs_from_function(
                concrete_remote_fn),
            method_name="multiply")

        rpc_ops.gen_rpc_ops.rpc_server_start(server_resource)
        client_handle, _ = rpc_ops.gen_rpc_ops.rpc_client(
            server_address=address, timeout_in_ms=5000)
        future_resource, deleter = rpc_ops.gen_rpc_ops.rpc_call(
            client_handle,
            args=[a, b],
            method_name="multiply",
            timeout_in_ms=0)

        error_code, _ = rpc_ops.gen_rpc_ops.rpc_check_status(future_resource)
        self.assertAllEqual(error_code, 0)
        self.assertAllEqual(
            rpc_ops.gen_rpc_ops.rpc_get_value(future_resource,
                                              Tout=[dtypes.int32]), [6])

        # Clean up: these deleters release the server and client resources as
        # soon as they are garbage collected (no reference to them is kept).
        resource_variable_ops.EagerResourceDeleter(
            handle=server_resource, handle_device=server_resource.device)

        resource_variable_ops.EagerResourceDeleter(
            handle=client_handle, handle_device=client_handle.device)

        rpc_ops.gen_rpc_ops.delete_rpc_future_resource(future_resource,
                                                       deleter)
Example #2
 def __init__(self,
              num_threads,
              display_name=None,
              max_intra_op_parallelism=1):
     """Creates a `PrivateThreadPool` with the given number of threads."""
     if context.executing_eagerly():
         shared_name = _generate_shared_name("privatethreadpool")
         if compat.forward_compatible(2019, 8, 3):
             self._resource = ged_ops.thread_pool_handle(
                 num_threads=num_threads,
                 max_intra_op_parallelism=max_intra_op_parallelism,
                 display_name=display_name,
                 shared_name=shared_name)
         else:
             self._resource = ged_ops.experimental_thread_pool_handle(
                 num_threads=num_threads,
                 max_intra_op_parallelism=max_intra_op_parallelism,
                 display_name=display_name,
                 shared_name=shared_name)
         self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
             handle=self._resource,
             handle_device=context.context().device_name)
     else:
         if compat.forward_compatible(2019, 8, 3):
             self._resource = ged_ops.thread_pool_handle(
                 num_threads=num_threads,
                 max_intra_op_parallelism=max_intra_op_parallelism,
                 display_name=display_name)
         else:
             self._resource = ged_ops.experimental_thread_pool_handle(
                 num_threads=num_threads,
                 max_intra_op_parallelism=max_intra_op_parallelism,
                 display_name=display_name)
Example #3
  def __init__(self,
               address: str,
               name: str = "",
               list_registered_methods=False,
               timeout_in_ms=0):
    self._client_handle, methods = gen_rpc_ops.rpc_client(
        shared_name=name,
        server_address=address,
        list_registered_methods=list_registered_methods,
        timeout_in_ms=timeout_in_ms)
    if context.executing_eagerly():
      self._handle_deleter = resource_variable_ops.EagerResourceDeleter(
          handle=self._client_handle, handle_device=self._client_handle.device)
    else:
      raise NotImplementedError(
          "Client creation is supported only in eager mode.")
    self._server_address = address
    decoder = nested_structure_coder.StructureCoder()
    self._method_registry = {}
    for method in methods.numpy():
      m = rpc_pb2.RegisteredMethod()
      m.ParseFromString(method)
      output_specs = decoder.decode_proto(m.output_specs)
      input_specs = decoder.decode_proto(m.input_specs)
      self._method_registry[m.method] = output_specs
      # TODO(ishark): Perhaps doc string can also be taken as input during
      # function registration.
      doc_string = "RPC Call for " + m.method + " method to server " + address
      self._add_method(m.method, output_specs, input_specs, self._client_handle,
                       doc_string)
Example #4
    def __init__(self, dataset):
        """Creates a new iterator over the given dataset.

    For example:
    ```python
    dataset = tf.data.Dataset.range(4)
    for x in Iterator(dataset):
      print(x)
    ```

    Tensors produced will be placed on the device on which this iterator object
    was created.

    Args:
      dataset: A `tf.data.Dataset` object.

    Raises:
      TypeError: If `dataset` is an unsupported type.
      RuntimeError: When invoked without eager execution enabled.
    """
        if isinstance(dataset, prefetching_ops._PrefetchToDeviceDataset):  # pylint: disable=protected-access
            raise TypeError(
                "`tf.contrib.data.prefetch_to_device()` is not compatible with "
                "`tf.contrib.eager.Iterator`. Use `for ... in dataset:` to iterate "
                "over the dataset instead.")

        super(Iterator, self).__init__(dataset)
        if not context.context().device_spec.device_type:
            is_remote_device = False
        else:
            is_remote_device = (
                context.context().device_spec.device_type != "CPU")
        self._buffer_resource_handle = None
        if is_remote_device:
            with ops.device("/device:CPU:0"):
                iter_string_handle = gen_dataset_ops.iterator_to_string_handle(
                    self._resource)

                @function.Defun(dtypes.string)
                def remote_fn(h):
                    remote_iterator = iterator_ops.Iterator.from_string_handle(
                        h, self.output_types, self.output_shapes,
                        self.output_classes)
                    return remote_iterator.get_next()

                remote_fn.add_to_graph(None)
                target = constant_op.constant("/device:CPU:0")
            with ops.device(self._device):
                self._buffer_resource_handle = prefetching_ops.function_buffering_resource(  # pylint: disable=line-too-long
                    string_arg=iter_string_handle,
                    output_types=self._flat_output_types,
                    f=remote_fn,
                    target_device=target,
                    buffer_size=10,
                    container="",
                    shared_name=_generate_shared_name(
                        "contrib_eager_iterator_function_buffer_resource"))
                self._buffer_resource_deleter = resource_variable_ops.EagerResourceDeleter(  # pylint: disable=line-too-long
                    handle=self._buffer_resource_handle,
                    handle_device=self._device)
Example #5
 def __init__(self, address: str):
   self._server_handle = gen_rpc_ops.rpc_server(address)
   if context.executing_eagerly():
     self._handle_deleter = resource_variable_ops.EagerResourceDeleter(
         handle=self._server_handle, handle_device=self._server_handle.device)
   else:
     raise NotImplementedError("Please create the server outside tf.function.")
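Examples #3 and #5 look like the internals behind `tf.distribute.experimental.rpc` (added around TF 2.8). If your build exposes that module, its public surface is roughly as sketched below; the names and signatures here are assumptions to verify against your version's API docs.

```python
import tensorflow as tf

address = "localhost:50051"  # illustrative address
server = tf.distribute.experimental.rpc.Server.create("grpc", address)

@tf.function(input_signature=[
    tf.TensorSpec([], tf.int32),
    tf.TensorSpec([], tf.int32)
])
def multiply(a, b):
  return a * b

# Register the function under a method name, then start serving.
server.register("multiply", multiply)
server.start()

client = tf.distribute.experimental.rpc.Client.create("grpc", address)
status_or = client.call(
    "multiply", [tf.constant(2), tf.constant(3)],
    output_specs=tf.TensorSpec([], tf.int32))
if status_or.is_ok():
  print(status_or.get_value())  # expected: a scalar tensor holding 6
```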
Example #6
 def __init__(self, resource, init_op_fn):
   self._resource = resource
   # TODO(nickfelt): cache constructed ops in graph mode
   self._init_op_fn = init_op_fn
   if context.executing_eagerly() and self._resource is not None:
     self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
         handle=self._resource, handle_device="cpu:0")
Example #7
 def __init__(self, resource, init_op_fn):
   self._resource = resource
   self._init_op_fn = init_op_fn
   init_op = self.init()
   if context.executing_eagerly():
     self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
         handle=self._resource, handle_device="cpu:0")
   else:
     ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME, init_op)
Example #8
 def __init__(self, shared_name, init_op_fn, name=None):
     self._resource = gen_summary_ops.summary_writer(
         shared_name=shared_name, name=name)
     self._init_op_fn = init_op_fn
     self._init_op = init_op_fn(self._resource)
     self._closed = False
     if context.executing_eagerly():
         self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
             handle=self._resource, handle_device="cpu:0")
     else:
         ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME,
                               self._init_op)
Example #9
 def __init__(self, num_threads, display_name=None):
   """Creates a `PrivateThreadPool` with the given number of threads."""
   if context.executing_eagerly():
     shared_name = _generate_shared_name("privatethreadpool")
     self._resource = gen_dataset_ops.thread_pool_handle(
         num_threads=num_threads,
         display_name=display_name,
         shared_name=shared_name)
     self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
         handle=self._resource, handle_device=context.context().device_name)
   else:
     self._resource = gen_dataset_ops.thread_pool_handle(
         num_threads=num_threads, display_name=display_name)
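In current public TF the same effect, a dedicated thread pool for one `tf.data` pipeline, is requested through dataset options rather than the internal handle op used above. A minimal sketch (the option lived under `experimental_threading` in older releases and `threading` in newer ones):

```python
import tensorflow as tf

options = tf.data.Options()
options.threading.private_threadpool_size = 4  # use a private pool of 4 threads

dataset = tf.data.Dataset.range(100).map(lambda x: x * 2)
dataset = dataset.with_options(options)
```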
Example #10
 def __init__(self, shared_name, init_op_fn, name=None, v2=False):
     self._resource = gen_summary_ops.summary_writer(
         shared_name=shared_name, name=name)
     # TODO(nickfelt): cache other constructed ops in graph mode
     self._init_op_fn = init_op_fn
     self._init_op = init_op_fn(self._resource)
     self._v2 = v2
     self._closed = False
     if context.executing_eagerly():
         self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
             handle=self._resource, handle_device="cpu:0")
     else:
         global _SUMMARY_WRITER_INIT_OP
         key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
         _SUMMARY_WRITER_INIT_OP.setdefault(key, []).append(self._init_op)
Example #11
    def __init__(self, dataset):
        """Creates a new iterator over the given dataset.

    For example:
    ```python
    dataset = tf.data.Dataset.range(4)
    for x in Iterator(dataset):
      print(x)
    ```

    Tensors produced will be placed on the device on which this iterator object
    was created.

    Args:
      dataset: A `tf.data.Dataset` object.

    Raises:
      RuntimeError: When invoked without eager execution enabled.
    """

        if not context.executing_eagerly():
            raise RuntimeError(
                "{} objects can only be used when eager execution is enabled, use "
                "tf.data.Dataset.make_initializable_iterator or "
                "tf.data.Dataset.make_one_shot_iterator for graph construction"
                .format(type(self)))
        with ops.device("/device:CPU:0"):
            ds_variant = dataset._as_variant_tensor()  # pylint: disable=protected-access
            self._output_classes = dataset.output_classes
            self._output_types = dataset.output_types
            self._output_shapes = dataset.output_shapes
            self._flat_output_types = nest.flatten(
                sparse.as_dense_types(self._output_types,
                                      self._output_classes))
            self._flat_output_shapes = nest.flatten(
                sparse.as_dense_shapes(self._output_shapes,
                                       self._output_classes))
            self._resource = gen_dataset_ops.iterator(
                shared_name="",
                container=_generate_shared_name("eageriterator"),
                output_types=self._flat_output_types,
                output_shapes=self._flat_output_shapes)
            gen_dataset_ops.make_iterator(ds_variant, self._resource)
            # Delete the resource when this object is deleted
            self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
                handle=self._resource, handle_device="/device:CPU:0")
        self._device = context.context().device_name
Example #12
    def __init__(self, dataset):
        """Creates a new iterator over the given dataset.

    For example:
    ```python
    dataset = tf.data.Dataset.range(4)
    for x in Iterator(dataset):
      print(x)
    ```

    Tensors produced will be placed on the device on which this iterator object
    was created.

    Args:
      dataset: A `tf.data.Dataset` object.

    Raises:
      RuntimeError: When invoked without eager execution enabled.
    """

        if not context.executing_eagerly():
            raise RuntimeError(
                "{} objects can only be used when eager execution is enabled, use "
                "tf.data.Dataset.make_initializable_iterator or "
                "tf.data.Dataset.make_one_shot_iterator for graph construction"
                .format(type(self)))
        self._device = context.context().device_name
        with ops.device("/cpu:0"):
            # pylint: disable=protected-access
            dataset = dataset._apply_options()
            ds_variant = dataset._variant_tensor
            self._structure = structure_lib.convert_legacy_structure(
                dataset.output_types, dataset.output_shapes,
                dataset.output_classes)
            self._flat_output_types = self._structure._flat_types
            self._flat_output_shapes = self._structure._flat_shapes
            with ops.colocate_with(ds_variant):
                self._resource = gen_dataset_ops.anonymous_iterator(
                    output_types=self._flat_output_types,
                    output_shapes=self._flat_output_shapes)
                gen_dataset_ops.make_iterator(ds_variant, self._resource)
                # Delete the resource when this object is deleted
                self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
                    handle=self._resource, handle_device=self._device)
Example #13
 def __init__(self,
              shared_name,
              init_op_fn,
              name=None,
              v2=False,
              metadata=None):
     self._resource = gen_summary_ops.summary_writer(
         shared_name=shared_name, name=name)
     # TODO(nickfelt): cache other constructed ops in graph mode
     self._init_op_fn = init_op_fn
     self._init_op = init_op_fn(self._resource)
     self._v2 = v2
     self._metadata = {} if metadata is None else metadata
     self._closed = False
     if context.executing_eagerly():
         self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
             handle=self._resource, handle_device="cpu:0")
     else:
         ops.add_to_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME,
                               self._init_op)
Example #14
  def __init__(self, server_addresses):
    """Creates and starts the gRPC server.

    Args:
      server_addresses: A list of strings containing one or more server
        addresses.
    """
    if not tf.executing_eagerly():
      raise ValueError("Only eager mode is currently supported.")

    self._handle = gen_grpc_ops.grpc_server_resource_handle_op(
        shared_name=context.shared_name(None))
    # Delete the resource when this object is deleted.
    self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
        handle=self._handle, handle_device=context.context().device_name)
    gen_grpc_ops.create_grpc_server(self._handle, server_addresses)

    # References to tf.Variable's, etc. used in a tf.function to prevent them
    # from being deallocated.
    self._keep_alive = []
Example #15
  def __init__(self, server_address):
    """Creates and starts the gRPC client.

    Args:
      server_address: A string containing the server address.
    """
    if not tf.executing_eagerly():
      raise ValueError("Only eager mode is currently supported.")

    self._handle = gen_grpc_ops.grpc_client_resource_handle_op(
        shared_name=context.shared_name(None))
    self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
        handle=self._handle, handle_device=context.context().device_name)
    method_signatures = gen_grpc_ops.create_grpc_client(self._handle,
                                                        server_address).numpy()
    m = service_pb2.MethodOutputSignature()
    v = struct_pb2.StructuredValue()
    for sig in method_signatures:
      assert m.ParseFromString(sig)
      decoder = nested_structure_coder.StructureCoder()
      assert v.ParseFromString(m.output_specs)
      decoded_output_specs = decoder.decode_proto(v)
      self._add_method(m.name, decoded_output_specs)
Example #16
    def __init__(
            self,  # pylint: disable=super-init-not-called
            initial_value=None,
            trainable=None,
            caching_device=None,
            name=None,
            dtype=None,
            constraint=None,
            add_initializers_to=None,
            lifted_initializer_graph=None,
            synchronization=None,
            aggregation=None,
            **unused_kwargs):
        """Creates a variable.

    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called.
        (Note that initializer functions from init_ops.py must first be bound
         to a shape before being used here.)
      trainable: If `True`, GradientTapes automatically watch uses of this
        Variable.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
       a Tensor) or float32 will be used (if it is a Python object convertible
       to a Tensor).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
      add_initializers_to: if not None and not in legacy graph mode, the
        initializer tensor will be added to this map in addition to adding the
        assignment to the function.
      lifted_initializer_graph: FuncGraph to try to lift initializers to.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize. If `synchronization` is set to `ON_READ`,
        `trainable` must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.

    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
      RuntimeError: If called outside of a function definition.
    """
        if not ops.inside_function():
            # If we've been init_scope()d out of the function definition nothing to do
            # here; we can't really do the capturing or conditional logic.
            resource_variable_ops.ResourceVariable.__init__(
                self,
                initial_value=initial_value,
                trainable=trainable,
                caching_device=caching_device,
                name=name,
                dtype=dtype,
                constraint=constraint)
            return
        with ops.init_scope():
            self._in_graph_mode = not context.executing_eagerly()
        if initial_value is None:
            raise ValueError("initial_value must be specified.")
        init_from_fn = callable(initial_value)

        if constraint is not None and not callable(constraint):
            raise ValueError("The `constraint` argument must be a callable.")

        if isinstance(initial_value, trackable.CheckpointInitialValue):
            self._maybe_initialize_trackable()
            self._update_uid = initial_value.checkpoint_position.restore_uid
            initial_value = initial_value.wrapped_value

        synchronization, aggregation, trainable = (
            variables.validate_synchronization_aggregation_trainable(
                synchronization, aggregation, trainable, name))
        self._trainable = trainable
        self._synchronization = synchronization
        self._aggregation = aggregation
        self._save_slice_info = None
        self._initial_value = None
        self._initializer_op = None
        self._is_initialized_op = None
        self._graph_element = None
        self._cached_value = None
        # Store the graph key so optimizers know how to only retrieve variables from
        # this graph. Guaranteed to be the same as the eager graph_key.
        self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
        with ops.name_scope(name, "Variable",
                            [] if init_from_fn else [initial_value]) as name:
            # pylint: disable=protected-access
            with ops.init_scope():
                handle_name = ops.name_from_scope_name(name)
                unique_id = "%s_%d" % (handle_name, ops.uid())
                shared_name = context.shared_name(unique_id)
            with ops.name_scope("Initializer"), ops.device(None):
                initial_value = ops.convert_to_tensor(
                    initial_value() if init_from_fn else initial_value,
                    name="initial_value",
                    dtype=dtype)
            with ops.init_scope():
                self._handle = resource_variable_ops.eager_safe_variable_handle(
                    initial_value=initial_value,
                    shared_name=shared_name,
                    name=name,
                    graph_mode=self._in_graph_mode)
            self._shape = initial_value.shape
            self._unique_id = unique_id
            self._handle_name = handle_name + ":0"
            self._dtype = initial_value.dtype.base_dtype
            self._constraint = constraint
            assert initial_value is not None
            if self._in_graph_mode:
                with ops.init_scope():
                    outer_graph = ops.get_default_graph()
                func_graph = ops.get_default_graph()
                function_placeholders = (func_graph.inputs +
                                         func_graph.internal_captures)
                placeholder_ops = set(
                    [tensor.op for tensor in function_placeholders])
                lifted_initializer = lift_to_graph.lift_to_graph(
                    [initial_value],
                    outer_graph,
                    disallowed_placeholders=placeholder_ops)[initial_value]
                with ops.init_scope():
                    self._initial_value = lifted_initializer
                    with ops.name_scope("IsInitialized"):
                        self._is_initialized_op = (
                            resource_variable_ops.var_is_initialized_op(
                                self._handle))
                    if initial_value is not None:
                        with ops.name_scope("Assign") as n, ops.colocate_with(
                                self._handle):
                            self._initializer_op = resource_variable_ops.assign_variable_op(
                                self._handle, lifted_initializer, name=n)
                    with ops.name_scope("Read"), ops.colocate_with(
                            self._handle):
                        # Manually assign reads to the handle's device to avoid log
                        # messages.
                        with ops.device(self._handle.device):
                            value = self._read_variable_op()
                        self._graph_element = value
                    ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, self)
            else:
                if add_initializers_to is not None:
                    add_initializers_to[self] = initial_value

                def assign_fn():
                    with ops.name_scope("Assign") as n, ops.colocate_with(
                            self._handle):
                        resource_variable_ops.assign_variable_op(self._handle,
                                                                 initial_value,
                                                                 name=n)
                        # Returning values to keep tf.cond happy.
                    return ops.convert_to_tensor(1)

                def not_assign_fn():
                    return ops.convert_to_tensor(0)

                # Note: this cond is always guaranteed to run because we're inside a
                # defun which will insert automatic control dependencies.
                control_flow_ops.cond(
                    resource_variable_ops.var_is_initialized_op(self._handle),
                    not_assign_fn, assign_fn)

        # After the handle has been created, set up a way to clean it up when
        # executing eagerly. We'll hold the only reference to the deleter, so that
        # when this object is garbage collected the deleter will be too. This
        # means ResourceVariables can be part of reference cycles without those
        # cycles being uncollectable.
        if not self._in_graph_mode:
            self._handle_deleter = resource_variable_ops.EagerResourceDeleter(
                handle=self._handle, handle_device=self._handle.device)
        self._cached_shape_as_list = None
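This lifted-initializer variable appears to be what `tf.function` creates when a `tf.Variable` is constructed inside a traced function; the supported user-level pattern is to create the variable only on the first trace, for example:

```python
import tensorflow as tf


class Counter(tf.Module):
  """Creates its variable lazily, on the first trace of `increment`."""

  def __init__(self):
    self.count = None

  @tf.function
  def increment(self):
    if self.count is None:
      # Created once during the first trace; the initializer is lifted out of
      # the function graph, as in the __init__ above.
      self.count = tf.Variable(0)
    self.count.assign_add(1)
    return self.count.read_value()


c = Counter()
print(int(c.increment()))  # 1
print(int(c.increment()))  # 2
```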
Example #17
    def __init__(self,
                 dataset,
                 devices,
                 max_buffer_size=1,
                 prefetch_buffer_size=1,
                 source_device="/cpu:0"):
        """Constructs a MultiDeviceIterator.

    Args:
      dataset: The input dataset to be iterated over.
      devices: The list of devices to fetch data to.
      max_buffer_size: Maximum size of the host side per device buffer to keep.
      prefetch_buffer_size: if > 1, then we set up a buffer on each device to
        prefetch into.
      source_device: The host device to place the `dataset` on.  In order to
        prevent deadlocks, if the prefetch_buffer_size is greater than the
        max_buffer_size, we set the max_buffer_size to prefetch_buffer_size.
    """
        options = dataset_ops.Options()
        options.experimental_distribute.num_devices = len(devices)
        dataset = dataset.with_options(options)
        self._dataset = dataset._apply_options()  # pylint: disable=protected-access
        self._experimental_slack = dataset.options().experimental_slack
        self._devices = devices
        self._source_device = source_device
        self._source_device_tensor = ops.convert_to_tensor(source_device)
        self._max_buffer_size = max_buffer_size
        self._prefetch_buffer_size = prefetch_buffer_size

        if self._prefetch_buffer_size > self._max_buffer_size:
            self._max_buffer_size = self._prefetch_buffer_size

        # Create the MultiDeviceIterator.
        with ops.device(self._source_device):
            # TODO(b/121378567): Get rid of this shared_name hack.
            shared_name = ""
            if context.executing_eagerly():
                shared_name = context.shared_name()
            self._multi_device_iterator_resource = (
                gen_dataset_ops.multi_device_iterator(
                    devices=self._devices,
                    shared_name=shared_name,
                    container="",
                    **self._dataset._flat_structure))  # pylint: disable=protected-access
            if context.executing_eagerly():
                # Delete the resource when this object is deleted
                self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
                    handle=self._multi_device_iterator_resource,
                    handle_device=self._source_device)

            # The incarnation ID is used to ensure consistency between the per-device
            # iterators and the multi-device iterator.
            self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
                self._dataset._variant_tensor,  # pylint: disable=protected-access
                self._multi_device_iterator_resource,
                max_buffer_size=self._max_buffer_size)

        self._prototype_device_datasets = []
        for i, device in enumerate(self._devices):
            with ops.device(device):
                ds = _PerDeviceGenerator(i,
                                         self._multi_device_iterator_resource,
                                         self._incarnation_id,
                                         self._source_device_tensor,
                                         self._dataset.element_spec)
                self._prototype_device_datasets.append(ds)

        # TODO(rohanj): Explore the possibility of the MultiDeviceIterator to
        # initialize the device side of the pipeline. This would allow the
        # MultiDeviceIterator to choose, for example, to move some transformations
        # into the device side from its input. It might be useful in rewriting.
        # Create the per device iterators.
        self._device_iterators = []
        for i, device in enumerate(self._devices):
            with ops.device(device):
                ds = _create_device_dataset(self._prototype_device_datasets[i],
                                            self._incarnation_id,
                                            self._prefetch_buffer_size,
                                            self._experimental_slack)
                if context.executing_eagerly():
                    self._device_iterators.append(
                        dataset_ops.make_one_shot_iterator(ds))
                else:
                    self._device_iterators.append(
                        dataset_ops.make_initializable_iterator(ds))

        if not context.executing_eagerly():
            device_iterator_initializers = [
                iterator.initializer for iterator in self._device_iterators
            ]
            self._initializer = control_flow_ops.group(
                *device_iterator_initializers)
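A hedged usage sketch for this class through the public `tf.data.experimental.MultiDeviceIterator` wrapper (device names are illustrative and must exist on your machine):

```python
import tensorflow as tf

dataset = tf.data.Dataset.range(10)
mdi = tf.data.experimental.MultiDeviceIterator(
    dataset, devices=["/cpu:0", "/gpu:0"])

for _ in range(5):
  # get_next() returns one element per device, in the order given above.
  cpu_elem, gpu_elem = mdi.get_next()
```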
Example #18
 def _set_up_resource_deleter(self):
   self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
       handle=self._resource, handle_device="cpu:0")
Example #19
    def __init__(self,
                 dataset,
                 devices,
                 max_buffer_size=1,
                 prefetch_buffer_size=1,
                 source_device="/cpu:0"):
        """Constructs a MultiDeviceIterator.

    Args:
      dataset: The input dataset to be iterated over.
      devices: The list of devices to fetch data to.
      max_buffer_size: Maximum size of the host side per device buffer to keep.
      prefetch_buffer_size: if > 1, then we set up a buffer on each device
        to prefetch into.
      source_device: The host device to place the `dataset` on.

    Raises:
      RuntimeError: If run in Eager mode.
    """
        self._dataset = dataset._apply_options()  # pylint: disable=protected-access
        self._devices = devices
        self._source_device = source_device
        self._source_device_tensor = ops.convert_to_tensor(source_device)

        # Create the MultiDeviceIterator.
        with ops.device(self._source_device):
            # TODO(b/121378567): Get rid of this shared_name hack.
            shared_name = ""
            if context.executing_eagerly():
                shared_name = context.shared_name()
            self._multi_device_iterator_resource = (
                gen_dataset_ops.multi_device_iterator(
                    devices=self._devices,
                    shared_name=shared_name,
                    container="",
                    **dataset_ops.flat_structure(dataset)))
            if context.executing_eagerly():
                # Delete the resource when this object is deleted
                self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
                    handle=self._multi_device_iterator_resource,
                    handle_device=self._source_device)

            # The incarnation ID is used to ensure consistency between the per-device
            # iterators and the multi-device iterator.
            self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
                self._dataset._variant_tensor,  # pylint: disable=protected-access
                self._multi_device_iterator_resource,
                max_buffer_size=max_buffer_size)

        # TODO(rohanj): Explore the possibility of the MultiDeviceIterator to
        # initialize the device side of the pipeline. This would allow the
        # MultiDeviceIterator to choose, for example, to move some transformations
        # into the device side from its input. It might be useful in rewriting.
        # Create the per device iterators.
        self._device_iterators = []
        for i, device in enumerate(self._devices):
            with ops.device(device):
                ds = _PerDeviceGenerator(i,
                                         self._multi_device_iterator_resource,
                                         self._incarnation_id,
                                         self._source_device_tensor,
                                         dataset._element_structure)  # pylint: disable=protected-access
                if prefetch_buffer_size > 0:
                    ds = ds.prefetch(prefetch_buffer_size)
                # TODO(jsimsa): Enable auto-tuning and optimizations when supported for
                # non-CPU devices.
                options = dataset_ops.Options()
                options.experimental_autotune = False
                options.experimental_optimization.apply_default_optimizations = False
                ds = ds.with_options(options)
                if context.executing_eagerly():
                    self._device_iterators.append(
                        dataset_ops.make_one_shot_iterator(ds))
                else:
                    self._device_iterators.append(
                        dataset_ops.make_initializable_iterator(ds))

        if not context.executing_eagerly():
            device_iterator_initializers = [
                iterator.initializer for iterator in self._device_iterators
            ]
            self._initializer = control_flow_ops.group(
                *device_iterator_initializers)
Example #20
    def __init__(self, dataset):
        """Creates a new iterator over the given dataset.

    For example:
    ```python
    dataset = tf.data.Dataset.range(4)
    for x in Iterator(dataset):
      print(x)
    ```

    Tensors produced will be placed on the device on which this iterator object
    was created.

    Args:
      dataset: A `tf.data.Dataset` object.

    Raises:
      RuntimeError: When invoked without eager execution enabled.
    """

        if not context.in_eager_mode():
            raise RuntimeError(
                "{} objects can only be used when eager execution is enabled, use "
                "tf.data.Dataset.make_iterator or "
                "tf.data.Dataset.make_one_shot_iterator for graph construction"
                .format(type(self)))
        with ops.device("/device:CPU:0"):
            ds_variant = dataset._as_variant_tensor()  # pylint: disable=protected-access
            self._output_types = dataset.output_types
            self._output_shapes = dataset.output_shapes
            self._flat_output_types = nest.flatten(dataset.output_types)
            self._flat_output_shapes = nest.flatten(dataset.output_shapes)
            self._resource = gen_dataset_ops.iterator(
                container="",
                shared_name=_generate_shared_name("eager_iterator"),
                output_types=self._flat_output_types,
                output_shapes=self._flat_output_shapes)
            gen_dataset_ops.make_iterator(ds_variant, self._resource)
            # Delete the resource when this object is deleted
            self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
                handle=self._resource, handle_device="/device:CPU:0")
        self._device = context.context().device_name
        self._buffer_resource_handle = None
        if not context.context().device_spec.device_type:
            is_remote_device = False
        else:
            is_remote_device = (
                context.context().device_spec.device_type != "CPU")
        if is_remote_device:
            with ops.device("/device:CPU:0"):
                iter_string_handle = gen_dataset_ops.iterator_to_string_handle(
                    self._resource)

                @function.Defun(dtypes.string)
                def remote_fn(h):
                    remote_iterator = iterator_ops.Iterator.from_string_handle(
                        h, self._output_types, self._output_shapes)
                    return remote_iterator.get_next()

                remote_fn.add_to_graph(None)
                target = constant_op.constant("/device:CPU:0")
            with ops.device(self._device):
                self._buffer_resource_handle = prefetching_ops.function_buffering_resource(
                    string_arg=iter_string_handle,
                    f=remote_fn,
                    target_device=target,
                    buffer_size=10,
                    thread_pool_size=1,
                    container="",
                    shared_name=_generate_shared_name(
                        "function_buffer_resource"))
                self._buffer_resource_deleter = resource_variable_ops.EagerResourceDeleter(
                    handle=self._buffer_resource_handle,
                    handle_device=self._device)
Example #21
 def __init__(self, resource):
     self._resource = resource
     if context.in_eager_mode():
         self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
             handle=self._resource, handle_device="cpu:0")
Example #22
    def __init__(self,
                 logdir,
                 max_queue=10,
                 flush_secs=120,
                 filename_suffix=""):
        """Summary writer for TensorBoard, compatible with eager execution.

    If necessary, multiple instances of `SummaryWriter` can be created, with
    distinct `logdir`s and `name`s. Each `SummaryWriter` instance will retain
    its independent `global_step` counter and data writing destination.

    Example:
    ```python
    writer = tfe.SummaryWriter("my_model")

    # ... Code that sets up the model and data batches ...

    for _ in xrange(train_iters):
      loss = model.train_batch(batch)
      writer.scalar("loss", loss)
      writer.step()
    ```

    Args:
      logdir: Directory in which summary files will be written.
      max_queue: Number of summary items to buffer before flushing to
        filesystem. If 0, summaries will be flushed immediately.
      flush_secs: Number of seconds between forced commits to disk.
      filename_suffix: Suffix of the event protobuf files in which the summary
        data are stored.

    Raises:
      ValueError: If this constructor is called not under eager execution.
    """
        # TODO(apassos, ashankar): Make this class and the underlying
        # contrib.summary_ops compatible with graph mode and remove this check.
        if not context.in_eager_mode():
            raise ValueError(
                "Use of SummaryWriter is currently supported only with eager "
                "execution enabled. File an issue at "
                "https://github.com/tensorflow/tensorflow/issues/new to express "
                "interest in fixing this.")

        # TODO(cais): Consider adding name keyword argument, which if None or empty,
        # will register the global global_step that training_util.get_global_step()
        # can find.
        with context.device(self._CPU_DEVICE):
            self._name = uuid.uuid4().hex
            self._global_step = 0
            self._global_step_tensor = variable_scope.get_variable(
                "global_step/summary_writer/" + self._name,
                shape=[],
                dtype=dtypes.int64,
                initializer=init_ops.zeros_initializer())
            self._global_step_dirty = False
            self._resource = gen_summary_ops.summary_writer(
                shared_name=self._name)
            gen_summary_ops.create_summary_file_writer(self._resource, logdir,
                                                       max_queue, flush_secs,
                                                       filename_suffix)
            # Delete the resource when this object is deleted
            self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
                handle=self._resource, handle_device=self._CPU_DEVICE)
Example #23
  def __init__(self,  # pylint: disable=super-init-not-called
               initial_value=None,
               trainable=True,
               caching_device=None,
               name=None,
               dtype=None,
               constraint=None,
               **unused_kwargs):
    """Creates a variable.

    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called.
        (Note that initializer functions from init_ops.py must first be bound
         to a shape before being used here.)
      trainable: If `True`, GradientTapes automatically watch uses of this
        Variable.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
       a Tensor) or float32 will be used (if it is a Python object convertible
       to a Tensor).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.

    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
      RuntimeError: If called outside of a function definition.
    """
    if context.executing_eagerly():
      raise RuntimeError(
          "UnliftedInitializerVariable should not be created "
          "outside of functions.")
    with ops.init_scope():
      if not context.executing_eagerly():
        raise RuntimeError(
            "UnliftedInitializerVariable does not support legacy graph mode.")
    self._in_graph_mode = False
    if initial_value is None:
      raise ValueError("initial_value must be specified.")
    init_from_fn = callable(initial_value)

    if constraint is not None and not callable(constraint):
      raise ValueError("The `constraint` argument must be a callable.")

    if isinstance(initial_value, checkpointable.CheckpointInitialValue):
      self._maybe_initialize_checkpointable()
      self._update_uid = initial_value.checkpoint_position.restore_uid
      initial_value = initial_value.wrapped_value

    self._trainable = trainable
    self._save_slice_info = None
    self._initial_value = None
    self._initializer_op = None
    self._is_initialized_op = None
    self._graph_element = None
    self._cached_value = None
    # Store the graph key so optimizers know how to only retrieve variables from
    # this graph. Guaranteed to be the same as the eager graph_key.
    self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    with ops.name_scope(name, "Variable", []
                        if init_from_fn else [initial_value]) as name:
      # pylint: disable=protected-access
      with ops.init_scope():
        assert context.executing_eagerly()
        shared_name = ops._name_from_scope_name(name)
        shared_name = "%s_%d" % (shared_name, ops.uid())
      # Use attr_scope and device(None) to simulate the behavior of
      # colocate_with when the variable we want to colocate with doesn't
      # yet exist.
      with ops.name_scope("Initializer"), ops.device(None):
        initial_value = ops.convert_to_tensor(
            initial_value() if init_from_fn else initial_value,
            name="initial_value", dtype=dtype)
      with ops.init_scope():
        self._handle = resource_variable_ops.eager_safe_variable_handle(
            shape=initial_value.get_shape(),
            dtype=initial_value.dtype.base_dtype,
            shared_name=shared_name,
            name=name,
            graph_mode=False)
      self._shape = initial_value.shape
      self._unique_id = shared_name
      self._handle_name = shared_name + ":0"
      self._dtype = initial_value.dtype.base_dtype
      self._constraint = constraint
      assert initial_value is not None
      def assign_fn():
        with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
          resource_variable_ops.assign_variable_op(
              self._handle,
              initial_value,
              name=n)
        # Returning values to keep tf.cond happy.
        return ops.convert_to_tensor(1)
      def not_assign_fn():
        return ops.convert_to_tensor(0)
      # Note: this cond is always guaranteed to run because we're inside a defun
      # which will insert automatic control dependencies.
      control_flow_ops.cond(
          resource_variable_ops.var_is_initialized_op(self._handle),
          not_assign_fn, assign_fn)

    # After the handle has been created, set up a way to clean it up when
    # executing eagerly. We'll hold the only reference to the deleter, so that
    # when this object is garbage collected the deleter will be too. This
    # means ResourceVariables can be part of reference cycles without those
    # cycles being uncollectable.
    self._handle_deleter = resource_variable_ops.EagerResourceDeleter(
        handle=self._handle, handle_device=self._handle.device)
    self._cached_shape_as_list = None
Example #24
 def __init__(self, resource):
     self._resource = resource
     if context.executing_eagerly() and self._resource is not None:
         self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
             handle=self._resource, handle_device="cpu:0")