def testDynamicShapesWithRunOptionsDisableDynamicPadder(
            self, distribution):
        dataset = get_dataset_from_tensor_slices([5, 6, 7]).batch(4)
        mask_dataset = get_dataset_from_tensor_slices([1, 0, 1]).batch(4)
        dataset = dataset_ops.DatasetV2.zip((dataset, mask_dataset))

        input_iterator = iter(
            distribution.experimental_distribute_dataset(dataset))
        options = distribute_lib.RunOptions(
            experimental_xla_options=tpu.XLAOptions(
                enable_xla_dynamic_padder=False))

        @def_function.function
        def run(iterator):
            def computation(inputs):
                x, mask = inputs
                y = x * mask
                return math_ops.reduce_sum(y)

            inputs = next(iterator)
            outputs = distribution.experimental_local_results(
                distribution.run(computation, args=(inputs,),
                                 options=options))
            return outputs

        # Assumes exactly 2 replicas: replica 0 receives [5, 6] with mask
        # [1, 0] (masked sum 5); replica 1 receives [7] with mask [1] (sum 7).
        self.assertAllEqual([5, 7], run(input_iterator))
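
The tests in this snippet rely on a `get_dataset_from_tensor_slices` helper and several TF-internal modules that are not shown. Below is a minimal sketch of the assumed setup, where the helper simply wraps `Dataset.from_tensor_slices`; the import paths are the usual TF 2.x locations of the symbols these snippets use:

from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.eager import def_function
from tensorflow.python.ops import math_ops
from tensorflow.python.tpu import tpu


def get_dataset_from_tensor_slices(inp_array):
    # Hypothetical reconstruction: builds a dataset whose elements are the
    # entries of `inp_array`, matching how the tests batch and zip it.
    return dataset_ops.DatasetV2.from_tensor_slices(inp_array)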
Example No. 2
    def experimental_run_v2(self, fn, args=(), kwargs=None, options=None):
        """See base class."""
        validate_experimental_run_function(fn)

        # Note: the target function is converted to graph even when in Eager mode,
        # so autograph is on by default here.
        fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx())
        options = options or distribute_lib.RunOptions()
        return self.extended.tpu_run(fn, args, kwargs, options)
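
The comment above notes that `fn` is converted to a graph even in eager mode, so autograph is enabled by default. A small illustrative sketch (not from the source) of why that conversion matters: data-dependent Python control flow inside the replica function only works in graph mode if autograph rewrites it into graph ops first.

import tensorflow as tf


def step_fn(x):
    # Autograph rewrites this Python `if` on a tensor into tf.cond when
    # step_fn is traced into a graph via experimental_run_v2; without the
    # rewrite, using a tf.Tensor as a Python bool raises an error.
    if tf.reduce_sum(x) > 0:
        return x * 2.0
    return x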
Example No. 3
    def experimental_run_v2(self, fn, args=(), kwargs=None, options=None):
        """Run `fn` on each replica, with the given arguments.

    Executes ops specified by `fn` on each replica. If `args` or `kwargs` have
    "per-replica" values, such as those produced by a distributed `Dataset`,
    then when `fn` is executed on a particular replica, it is executed with the
    components of those "per-replica" values that correspond to that replica.

    `fn` may call `tf.distribute.get_replica_context()` to access members such
    as `all_reduce`.

    All arguments in `args` or `kwargs` should be either nests of tensors or
    per-replica objects containing tensors or composite tensors.

    Users can pass strategy-specific options via the `options` argument. An
    example of enabling dynamic-shape bucketizing in
    `TPUStrategy.experimental_run_v2`:
    ```python

    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.experimental.TPUStrategy(resolver)

    options = tf.distribute.RunOptions()
    options.experimental_bucketizing_dynamic_shape = True

    iterator = iter(inputs)  # inputs: a distributed dataset (not defined here)

    @tf.function()
    def step_fn(inputs):
      output = tf.reduce_sum(inputs)
      return output

    strategy.experimental_run_v2(step_fn, args=(next(iterator),),
                                 options=options)
    ```

    Args:
      fn: The function to run. The output must be a `tf.nest` of `Tensor`s.
      args: (Optional) Positional arguments to `fn`.
      kwargs: (Optional) Keyword arguments to `fn`.
      options: (Optional) An instance of `tf.distribute.RunOptions` specifying
        the options to run `fn`.

    Returns:
      Merged return value of `fn` across replicas. The structure of the return
      value is the same as the return value from `fn`. Each element in the
      structure can either be "per-replica" `Tensor` objects or `Tensor`s
      (for example, if running on a single replica).
    """
        validate_experimental_run_function(fn)

        fn = autograph.tf_convert(fn, autograph_ctx.control_status_ctx())
        options = options or distribute_lib.RunOptions()
        return self.extended.tpu_run(fn, args, kwargs, options)
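
The docstring above mentions that `fn` may call `tf.distribute.get_replica_context()` to access members such as `all_reduce`. A minimal sketch of such a replica function (illustrative only; `strategy` and `dist_iterator` are assumed to be set up as in the docstring example):

import tensorflow as tf


def sum_across_replicas(inputs):
    local_sum = tf.reduce_sum(inputs)
    # Every replica contributes its local sum and receives the global sum.
    ctx = tf.distribute.get_replica_context()
    return ctx.all_reduce(tf.distribute.ReduceOp.SUM, local_sum)

# strategy.experimental_run_v2(sum_across_replicas,
#                              args=(next(dist_iterator),))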
    def testDynamicShapesWithRunOptionsBucketizing(self, distribution):
        dataset = get_dataset_from_tensor_slices([5., 6., 7.]).batch(4)
        input_iterator = iter(
            distribution.experimental_distribute_dataset(dataset))
        options = distribute_lib.RunOptions(
            experimental_bucketizing_dynamic_shape=True)

        @def_function.function
        def run(iterator):
            def computation(x):
                return math_ops.reduce_mean(x)

            inputs = next(iterator)
            outputs = distribution.experimental_local_results(
                distribution.run(computation, args=(inputs,),
                                 options=options))
            return outputs

        # Assumes exactly 2 replicas: replica 0 receives [5., 6.] (mean 5.5)
        # and replica 1 receives [7.] (mean 7.0).
        self.assertAllEqual([5.5, 7.], run(input_iterator))
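
Both test methods receive `distribution` as a parameter; in TF's own distribute test suites that argument is typically injected by the `combinations` framework. A sketch of the likely wiring (the exact decorator arguments are an assumption, not shown in the source):

from absl.testing import parameterized
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.platform import test


class DynamicShapesTest(test.TestCase, parameterized.TestCase):

    @combinations.generate(
        combinations.combine(
            distribution=[strategy_combinations.tpu_strategy],
            mode=["eager"]))
    def testDynamicShapesWithRunOptionsBucketizing(self, distribution):
        ...  # body as shown above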