Example #1
  def __call__(self, *args, **kwds):
    """Calls the graph function."""
    if self._created_variables:
      # In this case we have created variables on the first call, so we run the
      # defunned version which is guaranteed to never create variables.
      return self._stateless_fn(*args, **kwds)  # pylint: disable=not-callable
    elif self._stateful_fn is not None:
      # In this case we have not created variables on the first call. So we can
      # run the first trace but we should fail if variables are created.
      results = self._first_trace(*args, **kwds)
      if self._created_variables:
        raise ValueError("Creating variables on a non-first call to a function"
                         " decorated with tf.function.")
      return results

    self._initialize(args, kwds)

    if not self._created_variables:
      # If we did not create any variables the trace we have is good enough.
      return _call_concrete(self._concrete_stateful_fn, args, kwds)()

    def fn_with_cond(*inner_args, **inner_kwds):
      """Conditionally runs initialization if it's needed."""
      condition = True
      for variable in self._created_variables:
        condition = condition and resource_variable_ops.var_is_initialized_op(
            variable.handle)
      # We want to call stateless_fn if possible because it avoids recomputing
      # potentially expensive initializers.
      return control_flow_ops.cond(
          condition,
          lambda: self._stateless_fn(*inner_args, **inner_kwds),
          _call_concrete(self._concrete_stateful_fn, inner_args, inner_kwds))

    return function_lib.defun(fn_with_cond)(*args, **kwds)
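The dispatch above enforces the tf.function rule that variables may only be created during the first trace. A minimal sketch of the user-facing behavior this implies, assuming the public TF 2.x tf.function API (the Scale class is purely illustrative):

import tensorflow as tf

class Scale(tf.Module):

  def __init__(self):
    self.v = None

  @tf.function
  def __call__(self, x):
    if self.v is None:
      # Created only during the first trace; later calls reuse it.
      self.v = tf.Variable(2.0)
    return self.v * x

m = Scale()
print(m(tf.constant(3.0)))  # 6.0: the variable is created on the first call.
print(m(tf.constant(4.0)))  # 8.0: no new variables, so no error is raised.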
Example #2
  def __init__(self, name=None, use_global_variables=False):
    self._built = False
    self._vars = []
    self._initial_values = {}
    self._updates = []
    self._use_global_variables = use_global_variables
    name = name or self.__class__.__name__
    # Replace things like spaces in name to create a valid scope name.
    scope_name = _to_replace.sub("_", name)
    # We create the variable scope now to get the unique name that will
    # be used as a variable prefix when build() calls add_variable().
    with variable_scope.variable_scope(
        scope_name, use_resource=True, reuse=False) as scope:
      pos = scope.name.rfind(scope_name)
      self._name = name + scope.name[pos + len(scope_name):]
      self._scope = scope

    # Ensures that if the user calls build directly we still set self._built to
    # True to prevent variables from being recreated.
    self._build = self.build

    def actual_build(*args, **kwargs):
      self._build(*args, **kwargs)
      self._built = True
    self.build = actual_build
    self.build.__doc__ = self._build.__doc__

    # Captures construction scope for proper initialization.
    if context.executing_eagerly():
      self._construction_scope = context.eager_mode
    else:
      # We make self.call() into a graph callable here, so that we can
      # return a single op that performs all of the variable updates.
      self._construction_scope = ops.get_default_graph().as_default
      self.call = function.defun(self.call)
Example #3
 def testSequenceInputs(self):
   clip_by_global_norm = function.defun(clip_ops.clip_by_global_norm)
   t_list = [tensor.Tensor(1.0), tensor.Tensor(2.0)]
   clipped_list, global_norm = clip_by_global_norm(t_list, tensor.Tensor(.2))
   for t in clipped_list:
     self.assertTrue(isinstance(t, tensor.Tensor))
   self.assertTrue(isinstance(global_norm, tensor.Tensor))
Example #4
def _defun_with_scope(scope, fn):

  def wrapped_fn(*args, **kwds):
    with variable_scope.variable_creator_scope(scope):
      return fn(*args, **kwds)

  return function.defun(wrapped_fn)
Example #5
 def testBasic(self):
   matmul = function.defun(math_ops.matmul)
   t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
   sq = matmul(t, t, transpose_a=True)
   sq2 = matmul(sq, t, transpose_a=True)
   self.assertAllEqual(sq.numpy().reshape(-1), [10, 14, 14, 20])
   self.assertAllEqual(sq2.numpy().reshape(-1), [52, 76, 74, 108])
Example #6
def _defun_with_scope(scope, fn, input_signature):

  def wrapped_fn(*args, **kwds):
    with variable_scope.variable_creator_scope(scope):
      return fn(*args, **kwds)

  return function_lib.defun(wrapped_fn, input_signature=input_signature)
Example #7
  def _defun_with_scope(self, scope):
    """Creates a defun wrapped inside a variable creator scope."""

    weak_wrapped_fn = None
    def wrapped_fn(*args, **kwds):
      """Wraps `self._python_function` in a variable creator scope."""
      # We register a variable creator with reduced priority. If an outer
      # variable creator is just modifying keyword arguments to the variable
      # constructor, this will work harmoniously. Since the `scope` registered
      # here actually creates the variable, it taking priority would otherwise
      # ignore the outer creator.
      #
      # If an outer variable creator calls the variable constructor manually,
      # for example creating a MirroredVariable, then they won't call our
      # creator. This means we won't be able to trace the initialization graph,
      # and so variable initializers can't depend on function arguments. This is
      # better than the alternative, tracing the initialization graph but giving
      # the user a variable type they didn't want.
      with ops.get_default_graph()._variable_creator_scope(scope, priority=50):  # pylint: disable=protected-access
        # __wrapped__ allows AutoGraph to swap in a converted function. We give
        # the function a weak reference to itself to avoid a reference cycle.
        return weak_wrapped_fn().__wrapped__(*args, **kwds)
    weak_wrapped_fn = weakref.ref(wrapped_fn)

    # TODO(mdan): Pipe self._experimental_autograph_options through.
    return function_lib.defun(
        tf_decorator.make_decorator(self._python_function, wrapped_fn),
        input_signature=self._input_signature,
        autograph=self._autograph,
        experimental_autograph_options=self._experimental_autograph_options)
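For context, a minimal sketch of what a variable creator scope does, assuming the public TF 2.x API (renaming_creator is an illustrative creator, not part of the snippet above):

import tensorflow as tf

def renaming_creator(next_creator, **kwargs):
  # Adjust the constructor kwargs, then delegate to the next creator in the chain.
  kwargs["name"] = "wrapped_" + (kwargs.get("name") or "variable")
  return next_creator(**kwargs)

with tf.variable_creator_scope(renaming_creator):
  v = tf.Variable(1.0, name="x")

print(v.name)  # e.g. "wrapped_x:0"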
Example #8
 def _benchmark_defun_matmul(self,
                             m,
                             transpose_b,
                             num_iters,
                             execution_mode=None):
   f = function.defun(math_ops.matmul)
   func = lambda: f(m, m, transpose_b=transpose_b)
   self._run(func, num_iters, execution_mode=execution_mode)
Example #9
  def testFunctionOnDevice(self):
    if not context.context().num_gpus():
      self.skipTest('No GPUs found')

    x = tensor.Tensor([1.]).as_gpu_tensor()
    f = function.defun(math_ops.add)
    y = f(x, x).as_cpu_tensor()
    self.assertAllEqual(y.numpy(), [2.])
Example #10
 def _defun(self, fn):
   """Returns a defun generated from the input function."""
   # TODO(mdan): Pipe self._experimental_autograph_options through.
   return function_lib.defun(
       fn,
       input_signature=self.input_signature,
       autograph=self._autograph,
       experimental_autograph_options=self._experimental_autograph_options)
Example #11
  def testFunctionOnDevice(self):
    if not context.context().num_gpus():
      self.skipTest('No GPUs found')

    x = constant_op.constant([1.]).gpu()
    f = function.defun(math_ops.add)
    y = f(x, x).cpu()
    self.assertAllEqual(y, [2.])
Example #12
  def testGradient(self):
    matmul = function.defun(math_ops.matmul)

    def sq(x):
      return matmul(x, x, transpose_a=True)

    t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
    grad_t, = backprop.gradients_function(sq, [0])(t)
    self.assertAllEqual(grad_t, [[6, 6], [14, 14]])
Example #13
  def testGatherResourceWithDefun(self):
    with ops.device('cpu:0'):
      v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])

    def sum_gather():
      return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))

    defined = function.defun(sum_gather)
    self.assertAllEqual(sum_gather(), defined())
Example #14
  def testFunctionHandlesInputsPlacedOnTheWrongDeviceGracefully(self):
    if not context.context().num_gpus():
      self.skipTest('No GPUs found')

    # The Reshape op requires the shape tensor to be placed in host memory.
    reshape = function.defun(array_ops.reshape)
    value = constant_op.constant([1., 2.])
    shape = constant_op.constant([2, 1]).gpu()
    reshape(value, shape)  # No error is raised
Example #15
  def __init__(self, name, func, create_scope_now=False, unique_name=None,
               custom_getter=None, create_graph_function=False):
    """Creates a template for the given function.

    Args:
      name: A name for the scope created by this template. The
        name will be made unique by appending `_N` to it (see how
        `tf.variable_scope` treats the `default_name` for details).
      func: The function to apply each time.
      create_scope_now: Whether to create the scope at Template construction
        time, rather than first call. Defaults to false. Creating the scope at
        construction time may be more convenient if the template is to be passed
        through much lower level code, and you want to be sure of the scope
        name without knowing exactly where it will be first called. If set to
        True, the scope will be created in the constructor, and all subsequent
        times in `__call__`, leading to a trailing numeral being added to the
        names of all created Tensors. If set to False, the scope will be created
        at the first call location.
      unique_name: When used, it overrides `name` and is not made unique. If a
        template of the same scope/unique_name already exists and reuse is
        false, an error is raised. Defaults to None.
      custom_getter: optional custom getter to pass to `variable_scope()`
      create_graph_function: When True, `func` will be executed as a graph
        function. Enabling this flag gives the caller access to graph-function
        semantics, i.e., accesses to variables are totally ordered and
        side-effecting ops are not pruned.

    Raises:
      ValueError: if `name` is None.
    """
    if create_graph_function:
      self._func = function.defun(func)
    else:
      self._func = func
    self._stacktrace = traceback.format_stack()[:-2]
    self._name = name
    self._unique_name = unique_name
    self._custom_getter = custom_getter
    if name is None:
      raise ValueError("name cannot be None.")
    if create_scope_now:
      with variable_scope._pure_variable_scope(  # pylint:disable=protected-access
          (self._unique_name or
           variable_scope._get_unique_variable_scope(self._name)),  # pylint:disable=protected-access
          custom_getter=self._custom_getter) as vs:
        self._variable_scope = vs
    else:
      self._variable_scope = None
    # This variable keeps track of whether the template has been called to
    # completion, which is not the same as whether the scope has been created.
    self._variables_created = False
    # `MirroredStrategy` builds the graph with multiple threads. If a
    # `merge_call` happens within a template, multiple calls may be in progress
    # simultaneously. This variable keeps track of whether any call of the
    # template has started.
    self._first_call = True
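A small usage sketch for such a template, assuming the TF1-style tf.compat.v1.make_template API (the function and variable names are illustrative):

import tensorflow.compat.v1 as tf1

tf1.disable_eager_execution()

def scale_by_w(x):
  w = tf1.get_variable("w", shape=[], initializer=tf1.ones_initializer())
  return x * w

scale = tf1.make_template("scale", scale_by_w)
a = scale(tf1.constant(2.0))  # Creates the variable "scale/w" on the first call.
b = scale(tf1.constant(3.0))  # Reuses the same variable on later calls.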
Example #16
  def testDefunShapeInferenceWithCapturedResourceVariable(self):
    v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])

    def f():
      x = constant_op.constant([[1, 2], [3, 4]])
      out = math_ops.matmul(v, x)
      self.assertEqual(out.get_shape(), tensor_shape.TensorShape([2, 2]))

    compiled = function.defun(f)
    compiled()
Example #17
  def testFunctionHandlesInputsOnDifferentDevices(self):
    if not context.context().num_gpus():
      self.skipTest('No GPUs found')

    # The Reshape op requires the shape tensor to be placed in host memory.
    reshape = function.defun(array_ops.reshape)
    value = constant_op.constant([1., 2.]).gpu()
    shape = constant_op.constant([2, 1])
    reshaped = reshape(value, shape).cpu()
    self.assertAllEqual(reshaped, [[1], [2]])
Example #18
  def testEagerPyFuncInDefun(self):

    def wrapper():
      a = array_ops.ones((3, 3), dtype=dtypes.int32)
      x = array_ops.ones((3, 1), dtype=dtypes.int32)
      return script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.int32)

    wrapped = function.defun(wrapper)
    ret = self.evaluate(wrapped())
    self.assertAllEqual(ret, [[3], [3], [3]])
Example #19
  def testEagerPyFuncInDefun(self):
    with test_util.device(use_gpu=True):
      def wrapper():
        a = array_ops.ones((3, 3), dtype=dtypes.float32)
        x = array_ops.ones((3, 1), dtype=dtypes.float32)
        return script_ops.eager_py_func(matmul, inp=[a, x], Tout=dtypes.float32)

      wrapped = function.defun(wrapper)
      ret = self.evaluate(wrapped())
      self.assertAllEqual(ret, [[3.0], [3.0], [3.0]])
Example #20
  def testBasicGraphMode(self):
    matmul = function.defun(math_ops.matmul)

    @function.defun
    def sq(a):
      return matmul(a, a)

    t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
    out = sq(t)
    self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
Example #21
  def benchmark_defun_without_signature(self):

    def func(t1, t2, t3, t4, t5, t6, t7, t8):
      del t1, t2, t3, t4, t5, t6, t7, t8
      return None

    defined = function.defun(func)
    t = constant_op.constant(0.0)
    cache_computation = lambda: defined(t, t, t, t, t, t, t, t)
    self._run(cache_computation, 30000)
Example #22
  def testDefunMatmul(self):
    """Basic remote eager execution with defun."""

    mm_defun = function.defun(math_ops.matmul)
    with ops.device("job:%s/replica:0/task:1/device:CPU:0" % JOB_NAME):
      x1 = array_ops.ones([2, 2])
    with ops.device("job:%s/replica:0/task:2/device:CPU:0" % JOB_NAME):
      x2 = array_ops.ones([2, 2])
      y = mm_defun(x1, x2)
    np.testing.assert_array_equal([[2, 2], [2, 2]], y.numpy())
Example #23
  def testFunctionHandlesInputsPlacedOnTheWrongDeviceGracefully(self):
    if not context.context().num_gpus():
      self.skipTest('No GPUs found')

    # The Reshape op requires the shape tensor to be placed in host memory.
    reshape = function.defun(array_ops.reshape)
    value = tensor.Tensor([1., 2.]).as_gpu_tensor()
    shape = tensor.Tensor([2, 1]).as_gpu_tensor()
    with self.assertRaises(errors.InvalidArgumentError):
      reshape(value, shape)
Example #24
  def benchmark_defun_with_signature(self):

    def func(t1, t2, t3, t4, t5, t6, t7, t8):
      del t1, t2, t3, t4, t5, t6, t7, t8
      return None

    defined = function.defun(
        func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8)
    t = constant_op.constant(0.0)
    signature_computation = lambda: defined(t, t, t, t, t, t, t, t)
    self._run(signature_computation, 30000)
Example #25
  def benchmark_defun_without_signature_and_with_kwargs(self):

    def func(t1, t2, t3, t4, t5, t6, t7, t8):
      del t1, t2, t3, t4, t5, t6, t7, t8
      return None

    defined = function.defun(func)
    t = constant_op.constant(0.0)
    def cache_computation():
      return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t)
    self._run(cache_computation, 30000)
Example #26
  def testReturningNonTensorRaisesError(self):
    optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)
    optimizer.apply_gradients = function.defun(optimizer.apply_gradients)
    v = resource_variable_ops.ResourceVariable(1.0)
    grad = backprop.implicit_grad(lambda v: v**2)(v)

    with self.assertRaisesRegexp(TypeError,
                                 '.*must return zero or more Tensors.*'):
      # TODO(akshayka): We might want to allow defun-ing Python functions
      # that return operations (and just execute the op instead of running it).
      optimizer.apply_gradients(grad)
Example #27
  def __new__(cls, *args, **kwargs):
    obj = super(Metric, cls).__new__(cls)
    # TODO(psv): Fix reference cycle issue here.

    # Converting update_state_fn() into a graph function, so that
    # we can return a single op that performs all of the variable updates.
    defuned_update_state_fn = function.defun(obj.update_state)
    obj.update_state = types.MethodType(
        update_state_wrapper(defuned_update_state_fn), obj)
    obj.result = types.MethodType(result_wrapper(obj.result), obj)
    return obj
Example #28
  def benchmark_defun_with_signature_and_kwargs(self):

    def func(t1, t2, t3, t4, t5, t6, t7, t8):
      del t1, t2, t3, t4, t5, t6, t7, t8
      return None

    defined = function.defun(
        func, input_signature=[tensor_spec.TensorSpec([], dtypes.float32)] * 8)
    t = constant_op.constant(0.0)
    def signature_computation():
      return defined(t1=t, t2=t, t3=t, t4=t, t5=t, t6=t, t7=t, t8=t)
    self._run(signature_computation, 30000)
Example #29
  def testDefunShapeInferenceWithCapturedVariableInGraphMode(self):
    with context.graph_mode():
      v = variables.Variable([[1, 2], [3, 4]])

      def f():
        x = constant_op.constant([[1, 2], [3, 4]])
        out = math_ops.matmul(v, x)
        self.assertEqual(out.get_shape(), tensor_shape.TensorShape([2, 2]))

      # Check that shape inference works while creating the defun
      compiled = function.defun(f)
      compiled()
Example #30
    def validate(indexed_slice):
      def f():
        return indexed_slice

      output = function.defun(f)()
      self.assertTrue(isinstance(output, ops.IndexedSlices))
      self.assertAllEqual(indexed_slice.values, output.values)
      self.assertAllEqual(indexed_slice.indices, output.indices)
      self.assertAllEqual(indexed_slice.dense_shape, output.dense_shape)

      self.assertEqual(
          function.make_defun_op(f).output_shapes, indexed_slice.values.shape)
Example #31
    def testNestedSequenceInputs(self):
        def my_op(inputs):
            a, b, c = inputs
            e, f = b
            g, h = e
            return [a + a, [tuple([f + f, g + g]), h + h],
                    c + c], a + f + g + h + c

        my_eager_op = function.defun(my_op)
        ret = my_eager_op([
            constant_op.constant(1),
            [(constant_op.constant(2), constant_op.constant(3)),
             constant_op.constant(4)],
            constant_op.constant(5)
        ])
        self.assertEqual(len(ret), 2)
        self.assertAllEqual(ret[0][0], 2)
        self.assertAllEqual(ret[0][1][0][0], 8)
        self.assertAllEqual(ret[0][1][0][1], 4)
        self.assertTrue(isinstance(ret[0][1][0], tuple))
        self.assertAllEqual(ret[0][1][1], 6)
        self.assertAllEqual(ret[0][2], 10)
        self.assertAllEqual(ret[1], 15)
Example #32
 def __init__(self, name=None):
     self._built = False
     self._vars = []
     self._initial_values = {}
     self._updates = []
     name = name or self.__class__.__name__
     # Replace things like spaces in name to create a valid scope name.
     scope_name = _to_replace.sub("_", name)
     # We create the variable scope now to get the unique name that will
     # be used as a variable prefix when build() calls add_variable().
     with variable_scope.variable_scope(scope_name,
                                        use_resource=True,
                                        reuse=False) as scope:
         pos = scope.name.rfind(scope_name)
         self._name = name + scope.name[pos + len(scope_name):]
         self._scope = scope
     if context.in_graph_mode():
         # We make self.call() into a graph callable here, so that we can
         # return a single op that performs all of the variable updates.
         self._construction_scope = ops.get_default_graph().as_default
         self.call = function.defun(self.call)
     else:
         self._construction_scope = context.eager_mode
Example #33
    def testNestedSequenceInputs(self):
        def my_op(inputs):
            a, b, c = inputs
            e, f = b
            g, h = e
            return [a + a, [tuple([f + f, g + g]), h + h],
                    c + c], a + f + g + h + c

        my_eager_op = function.defun(my_op)
        ret = my_eager_op([
            tensor.Tensor(1),
            [(tensor.Tensor(2), tensor.Tensor(3)),
             tensor.Tensor(4)],
            tensor.Tensor(5)
        ])
        self.assertEqual(len(ret), 2)
        self.assertEqual(ret[0][0].numpy(), 2)
        self.assertEqual(ret[0][1][0][0].numpy(), 8)
        self.assertEqual(ret[0][1][0][1].numpy(), 4)
        self.assertTrue(isinstance(ret[0][1][0], tuple))
        self.assertEqual(ret[0][1][1].numpy(), 6)
        self.assertEqual(ret[0][2].numpy(), 10)
        self.assertEqual(ret[1].numpy(), 15)
Example #34
    def __init__(self, name=None, use_global_variables=False):
        self._built = False
        self._vars = []
        self._initial_values = {}
        self._updates = []
        self._use_global_variables = use_global_variables
        name = name or self.__class__.__name__
        # Replace things like spaces in name to create a valid scope name.
        scope_name = _to_replace.sub("_", name)
        # We create the variable scope now to get the unique name that will
        # be used as a variable prefix when build() calls add_variable().
        with variable_scope.variable_scope(scope_name,
                                           use_resource=True,
                                           reuse=False) as scope:
            pos = scope.name.rfind(scope_name)
            self._name = name + scope.name[pos + len(scope_name):]
            self._scope = scope

        # Ensures that if the user calls build directly we still set self._built to
        # True to prevent variables from being recreated.
        self._build = self.build

        def actual_build(*args, **kwargs):
            self._build(*args, **kwargs)
            self._built = True

        self.build = actual_build
        self.build.__doc__ = self._build.__doc__

        # Captures construction scope for proper initialization.
        if context.executing_eagerly():
            self._construction_scope = context.eager_mode
        else:
            # We make self.call() into a graph callable here, so that we can
            # return a single op that performs all of the variable updates.
            self._construction_scope = ops.get_default_graph().as_default
            self.call = function.defun(self.call)
Example #35
    def __call__(self, *args, **kwds):
        """Calls the graph function."""
        if self._created_variables:
            # In this case we have created variables on the first call, so we run the
            # defunned version which is guaranteed to never create variables.
            return self._stateless_fn(*args, **kwds)  # pylint: disable=not-callable
        elif self._stateful_fn is not None:
            # In this case we have not created variables on the first call. So we can
            # run the first trace but we should fail if variables are created.
            results = self._first_trace(*args, **kwds)
            if self._created_variables:
                raise ValueError(
                    "Creating variables on a non-first call to a function"
                    " decorated with tf.function.")
            return results

        self._initialize(args, kwds)

        if not self._created_variables:
            # If we did not create any variables the trace we have is good enough.
            return _call_concrete(self._concrete_stateful_fn, args, kwds)()

        def fn_with_cond(*inner_args, **inner_kwds):
            """Conditionally runs initialization if it's needed."""
            condition = True
            for variable in self._created_variables:
                condition = condition and resource_variable_ops.var_is_initialized_op(
                    variable.handle)
            # We want to call stateless_fn if possible because it avoids recomputing
            # potentially expensive initializers.
            return control_flow_ops.cond(
                condition,
                lambda: self._stateless_fn(*inner_args, **inner_kwds),
                _call_concrete(self._concrete_stateful_fn, inner_args,
                               inner_kwds))

        return function_lib.defun(fn_with_cond)(*args, **kwds)
Example #36
    def all_metric_results(self, summary_logdir=None):
        """Computes results for all contained metrics.

    Args:
      summary_logdir: An optional string. If specified, metric results
        will be written as summaries to this directory.

    Returns:
      A `dict` mapping string names to tensors.
    """
        if summary_logdir is None:
            with summary_ops.never_record_summaries():
                return self._all_metric_results()
        else:

            def f():
                with summary_ops.create_file_writer(summary_logdir).as_default(
                ), summary_ops.always_record_summaries():
                    return self._all_metric_results()

            if context.executing_eagerly():
                return f()
            else:
                return function.defun(f)()
Example #37
    def _defun_with_scope(self, scope):
        """Creates a defun wrapped inside a variable creator scope."""

        weak_wrapped_fn = None

        def wrapped_fn(*args, **kwds):
            """Wraps `self._python_function` in a variable creator scope."""
            # We register a variable creator with reduced priority. If an outer
            # variable creator is just modifying keyword arguments to the variable
            # constructor, this will work harmoniously. Since the `scope` registered
            # here actually creates the variable, it taking priority would otherwise
            # ignore the outer creator.
            #
            # If an outer variable creator calls the variable constructor manually,
            # for example creating a MirroredVariable, then they won't call our
            # creator. This means we won't be able to trace the initialization graph,
            # and so variable initializers can't depend on function arguments. This is
            # better than the alternative, tracing the initialization graph but giving
            # the user a variable type they didn't want.
            with ops.get_default_graph()._variable_creator_scope(scope,
                                                                 priority=50):  # pylint: disable=protected-access
                # __wrapped__ allows AutoGraph to swap in a converted function. We give
                # the function a weak reference to itself to avoid a reference cycle.
                return weak_wrapped_fn().__wrapped__(*args, **kwds)

        weak_wrapped_fn = weakref.ref(wrapped_fn)

        # TODO(mdan): Pipe self._experimental_autograph_options through.
        return function_lib.defun(
            tf_decorator.make_decorator(
                self._python_function,
                wrapped_fn,
                decorator_argspec=self._function_spec.fullargspec),
            input_signature=self.input_signature,
            autograph=self._autograph,
            experimental_autograph_options=self._experimental_autograph_options)
Example #38
 def _benchmark_defun_matmul(self, m, transpose_b, num_iters):
   f = function.defun(math_ops.matmul)
   func = lambda: f(m, m, transpose_b)
   self._run(func, num_iters)
Example #39
    def __call__(self, *args, **kwds):
        """Calls the graph function."""
        if self._created_variables:
            # In this case we have created variables on the first call, so we run the
            # defunned version which is guaranteed to never create variables.
            return self._stateless_fn(*args, **kwds)  # pylint: disable=not-callable
        elif self._stateful_fn is not None:
            # In this case we have not created variables on the first call. So we can
            # run the first trace but we should fail if variables are created.
            results = self._stateful_fn(*args, **kwds)
            if self._created_variables:
                raise ValueError(
                    "Creating variables on a non-first call to a function"
                    " decorated with tf.function.")
            return results

        # This is the first call of __call__, so we have to initialize.
        self._initialize(args, kwds)
        canon_args, canon_kwds = self._canonicalize_function_inputs(args, kwds)

        if not self._created_variables:
            # If we did not create any variables the trace we have is good enough.
            return self._concrete_stateful_fn._filtered_call(
                canon_args, canon_kwds)  # pylint: disable=protected-access

        def fn_with_cond(*inner_args, **inner_kwds):
            """Conditionally runs initialization if it's needed."""
            condition = True
            for wr in self._created_variables:
                variable = wr()
                if variable is None:
                    raise ValueError(
                        "A tf.Variable created inside your tf.function has been"
                        " garbage-collected. Your code needs to keep Python references"
                        " to variables created inside `tf.function`s.\n"
                        "\n"
                        "A common way to raise this error is to create and return a"
                        " variable only referenced inside your function:\n"
                        "\n"
                        "@tf.function\n"
                        "def f():\n"
                        "  v = tf.Variable(1.0)\n"
                        "  return v\n"
                        "\n"
                        "v = f()  # Crashes with this error message!\n"
                        "\n"
                        "The reason this crashes is that @tf.function annotated"
                        " function returns a **`tf.Tensor`** with the **value** of the"
                        " variable when the function is called rather than the"
                        " variable instance itself. As such there is no code holding a"
                        " reference to the `v` created inside the function and Python"
                        " garbage collects it.\n"
                        "\n"
                        "The simplest way to fix this issue is to create variables"
                        " outside the function and capture them:\n"
                        "\n"
                        "v = tf.Variable(1.0)\n"
                        "\n"
                        "@tf.function\n"
                        "def f():\n"
                        "  return v\n"
                        "\n"
                        "f()  # <tf.Tensor: ... numpy=1.>\n"
                        "v.assign_add(1.)\n"
                        "f()  # <tf.Tensor: ... numpy=2.>")
                condition = math_ops.logical_and(
                    condition,
                    resource_variable_ops.var_is_initialized_op(
                        variable.handle))
            # We want to call stateless_fn if possible because it avoids recomputing
            # potentially expensive initializers.
            return control_flow_ops.cond(
                condition,
                lambda: self._stateless_fn(*inner_args, **inner_kwds),
                functools.partial(
                    self._concrete_stateful_fn._filtered_call,  # pylint: disable=protected-access
                    inner_args,
                    inner_kwds))

        return function_lib.defun(fn_with_cond)(*canon_args, **canon_kwds)
Example #40
def _defun_with_scope(scope, fn):
    def wrapped_fn(*args, **kwds):
        with variable_scope.variable_creator_scope(scope):
            return fn(*args, **kwds)

    return function.defun(wrapped_fn)
Example #41
 def __init__(self, model):
     self._model = model
     self._metrics = {}
     self._evaluators = {}
     if not context.executing_eagerly():
         self.call = function.defun(self.call)
Example #42
        )
    return _jvp_helper(op_name, attr_tuple, inputs, outputs, tangents)


# TODO(allenl): Gradients which rely on static shape information are
# underspecialized under reduce_retracing. We may want hand-written forward
# implementations, or a more satisfying story about how we re-specialize
# gradients which were traced with relaxed shapes (e.g. use conds instead of
# trace-time Python logic).
#
# Using function.defun rather than def_function.function avoids
# tf.config.run_functions_eagerly(True). `_jvp_helper` doesn't successfully run
# eagerly (infinite recursion), and even if it did it would use extra memory and
# run unnecessary computation. The function does not create variables, so the
# two symbols are otherwise equivalent.
_jvp_relaxed_shapes = function.defun(_jvp_helper_wrapper,
                                     reduce_retracing=True)
_jvp_exact_shapes = function.defun(_jvp_helper_wrapper, reduce_retracing=False)

# The maximum number of exact-shape traces to perform for a single op before
# switching to shape relaxation.
_TRACE_COUNT_LIMIT = 32


def _jvp_dispatch(op_name,
                  attr_tuple,
                  inputs,
                  outputs,
                  tangents,
                  use_batch=False):
    """Determine which forwardprop function to call."""
    # Note that this _TRACE_COUNT read races with writes. That's fine, it just
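The snippet is cut off above, but the surrounding comments describe the intended dispatch: count exact-shape traces per op and switch to the relaxed-shape defun once _TRACE_COUNT_LIMIT is reached. A hedged sketch of that logic, assuming a module-level _TRACE_COUNT dict and that the wrapped helpers accept the five positional arguments shown (use_batch is omitted for brevity; _jvp_dispatch_sketch is an illustrative name):

_TRACE_COUNT = {}

def _jvp_dispatch_sketch(op_name, attr_tuple, inputs, outputs, tangents):
  """Uses exact-shape traces until the per-op limit, then relaxes shapes."""
  count = _TRACE_COUNT.get(op_name, 0)
  _TRACE_COUNT[op_name] = count + 1
  if count < _TRACE_COUNT_LIMIT:
    return _jvp_exact_shapes(op_name, attr_tuple, inputs, outputs, tangents)
  return _jvp_relaxed_shapes(op_name, attr_tuple, inputs, outputs, tangents)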
Example #43
 def __init__(self, model):
     self._model = model
     self._metrics = {}
     self._evaluators = {}
     if context.in_graph_mode():
         self.call = function.defun(self.call)
Example #44
def _defun_with_scope(scope, fn, input_signature):
    def wrapped_fn(*args, **kwds):
        with variable_scope.variable_creator_scope(scope):
            return fn(*args, **kwds)

    return function_lib.defun(wrapped_fn, input_signature=input_signature)
Example #45
        )
    return _jvp_helper(op_name, attr_tuple, inputs, outputs, tangents)


# TODO(allenl): Gradients which rely on static shape information are
# underspecialized under experimental_relax_shapes. We may want hand-written
# forward
# implementations, or a more satisfying story about how we re-specialize
# gradients which were traced with relaxed shapes (e.g. use conds instead of
# trace-time Python logic).
#
# Using function.defun rather than def_function.function avoids
# tf.config.run_functions_eagerly(True). `_jvp_helper` doesn't successfully run
# eagerly (infinite recursion), and even if it did it would use extra memory and
# run unnecessary computation. The function does not create variables, so the
# two symbols are otherwise equivalent.
_jvp_relaxed_shapes = function.defun(_jvp_helper_wrapper,
                                     experimental_relax_shapes=True)
_jvp_exact_shapes = function.defun(_jvp_helper_wrapper,
                                   experimental_relax_shapes=False)

# The maximum number of exact-shape traces to perform for a single op before
# switching to shape relaxation.
_TRACE_COUNT_LIMIT = 32


def _jvp_dispatch(op_name,
                  attr_tuple,
                  inputs,
                  outputs,
                  tangents,
                  use_batch=False):
    """Determine which forwardprop function to call."""
Example #46
 def __init__(self, name=None):
     super(MySequential, self).__init__(name=name)
     self.call = function.defun(self.call)
Example #47
def pfor(loop_fn, iters, parallel_iterations=None):
    """Equivalent to running `loop_fn` `iters` times and stacking the outputs.

  `pfor` has functionality similar to `for_loop`, i.e. running `loop_fn` `iters`
  times, with input from 0 to `iters - 1`, and stacking corresponding output of
  each iteration. However the implementation does not use a tf.while_loop.
  Instead it adds new operations to the graph that collectively compute the same
  value as what running `loop_fn` in a loop would compute.


  This is an experimental feature and currently has a lot of limitations:
    - There should be no data dependency between the different iterations. For
      example, a future iteration should not depend on a value or side-effect of
      a previous iteration.
    - Stateful kernels may mostly not be supported since these often imply a
      data dependency or ordering of the iterations. We do support a limited set
      of such stateful kernels though (like RandomFoo, Variable operations like
      reads, etc).
    - Conversion works only on a limited set of kernels for which a converter
      has been registered.
    - loop_fn has limited support for control flow operations. tf.cond in
      particular is not supported.
    - `loop_fn` should return nested structure of Tensors or Operations. However
      if an Operation is returned, it should have zero outputs.
    - The shape and dtype of `loop_fn` outputs should not depend on the input
      to loop_fn.

  Args:
    loop_fn: A function that takes an int32 scalar tf.Tensor object representing
      the iteration number, and optionally a keyword argument `pfor_config` set
      to a PForConfig object. It returns a possibly nested structure of Tensor
      or Operation objects. Note that if the `parallel_iterations` argument is
      set to something other than None, `loop_fn` may be called more than once
      during graph construction, so it may need to avoid mutating global state.
    iters: Number of iterations for which to run loop_fn.
    parallel_iterations: A knob to control how many iterations are vectorized
      and dispatched in parallel. The default value of None corresponds to
      vectorizing all the iterations.  If `parallel_iterations` is smaller than
      `iters`, then chunks of at most that many iterations are dispatched in
      sequence. This knob can be used to control the total memory usage.

  Returns:
    Returns a nested structure of stacked tensor objects with the same nested
    structure as the output of `loop_fn`.
  Raises:
    ValueError: If parallel_iterations is not None and not an integer > 1.
  """
    def f():
        return _pfor_impl(loop_fn,
                          iters,
                          parallel_iterations=parallel_iterations)

    control_flow_context = ops.get_default_graph()._get_control_flow_context()  # pylint: disable=protected-access
    # Note that we wrap into a tf.function if in eager execution mode or under
    # XLA compilation. The latter is so that we don't compile operations like
    # tf.placeholder that are created by the loop body.
    if (context.executing_eagerly()
            or (control_flow_context is not None
                and control_flow_context.IsXLAContext())):
        f = function.defun(f)
    return f()
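A minimal usage sketch for pfor, assuming it is imported from TensorFlow's parallel_for module (the exact import path is an assumption; tf.vectorized_map exposes similar functionality publicly):

import tensorflow as tf
from tensorflow.python.ops.parallel_for.control_flow_ops import pfor

x = tf.random.normal([8, 3])

def loop_fn(i):
  # Iterations are independent: each one only reads row i of x.
  row = tf.gather(x, i)
  return tf.reduce_sum(row * row)

result = pfor(loop_fn, 8)  # Tensor of shape [8], one value per iteration.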
Example #48
  def __call__(self, *args, **kwds):
    """Calls the graph function."""
    context.ensure_initialized()
    if RUN_FUNCTIONS_EAGERLY:
      return self._python_function(*args, **kwds)
    if self._created_variables:
      # In this case we have created variables on the first call, so we run the
      # defunned version which is guaranteed to never create variables.
      return self._stateless_fn(*args, **kwds)  # pylint: disable=not-callable
    elif self._stateful_fn is not None:
      # In this case we have not created variables on the first call. So we can
      # run the first trace but we should fail if variables are created.
      results = self._stateful_fn(*args, **kwds)
      if self._created_variables:
        raise ValueError("Creating variables on a non-first call to a function"
                         " decorated with tf.function.")
      return results

    # This is the first call of __call__, so we have to initialize.
    initializer_map = {}
    self._initialize(args, kwds, add_initializers_to=initializer_map)
    if self._created_variables:
      try:
        # Attempt to initialize variables eagerly and without conds by lifting
        # out initialization graphs. This is the only initialization strategy
        # compatible with XLA at the moment.
        self._initialize_uninitialized_variables(initializer_map)
      except lift_to_graph.UnliftableError:
        pass  # Fall through to cond-based initialization.
      else:
        # Lifting succeeded, so variables are initialized and we can run the
        # stateless function.
        return self._stateless_fn(*args, **kwds)
    else:
      canon_args, canon_kwds = \
          self._stateful_fn._function_spec.canonicalize_function_inputs(  # pylint: disable=protected-access
              *args, **kwds)
      # If we did not create any variables the trace we have is good enough.
      return self._concrete_stateful_fn._filtered_call(canon_args, canon_kwds)  # pylint: disable=protected-access

    def fn_with_cond(*inner_args, **inner_kwds):
      """Conditionally runs initialization if it's needed."""
      condition = True
      for wr in self._created_variables:
        variable = wr()
        if variable is None:
          raise ValueError(
              "A tf.Variable created inside your tf.function has been"
              " garbage-collected. Your code needs to keep Python references"
              " to variables created inside `tf.function`s.\n"
              "\n"
              "A common way to raise this error is to create and return a"
              " variable only referenced inside your function:\n"
              "\n"
              "@tf.function\n"
              "def f():\n"
              "  v = tf.Variable(1.0)\n"
              "  return v\n"
              "\n"
              "v = f()  # Crashes with this error message!\n"
              "\n"
              "The reason this crashes is that @tf.function annotated"
              " function returns a **`tf.Tensor`** with the **value** of the"
              " variable when the function is called rather than the"
              " variable instance itself. As such there is no code holding a"
              " reference to the `v` created inside the function and Python"
              " garbage collects it.\n"
              "\n"
              "The simplest way to fix this issue is to create variables"
              " outside the function and capture them:\n"
              "\n"
              "v = tf.Variable(1.0)\n"
              "\n"
              "@tf.function\n"
              "def f():\n"
              "  return v\n"
              "\n"
              "f()  # <tf.Tensor: ... numpy=1.>\n"
              "v.assign_add(1.)\n"
              "f()  # <tf.Tensor: ... numpy=2.>")
        condition = math_ops.logical_and(
            condition, resource_variable_ops.var_is_initialized_op(
                variable.handle))
      # We want to call stateless_fn if possible because it avoids recomputing
      # potentially expensive initializers.
      return control_flow_ops.cond(
          condition,
          lambda: self._stateless_fn(*inner_args, **inner_kwds),
          functools.partial(self._concrete_stateful_fn._filtered_call,  # pylint: disable=protected-access
                            inner_args, inner_kwds))

    # We've created variables and are unable to lift the initialization graphs,
    # so we fall back to initializing with conds while running the function.
    canon_args, canon_kwds = \
        self._stateful_fn._function_spec.canonicalize_function_inputs(  # pylint: disable=protected-access
            *args, **kwds)
    return function_lib.defun(fn_with_cond)(*canon_args, **canon_kwds)
Example #49
  def __init__(self,
               name,
               func,
               create_scope_now=False,
               unique_name=None,
               custom_getter=None,
               create_graph_function=False):
    """Creates a template for the given function.

    Args:
      name: A name for the scope created by this template. The name will be made
        unique by appending `_N` to it (see how
        `tf.compat.v1.variable_scope` treats the `default_name` for details).
      func: The function to apply each time.
      create_scope_now: Whether to create the scope at Template construction
        time, rather than first call. Defaults to false. Creating the scope at
        construction time may be more convenient if the template is to be passed
        through much lower level code, and you want to be sure of the scope name
        without knowing exactly where it will be first called. If set to True,
        the scope will be created in the constructor, and all subsequent times
        in `__call__`, leading to a trailing numeral being added to the names of
        all created Tensors. If set to False, the scope will be created at the
        first call location.
      unique_name: When used, it overrides `name` and is not made unique. If a
        template of the same scope/unique_name already exists and reuse is
        false, an error is raised. Defaults to None.
      custom_getter: optional custom getter to pass to `variable_scope()`
      create_graph_function: When True, `func` will be executed as a graph
        function. Enabling this flag gives the caller access to graph-function
        semantics, i.e., accesses to variables are totally ordered and
        side-effecting ops are not pruned.

    Raises:
      ValueError: if `name` is None.
    """
    if create_graph_function:
      self._func = function.defun(func)
    else:
      self._func = func
    self._stacktrace = traceback.format_stack()[:-2]
    self._name = name
    self._unique_name = unique_name
    self._custom_getter = custom_getter
    if name is None:
      raise ValueError("name cannot be None.")
    if create_scope_now:
      with variable_scope._pure_variable_scope(  # pylint:disable=protected-access
          (self._unique_name or
           variable_scope._get_unique_variable_scope(self._name)),  # pylint:disable=protected-access
          custom_getter=self._custom_getter) as vs:
        self._variable_scope = vs
    else:
      self._variable_scope = None
    # This variable keeps track of whether the template has been called to
    # completion, which is not the same as whether the scope has been created.
    self._variables_created = False
    # `MirroredStrategy` builds the graph with multiple threads. If a
    # `merge_call` happens within a template, multiple calls may be in progress
    # simultaneously. This variable keeps track of whether any call of the
    # template has started.
    self._first_call = True
Example #50
 def prepare(self):
     self.method = function.defun(self.method)
Example #51
 def testBasic(self):
     with self.test_scope():
         matmul = function.defun(math_ops.matmul)
         t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
         sq = matmul(t, t, transpose_a=True)
         self.assertAllEqual(sq.numpy().reshape(-1), [10, 14, 14, 20])