    def testReluGrad(self, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(t):
            with tape_lib.GradientTape() as tape:
                tape.watch(t)
                result = unified_nn_ops.relu(t)
            grads = tape.gradient(result, t)
            return grads

        with context_lib.set_default(get_immediate_execution_context()):
            positive = TensorCastHelper(constant_op.constant([1.]))
            negative = TensorCastHelper(constant_op.constant([-1.]))

            model_fn = def_function.function(model)
            func_output = model_fn(positive)
            self.assertAllEqual(func_output.numpy(), [1.])
            func_output = model_fn(negative)
            self.assertAllEqual(func_output.numpy(), [0.])

            eager_output = model(positive)
            self.assertAllEqual(eager_output.numpy(), [1.])
            eager_output = model(negative)
            self.assertAllEqual(eager_output.numpy(), [0.])
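For reference: the gradient of relu is 1 for positive inputs and 0 for negative inputs (TensorFlow defines it as 0 at exactly zero as well), which is what the [1.] and [0.] assertions verify in both the traced and eager paths.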
Example #2
  def __call__(self, *args, **kwargs):
    # Flatten arguments.
    flat_args = nest.flatten(args, expand_composites=True)
    flat_kwargs = nest.flatten(kwargs, expand_composites=True)
    all_args = flat_args + flat_kwargs

    # Trace
    outer_ctx = context_lib.get_default()
    ctx = NewTracingContext(self.name)
    with context_lib.set_default(ctx):
      # TODO(srbs): Iterating over list of inputs is a known performance
      # bottleneck. Add a pybind API for this.
      inputs = [ctx.AddParameter(arg.DataType()) for arg in all_args]
      structured_args = nest.pack_sequence_as(args, inputs[:len(flat_args)])
      structured_kwargs = nest.pack_sequence_as(kwargs, inputs[len(flat_args):])
      structured_outputs = self._python_func(*structured_args,
                                             **structured_kwargs)

      py_outputs = nest.flatten(structured_outputs, expand_composites=True)
      num_outputs = len(py_outputs)
      # TODO(srbs): Drop Nones before calling Finalize.
      finalized_f = ctx.Finalize(py_outputs)
      outer_ctx.RegisterFunction(finalized_f)

    # Build call op
    call_op = outer_ctx.CreateOperation(self.name, "")
    call_op.SetOpName(self.name)
    for arg in all_args:
      call_op.AddInput(arg)
    call_op_outputs = call_op.Execute(num_outputs)

    # Cleanup
    outer_ctx.RemoveFunction(self.name)

    return nest.pack_sequence_as(structured_outputs, call_op_outputs)
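This __call__ traces self._python_func into a new function, registers it with the outer context, executes it through a call op, and then unregisters it; note there is no caching, so tracing happens on every call. A minimal usage sketch, assuming the enclosing class (called Function here, a hypothetical name since the class definition is not shown) takes the Python callable and a function name in its constructor:

def add_fn(a, b):
  return math_ops.add(a, b)

with context_lib.set_default(get_immediate_execution_context()):
  # Hypothetical constructor signature; only __call__ is shown above.
  traced_add = Function(add_fn, name="add_fn")
  x = TensorCastHelper(constant_op.constant([1.]))
  y = TensorCastHelper(constant_op.constant([2.]))
  result = traced_add(x, y)  # Traces add_fn, then executes it as a call op.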
Example #3
    def testLog1p(self, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(a):
            return unified_math_ops.log1p(a)

        with context_lib.set_default(get_immediate_execution_context()):
            a = TensorCastHelper(constant_op.constant([1.]))

            func_output = def_function.function(model)(a)
            self.assertArrayNear(func_output.numpy(), [0.69314], 0.001)

            eager_output = model(a)
            self.assertArrayNear(eager_output.numpy(), [0.69314], 0.001)
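As a sanity check on the expected value: log1p(1.) = ln(2) ≈ 0.69314.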
Example #4
    def testNeg(self, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(a):
            return unified_math_ops.neg(a)

        with context_lib.set_default(get_immediate_execution_context()):
            a = TensorCastHelper(constant_op.constant([2.]))

            func_output = def_function.function(model)(a)
            self.assertAllEqual(func_output.numpy(), [-2.])

            eager_output = model(a)
            self.assertAllEqual(eager_output.numpy(), [-2.])
Example #5
    def testDivNoNan(self, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(a, b):
            return unified_math_ops.div_no_nan(a, b)

        with context_lib.set_default(get_immediate_execution_context()):
            a = TensorCastHelper(constant_op.constant([2.]))
            b = TensorCastHelper(constant_op.constant([4.]))

            func_output = def_function.function(model)(a, b)
            self.assertArrayNear(func_output.numpy(), [0.5], 0.001)

            eager_output = model(a, b)
            self.assertArrayNear(eager_output.numpy(), [0.5], 0.001)
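Here 2./4. = 0.5. What distinguishes div_no_nan from a plain division, though this test does not exercise it, is that it returns 0 instead of inf/nan when the denominator is zero. A hypothetical extra assertion illustrating that behavior (not part of the original test):

            zero = TensorCastHelper(constant_op.constant([0.]))
            self.assertAllEqual(model(a, zero).numpy(), [0.])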
Example #6
    def testAdd(self, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(a, b):
            return math_ops.add(a, b)

        with context_lib.set_default(get_immediate_execution_context()):
            a = TensorCastHelper(constant_op.constant([1., 2.]))
            b = TensorCastHelper(constant_op.constant([3., 4.]))

            func_output = def_function.function(model)(a, b)
            self.assertAllEqual(func_output.numpy(), [4., 6.])

            eager_output = model(a, b)
            self.assertAllEqual(eager_output.numpy(), [4., 6.])
Example #7
  def testAdd(self, use_tfrt, use_mlir):
    if use_mlir:
      SetTracingImplementation("mlir")

    def model(a, b):
      return math_ops.add(a, b)

    eager_ctx = NewImmediateExecutionContext(use_tfrt)
    with context_lib.set_default(eager_ctx):
      a = eager_ctx.CreateFloatScalarHandle(1.)
      b = eager_ctx.CreateFloatScalarHandle(2.)

      func_output = def_function.function(model)(a, b)
      self.assertAllEqual(func_output.numpy(), 3.0)

      eager_output = model(a, b)
      self.assertAllEqual(eager_output.numpy(), 3.0)
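Unlike the earlier testAdd, this variant is parameterized over the runtime as well as the tracing implementation: NewImmediateExecutionContext(use_tfrt) builds the eager context on either TFRT or the legacy TensorFlow runtime, and the inputs are created directly as float scalar handles on that context rather than wrapped with TensorCastHelper.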
Example #8
    def _computeMnistMlpGrads(self, math_ops_lib, nn_ops_lib, backprop_lib,
                              cast, num_iters, hidden_layers, hidden_size,
                              batch_size):
        batch_size = 1  # NOTE: shadows the batch_size argument; the benchmark always runs with batch size 1.
        image_size = 28 * 28
        num_classes = 10

        def model(x, hidden_weights, softmax_weight, labels):
            with backprop_lib.GradientTape() as tape:
                for weight in hidden_weights + [softmax_weight]:
                    tape.watch(weight)
                for hidden_weight in hidden_weights:
                    x = math_ops_lib.mat_mul(x, hidden_weight)
                    x = nn_ops_lib.relu(x)
                logits = math_ops_lib.mat_mul(x, softmax_weight)
                loss = nn_ops_lib.sparse_softmax_cross_entropy_with_logits(
                    logits=logits, labels=labels)

            grads = tape.gradient(loss, hidden_weights + [softmax_weight])
            return grads

        x = maybe_cast(array_ops.ones([batch_size, image_size]), cast)
        hidden_weights = []
        for i in range(hidden_layers):
            hidden_weights.append(
                maybe_cast(
                    random_ops.random_uniform(
                        [hidden_size if i else image_size, hidden_size]),
                    cast))
        softmax_weight = maybe_cast(
            random_ops.random_uniform([hidden_size, num_classes]), cast)
        labels = maybe_cast(array_ops.zeros([batch_size], dtype=dtypes.int32),
                            cast)

        with context_lib.set_default(get_immediate_execution_context()):
            # Warm up.
            for _ in range(10):
                model(x, hidden_weights, softmax_weight, labels)
            runtimes = timeit.repeat(
                lambda: model(x, hidden_weights, softmax_weight, labels),
                repeat=num_iters,
                number=10)
        return min(runtimes) / 10  # Fastest repeat, divided by number=10 calls per repeat.
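A hypothetical invocation of this helper (the argument values below are illustrative, not taken from the original benchmark):

        min_time = self._computeMnistMlpGrads(
            math_ops_lib=unified_math_ops,
            nn_ops_lib=unified_nn_ops,
            backprop_lib=tape_lib,
            cast=True,
            num_iters=5,
            hidden_layers=2,
            hidden_size=256,
            batch_size=1)  # Currently ignored: the helper pins batch_size to 1.
        print("fastest time per grad computation: %f s" % min_time)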
Example #9
    def testLog1pGrad(self, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(a):
            with tape_lib.GradientTape() as tape:
                tape.watch(a)
                result = unified_math_ops.log1p(a)
            grads = tape.gradient(result, a)
            return grads

        with context_lib.set_default(get_immediate_execution_context()):
            a = TensorCastHelper(constant_op.constant([1.]))

            func_outputs = def_function.function(model)(a)
            self.assertArrayNear(func_outputs.numpy(), [0.5], 0.001)

            eager_outputs = model(a)
            self.assertArrayNear(eager_outputs.numpy(), [0.5], 0.001)
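Since d/dx log1p(x) = 1/(1 + x), the gradient at x = 1. is 0.5, matching the asserted value.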
Example #10
    def testAddGrad(self, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(a, b):
            with tape_lib.GradientTape() as tape:
                tape.watch(a)
                tape.watch(b)
                result = unified_math_ops.add(a, b)
            grads = tape.gradient(result, [a, b])
            return grads

        with context_lib.set_default(get_immediate_execution_context()):
            a = TensorCastHelper(constant_op.constant([1., 2.]))
            b = TensorCastHelper(constant_op.constant([3., 4.]))

            func_outputs = def_function.function(model)(a, b)
            self.assertAllEqual(func_outputs[0].numpy(), [1.0, 1.0])
            self.assertAllEqual(func_outputs[1].numpy(), [1.0, 1.0])

            eager_outputs = model(a, b)
            self.assertAllEqual(eager_outputs[0].numpy(), [1.0, 1.0])
            self.assertAllEqual(eager_outputs[1].numpy(), [1.0, 1.0])
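Addition routes the incoming gradient unchanged to both operands, so the gradient with respect to each input is a vector of ones regardless of the input values.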
Example #11
    def testDivNoNanGrad(self, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(a, b):
            with tape_lib.GradientTape() as tape:
                tape.watch(a)
                tape.watch(b)
                result = unified_math_ops.div_no_nan(a, b)
            grads = tape.gradient(result, [a, b])
            return grads

        with context_lib.set_default(get_immediate_execution_context()):
            a = TensorCastHelper(constant_op.constant([2.]))
            b = TensorCastHelper(constant_op.constant([4.]))

            func_outputs = def_function.function(model)(a, b)
            self.assertArrayNear(func_outputs[0].numpy(), [0.25], 0.001)
            self.assertArrayNear(func_outputs[1].numpy(), [-0.125], 0.001)

            eager_outputs = model(a, b)
            self.assertArrayNear(eager_outputs[0].numpy(), [0.25], 0.001)
            self.assertArrayNear(eager_outputs[1].numpy(), [-0.125], 0.001)
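These expected values follow from the quotient derivatives: ∂(a/b)/∂a = 1/b = 1/4 = 0.25 and ∂(a/b)/∂b = -a/b² = -2/16 = -0.125.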
Example #12
    def testAddGrad(self, use_tfrt, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(a, b):
            with tape_lib.GradientTape() as tape:
                tape.watch(a)
                tape.watch(b)
                result = math_ops.add(a, b)
            grads = tape.gradient(result, [a, b])
            return grads

        eager_ctx = NewImmediateExecutionContext(use_tfrt)
        with context_lib.set_default(eager_ctx):
            a = eager_ctx.CreateFloatScalarHandle(1.)
            b = eager_ctx.CreateFloatScalarHandle(2.)

            func_outputs = def_function.function(model)(a, b)
            self.assertAllEqual(func_outputs[0].numpy(), 1.0)
            self.assertAllEqual(func_outputs[1].numpy(), 1.0)

            eager_outputs = model(a, b)
            self.assertAllEqual(eager_outputs[0].numpy(), 1.0)
            self.assertAllEqual(eager_outputs[1].numpy(), 1.0)
Example #13
  def __enter__(self):
    """Enters a context inside which operations are recorded on this tape."""
    self._ctx_manager = context_stack.set_default(self._tape_context)
    self._ctx_manager.__enter__()
    return self
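The matching __exit__ is not shown in this example. A minimal sketch of what it would have to do, assuming its only responsibility is unwinding the context manager created in __enter__ (an assumption, since the original method is not included here):

  def __exit__(self, exc_type, exc_value, traceback):
    """Exits the recording context."""
    # Assumption: delegate to the context manager entered in __enter__,
    # which pops the tape context off the context stack.
    self._ctx_manager.__exit__(exc_type, exc_value, traceback)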