    def testReluGrad(self, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(t):
            with tape_lib.GradientTape() as tape:
                tape.watch(t)
                result = unified_nn_ops.relu(t)
            grads = tape.gradient(result, t)
            return grads

        with context_lib.set_default(get_immediate_execution_context()):
            positive = TensorCastHelper(constant_op.constant([1.]))
            negative = TensorCastHelper(constant_op.constant([-1.]))
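            # relu passes the incoming gradient through where the input is
            # positive (d relu(t)/dt = 1 for t > 0) and blocks it where the
            # input is negative (0 for t < 0), so the expected gradients are
            # [1.] and [0.].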

            model_fn = def_function.function(model)
            func_output = model_fn(positive)
            self.assertAllEqual(func_output.numpy(), [1.])
            func_output = model_fn(negative)
            self.assertAllEqual(func_output.numpy(), [0.])

            eager_output = model(positive)
            self.assertAllEqual(eager_output.numpy(), [1.])
            eager_output = model(negative)
            self.assertAllEqual(eager_output.numpy(), [0.])

    def testLog1p(self, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(a):
            return unified_math_ops.log1p(a)

        with context_lib.set_default(get_immediate_execution_context()):
            a = TensorCastHelper(constant_op.constant([1.]))
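            # log1p(x) computes ln(1 + x), so log1p(1.) = ln(2) ≈ 0.6931,
            # matched below within a 0.001 tolerance.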

            func_output = def_function.function(model)(a)
            self.assertArrayNear(func_output.numpy(), [0.69314], 0.001)

            eager_output = model(a)
            self.assertArrayNear(eager_output.numpy(), [0.69314], 0.001)

    def testNeg(self, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(a):
            return unified_math_ops.neg(a)

        with context_lib.set_default(get_immediate_execution_context()):
            a = TensorCastHelper(constant_op.constant([2.]))
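            # neg(a) is elementwise negation, so [2.] maps to [-2.].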

            func_output = def_function.function(model)(a)
            self.assertAllEqual(func_output.numpy(), [-2.])

            eager_output = model(a)
            self.assertAllEqual(eager_output.numpy(), [-2.])

    def testDivNoNan(self, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(a, b):
            return unified_math_ops.div_no_nan(a, b)

        with context_lib.set_default(get_immediate_execution_context()):
            a = TensorCastHelper(constant_op.constant([2.]))
            b = TensorCastHelper(constant_op.constant([4.]))
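            # div_no_nan(a, b) computes a / b elementwise, returning 0
            # wherever b == 0; here 2. / 4. = 0.5.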

            func_output = def_function.function(model)(a, b)
            self.assertArrayNear(func_output.numpy(), [0.5], 0.001)

            eager_output = model(a, b)
            self.assertArrayNear(eager_output.numpy(), [0.5], 0.001)
Example #5
    def testAdd(self, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(a, b):
            return math_ops.add(a, b)

        with context_lib.set_default(get_immediate_execution_context()):
            a = TensorCastHelper(constant_op.constant([1., 2.]))
            b = TensorCastHelper(constant_op.constant([3., 4.]))

            func_output = def_function.function(model)(a, b)
            self.assertAllEqual(func_output.numpy(), [4., 6.])

            eager_output = model(a, b)
            self.assertAllEqual(eager_output.numpy(), [4., 6.])
Example #6
  def testAdd(self, use_tfrt, use_mlir):
    if use_mlir:
      SetTracingImplementation("mlir")

    def model(a, b):
      return math_ops.add(a, b)

    eager_ctx = NewImmediateExecutionContext(use_tfrt)
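    # The call above creates an eager (immediate) execution context; use_tfrt
    # presumably selects the TFRT runtime instead of the legacy one.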
    with context_lib.set_default(eager_ctx):
      a = eager_ctx.CreateFloatScalarHandle(1.)
      b = eager_ctx.CreateFloatScalarHandle(2.)

      func_output = def_function.function(model)(a, b)
      self.assertAllEqual(func_output.numpy(), 3.0)

      eager_output = model(a, b)
      self.assertAllEqual(eager_output.numpy(), 3.0)

    def testLog1pGrad(self, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(a):
            with tape_lib.GradientTape() as tape:
                tape.watch(a)
                result = unified_math_ops.log1p(a)
            grads = tape.gradient(result, a)
            return grads

        with context_lib.set_default(get_immediate_execution_context()):
            a = TensorCastHelper(constant_op.constant([1.]))
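            # d log1p(x)/dx = 1 / (1 + x), so the gradient at x = 1. is 0.5.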

            func_outputs = def_function.function(model)(a)
            self.assertArrayNear(func_outputs.numpy(), [0.5], 0.001)

            eager_outputs = model(a)
            self.assertArrayNear(eager_outputs.numpy(), [0.5], 0.001)

    def testAddGrad(self, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(a, b):
            with tape_lib.GradientTape() as tape:
                tape.watch(a)
                tape.watch(b)
                result = unified_math_ops.add(a, b)
            grads = tape.gradient(result, [a, b])
            return grads

        with context_lib.set_default(get_immediate_execution_context()):
            a = TensorCastHelper(constant_op.constant([1., 2.]))
            b = TensorCastHelper(constant_op.constant([3., 4.]))
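            # Addition routes the incoming gradient unchanged to each operand,
            # so both gradients are all-ones tensors of the inputs' shape.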

            func_outputs = def_function.function(model)(a, b)
            self.assertAllEqual(func_outputs[0].numpy(), [1.0, 1.0])
            self.assertAllEqual(func_outputs[1].numpy(), [1.0, 1.0])

            eager_outputs = model(a, b)
            self.assertAllEqual(eager_outputs[0].numpy(), [1.0, 1.0])
            self.assertAllEqual(eager_outputs[1].numpy(), [1.0, 1.0])

    def testDivNoNanGrad(self, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(a, b):
            with tape_lib.GradientTape() as tape:
                tape.watch(a)
                tape.watch(b)
                result = unified_math_ops.div_no_nan(a, b)
            grads = tape.gradient(result, [a, b])
            return grads

        with context_lib.set_default(get_immediate_execution_context()):
            a = TensorCastHelper(constant_op.constant([2.]))
            b = TensorCastHelper(constant_op.constant([4.]))
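            # For f(a, b) = a / b: df/da = 1 / b = 0.25 and
            # df/db = -a / b**2 = -2. / 16. = -0.125.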

            func_outputs = def_function.function(model)(a, b)
            self.assertArrayNear(func_outputs[0].numpy(), [0.25], 0.001)
            self.assertArrayNear(func_outputs[1].numpy(), [-0.125], 0.001)

            eager_outputs = model(a, b)
            self.assertArrayNear(eager_outputs[0].numpy(), [0.25], 0.001)
            self.assertArrayNear(eager_outputs[1].numpy(), [-0.125], 0.001)
Example #10
    def testAddGrad(self, use_tfrt, use_mlir):
        if use_mlir:
            SetTracingImplementation("mlir")

        def model(a, b):
            with tape_lib.GradientTape() as tape:
                tape.watch(a)
                tape.watch(b)
                result = math_ops.add(a, b)
            grads = tape.gradient(result, [a, b])
            return grads

        eager_ctx = NewImmediateExecutionContext(use_tfrt)
        with context_lib.set_default(eager_ctx):
            a = eager_ctx.CreateFloatScalarHandle(1.)
            b = eager_ctx.CreateFloatScalarHandle(2.)
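            # As in the vector case above, d(a + b)/da = d(a + b)/db = 1, so
            # both gradients are the scalar 1.0.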

            func_outputs = def_function.function(model)(a, b)
            self.assertAllEqual(func_outputs[0].numpy(), 1.0)
            self.assertAllEqual(func_outputs[1].numpy(), 1.0)

            eager_outputs = model(a, b)
            self.assertAllEqual(eager_outputs[0].numpy(), 1.0)
            self.assertAllEqual(eager_outputs[1].numpy(), 1.0)
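
# The use_mlir / use_tfrt flags above imply these methods run under a
# parameterized test harness. Below is a minimal sketch of such a harness;
# the class name, parameter labels, and placeholder body are illustrative
# assumptions, not taken from the original snippets, and the sketch assumes
# the helpers used above (e.g. SetTracingImplementation) are already in scope.
from absl.testing import parameterized

from tensorflow.python.platform import test


class UnifiedApiTest(test.TestCase, parameterized.TestCase):

  @parameterized.named_parameters(
      ("Graph", False),  # run with the default tracing implementation
      ("Mlir", True),    # run with the MLIR-based tracing implementation
  )
  def testExample(self, use_mlir):
    if use_mlir:
      SetTracingImplementation("mlir")  # helper from the snippets above
    # ... body as in any of the tests above ...


if __name__ == "__main__":
  test.main()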