  # NOTE: these snippets are methods of a TensorFlow test case and assume the
  # usual internal test imports (array_ops, backprop, constant_op, context,
  # dtypes, math_ops, ops, pywrap_tensorflow, resource_variable_ops).
  def test_dense_var_to_tensor_read_dtype_same_as_var_dtype(self):
    # read_dtype matches the variable's dtype (both float32).
    v = resource_variable_ops.ResourceVariable(1.0, dtype=dtypes.float32)
    v = resource_variable_ops._MixedPrecisionVariable(v, dtypes.float32)
    if not context.executing_eagerly():
      v.initializer.run()

    # Requesting a dtype that differs from read_dtype returns NotImplemented.
    self.assertEqual(
        NotImplemented, v._dense_var_to_tensor(dtype=dtypes.float16))
    self.assertEqual(NotImplemented,
                     v._dense_var_to_tensor(dtype=dtypes.float16, as_ref=True))

    # as_ref=False returns a plain tensor carrying the read_dtype.
    t = v._dense_var_to_tensor(as_ref=False)
    self.assertIsInstance(t, ops.Tensor)
    self.assertEqual(t.dtype, dtypes.float32)
    self.assertEqual(self.evaluate(t), 1.0)

    t = v._dense_var_to_tensor(dtype=dtypes.float32, as_ref=False)
    self.assertIsInstance(t, ops.Tensor)
    self.assertEqual(t.dtype, dtypes.float32)
    self.assertEqual(self.evaluate(t), 1.0)

    # as_ref=True is not supported by the wrapper and returns NotImplemented.
    self.assertEqual(NotImplemented, v._dense_var_to_tensor(as_ref=True))
    self.assertEqual(NotImplemented,
                     v._dense_var_to_tensor(dtype=dtypes.float32, as_ref=True))
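For context, _dense_var_to_tensor is the hook that ops.convert_to_tensor calls, so the same behavior can be exercised through the public conversion path. The following is a hypothetical companion sketch, assuming the same test-class context and imports as above and that the wrapper registers a tensor-conversion function:

  def test_convert_to_tensor_uses_read_dtype(self):
    # Hypothetical companion check: implicit conversion goes through
    # _dense_var_to_tensor, so the resulting tensor should carry the
    # wrapper's read_dtype rather than the underlying variable dtype.
    v = resource_variable_ops.ResourceVariable(1.0, dtype=dtypes.float32)
    v = resource_variable_ops._MixedPrecisionVariable(v, dtypes.float16)
    if not context.executing_eagerly():
      v.initializer.run()
    t = ops.convert_to_tensor(v)
    self.assertEqual(t.dtype, dtypes.float16)
    self.assertEqual(self.evaluate(t), 1.0)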
Example 3
  def testFastpathExecute_MixedPrecisionVariableMatMulCorrectResponse(self):
    ctx = context.context()
    a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
    a_2_by_2_fp16 = math_ops.cast(a_2_by_2, dtype=dtypes.float16)
    m = resource_variable_ops.ResourceVariable(a_2_by_2)
    m = resource_variable_ops._MixedPrecisionVariable(
        m, read_dtype=dtypes.float16)
    # Fast-path MatMul reads the variable at its read_dtype (float16).
    x = pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._handle, ctx.device_name, "MatMul", None, None, m, m, "transpose_a",
        False, "transpose_b", False)
    # Reference result: the same MatMul on explicit float16 inputs.
    y = pywrap_tensorflow.TFE_Py_FastPathExecute(
        ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2_fp16,
        a_2_by_2_fp16, "transpose_a", False, "transpose_b", False)

    self.assertEqual(x.dtype, dtypes.float16)
    self.assertAllEqual(x, y)
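The fast path is only one way to exercise the wrapper; the regular op path converts it at read_dtype as well. A hypothetical cross-check under the same assumptions as above:

  def test_mixed_precision_variable_matmul_slow_path(self):
    # Hypothetical cross-check: math_ops.matmul converts the wrapper via
    # convert_to_tensor, so it should agree with the fast-path result.
    a_2_by_2 = constant_op.constant(1.0, shape=[2, 2])
    a_2_by_2_fp16 = math_ops.cast(a_2_by_2, dtype=dtypes.float16)
    m = resource_variable_ops.ResourceVariable(a_2_by_2)
    m = resource_variable_ops._MixedPrecisionVariable(
        m, read_dtype=dtypes.float16)
    x = math_ops.matmul(m, m)
    y = math_ops.matmul(a_2_by_2_fp16, a_2_by_2_fp16)
    self.assertEqual(x.dtype, dtypes.float16)
    self.assertAllEqual(x, y)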
Example 5
  def testFastpathExecute_MixedPrecisionVariableTapeWrite(self):
    ctx = context.context()
    with backprop.GradientTape(persistent=True) as tape:
      a_2_by_2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]],
                                      dtype=dtypes.float32)
      a_2_by_2_fp16 = math_ops.cast(a_2_by_2, dtype=dtypes.float16)
      m1 = resource_variable_ops.ResourceVariable(a_2_by_2)
      m2 = resource_variable_ops._MixedPrecisionVariable(
          m1, read_dtype=dtypes.float16)
      # Watch the mixed-precision wrapper itself so the tape records the
      # float16 read.
      tape.watch(m2)
      z = pywrap_tensorflow.TFE_Py_FastPathExecute(
          ctx._handle, ctx.device_name, "MatMul", None, None, a_2_by_2_fp16, m2,
          "transpose_a", False, "transpose_b", False)
    dz_dm = tape.gradient(z, [m2])[0]
    self.assertEqual(dz_dm.dtype, dtypes.float16)

    # For z = matmul(a, m2), dz/dm2 = a^T . (upstream gradient), and
    # gradient() seeds the upstream gradient with ones.
    expected_grads = math_ops.matmul(
        array_ops.transpose(a_2_by_2_fp16),
        constant_op.constant(1., shape=[2, 2], dtype=dtypes.float16)).numpy()
    self.assertAllEqual(dz_dm.numpy(), expected_grads)
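The same gradient behavior should hold outside the fast path, since the tape watches the wrapper itself. A hypothetical variant under the same assumptions as above:

  def test_mixed_precision_variable_tape_slow_path(self):
    # Hypothetical variant: the regular op path should produce the same
    # float16 gradient for the watched wrapper.
    with backprop.GradientTape() as tape:
      a = constant_op.constant([[1.0, 2.0], [3.0, 4.0]], dtype=dtypes.float32)
      a_fp16 = math_ops.cast(a, dtype=dtypes.float16)
      m = resource_variable_ops.ResourceVariable(a)
      m = resource_variable_ops._MixedPrecisionVariable(
          m, read_dtype=dtypes.float16)
      tape.watch(m)
      z = math_ops.matmul(a_fp16, m)
    dz_dm = tape.gradient(z, [m])[0]
    self.assertEqual(dz_dm.dtype, dtypes.float16)
    self.assertAllEqual(
        dz_dm.numpy(),
        math_ops.matmul(
            array_ops.transpose(a_fp16),
            constant_op.constant(1., shape=[2, 2],
                                 dtype=dtypes.float16)).numpy())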