Example #1
  def Test(self):
    np_val = np.matrix(a_np_) * np.matrix(b_np_)

    use_gpu = True
    if a_np_.dtype == np.float16 and (
        not test_util.CudaSupportsHalfMatMulAndConv()):
      use_gpu = False
      print("Built without fp16 matmul support for Cuda, running test on CPU.")

    # Transpose and possibly conjugate a_np_ and b_np_ according to the
    # attributes such that tf.matmul(effective_a_np, effective_b_np, **kwargs)
    # results in a valid matrix multiplication and produces the same result as
    # np.matrix(a_np_) * np.matrix(b_np_)
    effective_a_np = _GetTransposedMatrices(a_np_, "a", kwargs_)
    effective_b_np = _GetTransposedMatrices(b_np_, "b", kwargs_)
    with self.test_session(use_gpu=use_gpu) as sess:
      if use_static_shape_:
        a = constant_op.constant(effective_a_np)
        b = constant_op.constant(effective_b_np)
        res = math_ops.matmul(a, b, **kwargs_)
        tf_val = res.eval()
      else:
        a = array_ops.placeholder(a_np_.dtype)
        b = array_ops.placeholder(b_np_.dtype)
        res = math_ops.matmul(a, b, **kwargs_)
        tf_val = sess.run(res, feed_dict={a: effective_a_np, b: effective_b_np})

    self.assertAllCloseAccordingToType(
        tf_val,
        np_val,
        float_rtol=2e-5,
        float_atol=2e-5,
        half_rtol=0.2,
        half_atol=0.2)
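
The comment above refers to a helper _GetTransposedMatrices that is not shown in this excerpt. A minimal sketch of what such a helper could look like, assuming kwargs_ carries boolean transpose_a/transpose_b/adjoint_a/adjoint_b flags matching tf.matmul's keyword arguments (the implementation below is illustrative, not the library's own):

import numpy as np

def _GetTransposedMatrices(x, x_name, kwargs):
  # Pre-transpose (and conjugate in the adjoint case) so that
  # tf.matmul(effective_a, effective_b, **kwargs) undoes the change and
  # reproduces the plain product np.matrix(a_np_) * np.matrix(b_np_).
  if kwargs.get("transpose_" + x_name, False):
    return x.T
  elif kwargs.get("adjoint_" + x_name, False):
    return np.conj(x.T)
  return x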
Example #2
 def _DtypesToTest(self, use_gpu):
   if use_gpu and not test_util.CudaSupportsHalfMatMulAndConv():
     return [dtypes.float32]
   else:
     # It is important that float32 comes before float16 here,
     # as we will be using its gradients as reference for fp16 gradients.
     return [dtypes.float32, dtypes.float16]
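
The ordering comment above matters because fp16 results are usually validated against an fp32 reference rather than against exact values. A hypothetical sketch of that pattern (grad_fn stands for any function that computes a gradient from a NumPy input; the name and tolerances are illustrative):

import numpy as np

def check_fp16_against_fp32(grad_fn, x):
  # Compute the gradient in float32 first and reuse it as the reference
  # when checking the float16 result, with correspondingly loose tolerances.
  ref = grad_fn(x.astype(np.float32))
  val = grad_fn(x.astype(np.float16))
  np.testing.assert_allclose(val.astype(np.float32), ref, rtol=1e-3, atol=1e-3)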
Example #3
 def testHalfBasic(self):
   x = np.arange(1., 5.).reshape([4, 1]).astype(np.float16)
   y = np.arange(1., 3.).reshape([1, 2]).astype(np.float16)
   self._testCpuMatmul(x, y)
   if test_util.CudaSupportsHalfMatMulAndConv():
     self._testGpuMatmul(x, y)
   else:
     print("Built without fp16 matmul support, skipping GPU test.")
Example #4
 def testHalfRandomTransposeBoth(self):
   for _ in range(10):
     n, k, m = np.random.randint(1, 10, size=3)  # Smaller range than float.
     x = self._randMatrix(k, n, np.float16)
     y = self._randMatrix(m, k, np.float16)
     self._testCpuMatmul(x, y, True, True)
     if test_util.CudaSupportsHalfMatMulAndConv():
       self._testGpuMatmul(x, y, True, True)
     else:
       print("Built without fp16 matmul support, skipping GPU test.")
Example #5
  def _VerifyOneTest(self, pool_func, input_sizes, ksize, strides, padding,
                     data_format, expected, use_gpu):
    """Verifies the output values of the pooling function.

    Args:
      pool_func: Pooling function to be called, e.g. tf.nn.max_pool or
        tf.nn.avg_pool.
      input_sizes: Input tensor dimensions.
      ksize: The kernel size dimensions.
      strides: The stride dimensions.
      padding: Padding type.
      data_format: The data format we use to run the pooling operation.
      expected: An array containing the expected operation outputs.
      use_gpu: Whether we are running on GPU.
    """
    self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
                        data_format, tf.float32, expected, use_gpu)

    if not use_gpu or test_util.CudaSupportsHalfMatMulAndConv():
      self._VerifyOneType(pool_func, input_sizes, ksize, strides, padding,
                          data_format, tf.float16, expected, use_gpu)
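
_VerifyOneType is not shown in this excerpt. A hypothetical sketch of such a per-dtype check, assuming TF 2.x eager execution and a ramp-filled input (the function name, input fill, and tolerances are illustrative, not the library's actual implementation):

import numpy as np
import tensorflow as tf

def verify_one_type(pool_func, input_sizes, ksize, strides, padding,
                    data_format, dtype, expected):
  # Fill the input with a 1..N ramp, run the pooling op, and compare the
  # flattened output against the expected values.
  total = int(np.prod(input_sizes))
  x = np.arange(1.0, total + 1.0).reshape(input_sizes)
  t = tf.constant(x, dtype=dtype)
  out = pool_func(t, ksize=ksize, strides=strides, padding=padding,
                  data_format=data_format)
  np.testing.assert_allclose(np.asarray(out).flatten(), expected,
                             rtol=1e-3, atol=1e-3)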
Example #6
 def testHalfVector(self):
   self._vectorTest(np.float16, gpu=False)
   if test_util.CudaSupportsHalfMatMulAndConv():
     self._vectorTest(np.float16, gpu=True)
   else:
     print("Built without fp16 matmul support, skipping GPU test.")