Example 1
0
 def testIsGoogleCudaEnabled(self):
     """Smoke-test for the generated IsGoogleCudaEnabled py wrapper.

     Deliberately assertion-free: it only verifies the wrapper can be
     called, printing which way the build was configured.
     """
     if test_util.IsGoogleCudaEnabled():
         msg = "GoogleCuda is enabled"
     else:
         msg = "GoogleCuda is disabled"
     print(msg)
Example 2
0
    def testTimelineGpu(self):
        """Checks Chrome-trace generation from GPU step stats.

        Skipped (by early return) when the build has no CUDA support.
        Runs a tiny graph on the GPU with full tracing, verifies the
        expected devices appear in the step stats, then validates the
        Chrome trace under every combination of the format toggles.
        """
        if not test_util.IsGoogleCudaEnabled():
            return

        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()

        with self.test_session(force_gpu=True) as sess:
            const1 = tf.constant(1.0, name='const1')
            const2 = tf.constant(2.0, name='const2')
            result = tf.add(const1, const2) + const1 * const2
            sess.run(result, options=run_options, run_metadata=run_metadata)
        self.assertTrue(run_metadata.HasField('step_stats'))
        step_stats = run_metadata.step_stats
        devices = [d.device for d in step_stats.dev_stats]
        self.assertIn('/job:localhost/replica:0/task:0/gpu:0', devices)
        self.assertIn('/gpu:0/stream:all', devices)
        # Exercise every combination of the trace-format toggles. A fresh
        # Timeline is built per pass, mirroring the original code
        # (presumably generate_chrome_trace_format is single-use per
        # Timeline instance — confirm against the timeline module).
        for kwargs in ({},
                       {'show_dataflow': False},
                       {'show_memory': False},
                       {'show_memory': False, 'show_dataflow': False}):
            tl = timeline.Timeline(step_stats)
            ctf = tl.generate_chrome_trace_format(**kwargs)
            self._validateTrace(ctf)
Example 3
0
def GetTestConfigs():
  """Get all the valid tests configs to run.

  Returns:
    all the valid test configs as tuples of data_format and use_gpu.
  """
  # "NCHW" format is not currently supported on CPU, so it is only
  # offered (GPU-only) when the build has CUDA support.
  configs = [("NHWC", False), ("NHWC", True)]
  if test_util.IsGoogleCudaEnabled():
    configs.append(("NCHW", True))
  return configs
Example 4
0
def is_built_with_cuda():
  """Returns whether TensorFlow was built with CUDA (GPU) support.

  Intended only for tests written with `tf.test.TestCase`; the usual
  pattern is skipping cases that require a CUDA (GPU) build.

  >>> class MyTest(tf.test.TestCase):
  ...
  ...   def test_add_on_gpu(self):
  ...     if not tf.test.is_built_with_cuda():
  ...       self.skipTest("test is only applicable on GPU")
  ...
  ...     with tf.device("GPU:0"):
  ...       self.assertEqual(tf.math.add(1.0, 2.0), 3.0)

  TensorFlow official binary is built with CUDA.
  """
  built_with_cuda = _test_util.IsGoogleCudaEnabled()
  return built_with_cuda
Example 5
0
    def _parse_target(self, arith):
        """Resolve an arithmetic name to its most specific supported entry.

        Looks up the target-augmented name first (e.g. 'int:gpu' on CUDA
        builds, 'int:cpu' otherwise) and falls back to the plain name
        (e.g. 'int') when the augmented one is absent from self._fe.
        Raises AssertionError if neither form is supported.
        """
        suffix = ":gpu" if test_util.IsGoogleCudaEnabled() else ":cpu"
        fulltype = arith + suffix

        if fulltype in self._fe:
            return fulltype

        # Fall back to the unaugmented name; validation intentionally
        # stays an assert to preserve the original failure mode.
        assert arith in self._fe, "Fatal: can't find support for arithmetic %s" % (
            arith)
        return arith
Example 6
0
def is_built_with_cuda():
  """Returns whether TensorFlow was built with CUDA (GPU) support."""
  cuda_enabled = _test_util.IsGoogleCudaEnabled()
  return cuda_enabled