Example 1
  def testCompileInGraphMode(self):
    if not xla.is_xla_available():
      self.skipTest('Skipping test: XLA is not available.')

    @xla.compile_in_graph_mode
    def add(x, y):
      return x + y

    z = add(1.0, 2.0)
    self.assertAllClose(3.0, self.evaluate(z))

    @xla.compile_in_graph_mode
    def add_subtract(x, y):
      return {'add': x + y, 'sub': x - y}

    z = add_subtract(1.0, 2.0)
    self.assertAllClose({'add': 3.0, 'sub': -1.0}, self.evaluate(z))

    @xla.compile_in_graph_mode
    def add_divide(x, yz):
      return x + yz['y'] / yz['z']

    z = add_divide(1.0, {'y': 2.0, 'z': 3.0})
    self.assertAllClose(1.0 + 2.0 / 3.0, self.evaluate(z))

    if not tf.compat.v1.executing_eagerly():
      # TF2 seems to have trouble with soft device placement (in both eager and
      # tf.function mode), and here we're specifically testing what happens when
      # XLA is not available, e.g. because we didn't compile with GPU support.
      with tf.device('/gpu:0'):
        z = add_subtract(1.0, 2.0)
      self.assertAllClose({'add': 3.0, 'sub': -1.0}, self.evaluate(z))
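
Not one of the collected snippets: below is a minimal usage sketch of the decorator outside a test harness, based only on what Example 1 exercises. The import path of the xla helper module is an assumption (the tests only show it already imported as xla), and the function and variable names are made up; the scalar result, dict-valued result, and dict argument mirror the calls in the test above.

import tensorflow as tf
from my_project.internal import xla  # Hypothetical path; adjust to your tree.


@xla.compile_in_graph_mode
def scale_and_shift(x, params):
  # Structured (dict) inputs and outputs round-trip through the compiled call,
  # just like add_subtract and add_divide above.
  return {'scaled': params['w'] * x, 'shifted': x + params['b']}


if xla.is_xla_available():
  out = scale_and_shift(tf.constant(2.0), {'w': 3.0, 'b': 1.0})
  # out['scaled'] == 6.0 and out['shifted'] == 3.0 (as tensors).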
Example 2
  def testIsXLAAvailable(self):
    available = False
    try:
      self.evaluate(tf.xla.experimental.compile(lambda: tf.constant(0.0)))
      available = True
    except:  # pylint: disable=bare-except
      pass
    self.assertEqual(available, xla.is_xla_available())
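
Also not from the collected snippets: a sketch of the availability probe that Example 2 verifies, written as a standalone helper. It builds and runs a trivial XLA-compiled constant in a throwaway graph, mirroring the test's use of tf.xla.experimental.compile (deprecated in TF 2.x); the broad except is deliberate, since any failure simply means XLA is not usable here.

import tensorflow as tf


def probe_xla_available():
  """Returns True if a trivial computation can be built and run under XLA."""
  try:
    with tf.Graph().as_default():
      compiled = tf.xla.experimental.compile(lambda: tf.constant(0.0))
      with tf.compat.v1.Session() as sess:
        sess.run(compiled)
    return True
  except Exception:  # Broad on purpose: any failure means XLA is unavailable.
    return False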