Example #1
0
    def test_spmd_with_summary(self):
        """Summary ops inside `strategy.run` work with SPMD enabled.

        After ten runs, every component variable of the distributed `step`
        counter must have been incremented to 10.
        """
        # Soft placement lets summary (string) ops fall back to the host CPU.
        original_device_placement = config.get_soft_device_placement()
        config.set_soft_device_placement(True)
        try:
            strategy, _ = get_tpu_strategy(enable_spmd=True)
            summary_dir = self.get_temp_dir()
            writer = summary_ops.create_file_writer_v2(summary_dir)

            with strategy.scope():
                step = variables.Variable(0, dtype=dtypes.int64)

            @def_function.function
            def run():
                with writer.as_default():
                    summary_ops.scalar("result", step * 2, step=step)
                    step.assign_add(1)

            for _ in range(10):
                strategy.run(run, args=())

            # Each replica's component variable saw all ten increments.
            for val in step.values:
                for var in val.variables:
                    self.assertAllEqual(10, var)
        finally:
            # Restore the global flag even when an assertion above fails, so
            # later tests in the process are not affected by leaked state.
            config.set_soft_device_placement(original_device_placement)
Example #2
0
    def test_distribution(self, distribution):
        """CategoryCrossing over two named string inputs under a strategy."""
        left = np.array([['a', 'b'], ['c', 'd']])
        right = np.array([['e', 'f'], ['g', 'h']])
        inp_dataset = dataset_ops.DatasetV2.from_tensor_slices(
            {'input_1': left, 'input_2': right})
        inp_dataset = batch_wrapper(inp_dataset, 2, distribution)

        # Each left token crossed with each right token of the same row.
        # pyformat: disable
        expected_output = [[b'a_X_e', b'a_X_f', b'b_X_e', b'b_X_f'],
                           [b'c_X_g', b'c_X_h', b'd_X_g', b'd_X_h']]
        config.set_soft_device_placement(True)

        with distribution.scope():
            inputs = [
                keras.Input(shape=(2, ), dtype=dtypes.string, name='input_1'),
                keras.Input(shape=(2, ), dtype=dtypes.string, name='input_2'),
            ]
            crossed = category_crossing.CategoryCrossing()(inputs)
            model = keras.Model(inputs=inputs, outputs=crossed)
        output_dataset = model.predict(inp_dataset)
        self.assertAllEqual(expected_output, output_dataset)
Example #3
0
  def test_strategy_with_file(self, strategy):
    """IndexLookup backed by a vocabulary file works under `strategy`."""
    # TODO(b/180614455): remove this check when MLIR bridge is always enabled.
    if backend.is_tpu_strategy(strategy):
      self.skipTest("This test needs MLIR bridge on TPU.")

    vocab_data = ["earth", "wind", "and", "fire"]
    vocab_file = self._write_to_temp_file("temp", vocab_data)

    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    input_dataset = dataset_ops.Dataset.from_tensor_slices(input_array).batch(
        2, drop_remainder=True)
    # Vocabulary ids start at 2 ("earth"->2 ... "fire"->5); "michigan" is not
    # in the vocab and maps to the OOV bucket (1).
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

    # Soft placement lets string ops with no device kernel fall back to CPU.
    config.set_soft_device_placement(True)

    with strategy.scope():
      input_data = keras.Input(shape=(None,), dtype=dtypes.string)
      layer = index_lookup.IndexLookup(
          max_tokens=None,
          num_oov_indices=1,
          mask_token="",
          oov_token="[OOV]",
          dtype=dtypes.string,
          vocabulary=vocab_file)
      int_data = layer(input_data)
      model = keras.Model(inputs=input_data, outputs=int_data)
    model.compile(loss="mse")
    output_dataset = model.predict(input_dataset)
    self.assertAllEqual(expected_output, output_dataset)
Example #4
0
  def testSoftPlacement(self):
    """Toggling soft placement controls whether a GPU-pinned op may fall back.

    A tf.function re-checks placement on each call, so the same compiled
    `mod` succeeds with soft placement on and fails with it off.
    """
    # Eager mode defaults to soft placement on; graph mode defaults to off.
    if context.executing_eagerly():
      self.assertTrue(config.get_soft_device_placement())
    else:
      self.assertFalse(config.get_soft_device_placement())

    @def_function.function
    def mod():
      # NOTE(review): presumably mod has no GPU kernel, making its placement
      # depend on the soft-placement setting -- confirm.
      with ops.device('/device:GPU:0'):
        a = constant_op.constant(1.0)
        b = constant_op.constant(1.0)
        return math_ops.mod(a, b)

    config.set_soft_device_placement(True)
    self.assertEqual(config.get_soft_device_placement(), True)
    # The python-level getter must mirror the context's own flag.
    self.assertEqual(
        config.get_soft_device_placement(),
        context.context().soft_device_placement)

    # Since soft placement is enabled, the mod operation should work with CPU
    mod()

    config.set_soft_device_placement(False)
    self.assertEqual(config.get_soft_device_placement(), False)
    self.assertEqual(
        config.get_soft_device_placement(),
        context.context().soft_device_placement)

    # Since soft placement is disabled, the mod operation should fail on GPU
    with self.assertRaises(errors.InvalidArgumentError):
      mod()
    def test_tpu_with_multiple_oov(self, strategy):
        """IndexLookup with two OOV buckets, adapted from an in-memory vocab."""
        # TODO(b/180614455): remove this check when MLIR bridge is always enabled.
        if "TPU" in type(strategy).__name__:
            self.skipTest("This test needs MLIR bridge on TPU.")

        adapt_data = [[
            "earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
            "and", "fire"
        ]]
        adapt_ds = dataset_ops.Dataset.from_tensors(adapt_data)
        raw_inputs = np.array([["earth", "wind", "and", "fire"],
                               ["fire", "and", "earth", "michigan"]])
        input_ds = dataset_ops.Dataset.from_tensor_slices(raw_inputs).batch(
            2, drop_remainder=True)
        # With two OOV slots the first real vocabulary id is 3.
        expected_output = [[3, 4, 5, 6], [6, 5, 3, 1]]

        # Allow string ops without TPU kernels to fall back to the host.
        config.set_soft_device_placement(True)

        with strategy.scope():
            inputs = keras.Input(shape=(None, ), dtype=dtypes.string)
            lookup = index_lookup.IndexLookup(
                max_tokens=None,
                num_oov_indices=2,
                mask_token="",
                oov_token="[OOV]",
                dtype=dtypes.string)
            lookup.adapt(adapt_ds)
            model = keras.Model(inputs=inputs, outputs=lookup(inputs))
        model.compile(loss="mse")
        self.assertAllEqual(expected_output, model.predict(input_ds))
    def test_distribution_strategy_output_with_adapt(self, strategy):
        """TextVectorization adapted under a strategy maps tokens to the same
        ids as in the single-device case."""
        vocab_data = [[
            "earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
            "and", "fire"
        ]]
        vocab_dataset = dataset_ops.Dataset.from_tensors(vocab_data)
        input_array = np.array([["earth", "wind", "and", "fire"],
                                ["fire", "and", "earth", "michigan"]])
        input_dataset = dataset_ops.Dataset.from_tensor_slices(
            input_array).batch(2, drop_remainder=True)

        # Ids start at 2 (by adapt frequency); out-of-vocab "michigan" -> 1.
        expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

        # Let string ops with no device kernel fall back to the CPU.
        config.set_soft_device_placement(True)

        with strategy.scope():
            input_data = keras.Input(shape=(None, ), dtype=dtypes.string)
            layer = text_vectorization.TextVectorization(
                max_tokens=None,
                standardize=None,
                split=None,
                output_mode=text_vectorization.INT)
            layer.adapt(vocab_dataset)
            int_data = layer(input_data)
            model = keras.Model(inputs=input_data, outputs=int_data)

        output_dataset = model.predict(input_dataset)
        self.assertAllEqual(expected_output, output_dataset)
Example #7
0
 def setUp(self):
     """Reset the eager context and force hard (strict) device placement."""
     super(HardDevicePlacementTest, self).setUp()
     # pylint: disable=protected-access
     context._reset_context()
     config.set_soft_device_placement(enabled=False)
     context.context().log_device_placement = True
     # Sanity-check that both the config API and the context agree.
     self.assertEqual(config.get_soft_device_placement(), False)
     self.assertEqual(context.context().soft_device_placement, False)
Example #8
0
 def setUp(self):
   """Start a two-worker local cluster with soft placement enabled."""
   super(ClusterPlacementTest, self).setUp()
   context._reset_context()  # pylint: disable=protected-access
   config.set_soft_device_placement(enabled=True)
   context.context().log_device_placement = True
   # Two workers, zero parameter servers; connect eagerly to both.
   workers, _ = test_util.create_local_cluster(2, 0)
   remote.connect_to_remote_host([workers[0].target, workers[1].target])
    def test_distribution_strategy_output_with_adapt(self, strategy):
        """TextVectorization adapted under a (non-TPU) strategy maps tokens to
        the expected integer ids."""
        # TODO(b/180614455): remove this check when MLIR bridge is always enabled.
        if backend.is_tpu_strategy(strategy):
            self.skipTest("This test needs MLIR bridge on TPU.")

        vocab_data = [[
            "earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
            "and", "fire"
        ]]
        vocab_dataset = dataset_ops.Dataset.from_tensors(vocab_data)
        input_array = np.array([["earth", "wind", "and", "fire"],
                                ["fire", "and", "earth", "michigan"]])
        input_dataset = dataset_ops.Dataset.from_tensor_slices(
            input_array).batch(2, drop_remainder=True)

        # Ids start at 2 (by adapt frequency); out-of-vocab "michigan" -> 1.
        expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

        # Let string ops with no device kernel fall back to the CPU.
        config.set_soft_device_placement(True)

        with strategy.scope():
            input_data = keras.Input(shape=(None, ), dtype=dtypes.string)
            layer = text_vectorization.TextVectorization(
                max_tokens=None,
                standardize=None,
                split=None,
                output_mode=text_vectorization.INT)
            layer.adapt(vocab_dataset)
            int_data = layer(input_data)
            model = keras.Model(inputs=input_data, outputs=int_data)

        output_dataset = model.predict(input_dataset)
        self.assertAllEqual(expected_output, output_dataset)
    def test_strategy(self, strategy):
        """End-to-end IndexLookup (adapt + predict) inside a strategy scope."""
        adapt_data = [[
            "earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
            "and", "fire"
        ]]
        adapt_ds = dataset_ops.Dataset.from_tensors(adapt_data)
        raw_inputs = np.array([["earth", "wind", "and", "fire"],
                               ["fire", "and", "earth", "michigan"]])
        input_ds = dataset_ops.Dataset.from_tensor_slices(raw_inputs).batch(
            2, drop_remainder=True)
        # Vocabulary ids start at 2; out-of-vocab "michigan" maps to 1.
        expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

        # Allow string ops without device kernels to fall back to the host.
        config.set_soft_device_placement(True)

        with strategy.scope():
            inputs = keras.Input(shape=(None, ), dtype=dtypes.string)
            lookup = index_lookup.IndexLookup(
                max_tokens=None,
                num_oov_indices=1,
                mask_token="",
                oov_token="[OOV]",
                dtype=dtypes.string)
            lookup.adapt(adapt_ds)
            model = keras.Model(inputs=inputs, outputs=lookup(inputs))
        model.compile(loss="mse")
        self.assertAllEqual(expected_output, model.predict(input_ds))
  def DISABLED_test_tpu_distribution_with_file(self, distribution):
    """Disabled: IndexLookup with a file-based vocabulary on TPU.

    NOTE(review): the DISABLED_ prefix keeps the test runner from picking
    this up; the reason is not recorded here -- confirm before re-enabling.
    """
    vocab_data = ["earth", "wind", "and", "fire"]
    vocab_file = self._write_to_temp_file("temp", vocab_data)

    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    input_dataset = dataset_ops.Dataset.from_tensor_slices(input_array).batch(
        2, drop_remainder=True)
    # Ids start at 2; out-of-vocab "michigan" maps to the OOV bucket (1).
    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

    # Soft placement lets string ops without TPU kernels run on the host.
    config.set_soft_device_placement(True)

    with distribution.scope():
      input_data = keras.Input(shape=(None,), dtype=dtypes.string)
      layer = index_lookup.IndexLookup(
          max_tokens=None,
          num_oov_indices=1,
          mask_token="",
          oov_token="[OOV]",
          dtype=dtypes.string,
          vocabulary=vocab_file)
      int_data = layer(input_data)
      model = keras.Model(inputs=input_data, outputs=int_data)
    model.compile(loss="mse")
    output_dataset = model.predict(input_dataset)
    self.assertAllEqual(expected_output, output_dataset)
Example #12
0
  def test_recover_from_compilation_failures(self, enable_packed_var):
    """A TPU compilation failure must not poison subsequent computations."""
    # TODO(b/148150981): Stop skipping this test once recovery works
    # for non-local TPU.
    if FLAGS.tpu:
      self.skipTest("Recovery fails for non-local TPU, see b/148150981")

    # Disable automatic outside compilation.
    config.set_soft_device_placement(False)
    strategy = get_tpu_strategy(enable_packed_var)

    @def_function.function
    def compilation_failure_run():

      # NOTE(review): presumably random_gamma has no TPU lowering, which is
      # what makes this computation fail to compile -- confirm.
      def computation():
        return random_ops.random_gamma([10], [0.5, 1.5])

      return strategy.run(computation)

    with self.assertRaises(errors.OpError):
      compilation_failure_run()

    @def_function.function
    def good_run():

      def computation():
        return random_ops.random_normal([10])

      return strategy.run(computation)

    # A well-formed computation must still succeed after the failure above.
    good_run()
Example #13
0
    def testSoftPlacement(self):
        """Soft placement lets a GPU-pinned op without a GPU kernel fall back
        to CPU, in both pure eager mode and inside a tf.function."""
        # Eager mode defaults to soft placement on; graph mode defaults to off.
        if context.executing_eagerly():
            self.assertTrue(config.get_soft_device_placement())
        else:
            self.assertFalse(config.get_soft_device_placement())

        # NOTE(review): test_attr is a test-only op presumed to have no GPU
        # kernel, so its placement depends on the soft-placement flag -- confirm.
        def test_attr():
            with ops.device('/device:GPU:0'):
                return test_ops.test_attr(T=dtypes.float32, name='test_attr')

        config.set_soft_device_placement(True)
        self.assertEqual(config.get_soft_device_placement(), True)
        # The python-level getter must mirror the context's own flag.
        self.assertEqual(config.get_soft_device_placement(),
                         context.context().soft_device_placement)

        # Since soft placement is enabled, the test_attr operation should fallback
        # to CPU with pure eager execution as well as functions
        test_attr()
        def_function.function(test_attr)()

        config.set_soft_device_placement(False)
        self.assertEqual(config.get_soft_device_placement(), False)
        self.assertEqual(config.get_soft_device_placement(),
                         context.context().soft_device_placement)

        # Since soft placement is disabled, the test_attr operation should fail on
        # GPU with pure eager execution as well as functions
        with self.assertRaises(errors.InvalidArgumentError):
            test_attr()
        with self.assertRaises(errors.InvalidArgumentError):
            def_function.function(test_attr)()
Example #14
0
    def testSoftPlacement(self):
        """Soft placement governs GPU-pinned `mod` in eager mode and inside a
        freshly-traced tf.function alike."""
        # Eager mode defaults to soft placement on; graph mode defaults to off.
        if context.executing_eagerly():
            self.assertTrue(config.get_soft_device_placement())
        else:
            self.assertFalse(config.get_soft_device_placement())

        # NOTE(review): presumably mod has no GPU kernel, so pinning it to
        # GPU only works when soft placement is on -- confirm.
        def mod():
            with ops.device('/device:GPU:0'):
                a = constant_op.constant(1.0)
                b = constant_op.constant(1.0)
                return math_ops.mod(a, b)

        config.set_soft_device_placement(True)
        self.assertEqual(config.get_soft_device_placement(), True)
        # The python-level getter must mirror the context's own flag.
        self.assertEqual(config.get_soft_device_placement(),
                         context.context().soft_device_placement)

        # Since soft placement is enabled, the mod operation should fallback to CPU
        # with pure eager execution as well as functions
        mod()
        def_function.function(mod)()

        config.set_soft_device_placement(False)
        self.assertEqual(config.get_soft_device_placement(), False)
        self.assertEqual(config.get_soft_device_placement(),
                         context.context().soft_device_placement)

        # Since soft placement is disabled, the mod operation should fail on GPU
        # with pure eager execution as well as functions
        with self.assertRaises(errors.InvalidArgumentError):
            mod()
        with self.assertRaises(errors.InvalidArgumentError):
            def_function.function(mod)()
Пример #15
0
    def testSoftPlacement(self):
        """Soft placement can be turned off and back on; a compiled function
        re-evaluates placement on each call."""
        # This test assumes soft placement starts enabled.
        self.assertEqual(config.get_soft_device_placement(), True)

        @def_function.function
        def mod():
            # NOTE(review): presumably mod has no GPU kernel -- confirm.
            with ops.device('/device:GPU:0'):
                a = constant_op.constant(1.0)
                b = constant_op.constant(1.0)
                return math_ops.mod(a, b)

        # Since soft placement is enabled, the mod operation should work with CPU
        mod()

        config.set_soft_device_placement(False)
        self.assertEqual(config.get_soft_device_placement(), False)
        self.assertEqual(config.get_soft_device_placement(),
                         context.context().soft_device_placement)

        # Since soft placement is disabled, the mod operation should fail on GPU
        with self.assertRaises(errors.InvalidArgumentError):
            mod()

        config.set_soft_device_placement(True)
        self.assertEqual(config.get_soft_device_placement(), True)
        self.assertEqual(config.get_soft_device_placement(),
                         context.context().soft_device_placement)

        # Since soft placement is re-enabled, the mod operation should work with CPU
        mod()
Example #16
0
    def testGpuNone(self):
        """Hiding all GPUs via set_visible_devices leaves only the CPU usable."""
        config.set_soft_device_placement(False)
        gpus = config.list_physical_devices('GPU')
        self.assertGreater(len(gpus), 0)

        cpus = config.list_physical_devices('CPU')
        self.assertEqual(len(cpus), 1)

        self.assertEqual(len(config.get_visible_devices('CPU')), 1)
        self.assertGreater(len(config.get_visible_devices('GPU')), 0)

        self.assertEqual(len(config.get_visible_devices('XLA_GPU')), 0)

        # Restrict visibility to the CPU only; GPUs disappear from both the
        # visible and the logical device lists.
        config.set_visible_devices(cpus[0])
        self.assertEqual(len(config.get_visible_devices('CPU')), 1)
        self.assertEqual(len(config.get_visible_devices('GPU')), 0)
        self.assertEqual(len(config.list_logical_devices('XLA_GPU')), 0)

        # With soft placement off, pinning to the hidden GPU must fail.
        with self.assertRaisesRegex(errors.InvalidArgumentError,
                                    'Could not satisfy'):
            with ops.device('/device:GPU:0'):
                a = array_ops.identity(1.0)
                self.evaluate(a)

        # Modifying the visible devices is not supported
        with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
            config.set_visible_devices(gpus)

        # Setting the same visible devices is fine
        config.set_visible_devices(cpus[0])
Example #17
0
 def setUp(self):
   """Rebuild the eager context with hard (strict) device placement."""
   super(HardDevicePlacementTest, self).setUp()
   # pylint: disable=protected-access
   context._context = None
   ops.enable_eager_execution_internal()
   config.set_soft_device_placement(enabled=False)
   context.context().log_device_placement = True
   # Sanity-check that the config API and the context agree.
   self.assertEqual(config.get_soft_device_placement(), False)
   self.assertEqual(context.context().soft_device_placement, False)
Example #18
0
 def setUp(self):
   """Reset the context, force hard placement, and split the CPU in two."""
   super(HardDevicePlacementTest, self).setUp()
   context._reset_context()  # pylint: disable=protected-access
   config.set_soft_device_placement(enabled=False)
   context.context().log_device_placement = True
   cpus = context.context().list_physical_devices('CPU')
   # Set 2 virtual CPUs
   context.context().set_logical_device_configuration(cpus[0], [
       context.LogicalDeviceConfiguration(),
       context.LogicalDeviceConfiguration()
   ])
   # Sanity-check that the config API and the context agree.
   self.assertEqual(config.get_soft_device_placement(), False)
   self.assertEqual(context.context().soft_device_placement, False)
Example #19
0
 def testSoftPlacement(self):
     """With the 'silent' device policy and soft placement on, a CPU-born
     computation is silently placed on the GPU."""
     # Temporarily replace the context
     # pylint: disable=protected-access
     old_context = context.context()
     context._set_context(context.Context())
     try:
         config.set_device_policy('silent')
         config.set_soft_device_placement(True)
         cpu_tensor = constant_op.constant(1.0)
         result = cpu_tensor + cpu_tensor
         self.assertEqual(result.device,
                          '/job:localhost/replica:0/task:0/device:GPU:0')
     finally:
         # Restore the original context so other tests are unaffected.
         context._set_context(old_context)
Example #20
0
  def test_distribution(self, distribution):
    """Hashing layer buckets strings identically inside a strategy scope."""
    raw = np.asarray([["omar"], ["stringer"], ["marlo"], ["wire"]])
    ds = dataset_ops.Dataset.from_tensor_slices(raw).batch(
        2, drop_remainder=True)
    expected = [[0], [0], [1], [0]]

    config.set_soft_device_placement(True)

    with distribution.scope():
      inp = keras.Input(shape=(None,), dtype=dtypes.string)
      hashed = hashing.Hashing(num_bins=2)(inp)
      model = keras.Model(inputs=inp, outputs=hashed)
    self.assertAllEqual(expected, model.predict(ds))
Example #21
0
 def testSoftPlacement(self):
   """Silent device policy + soft placement moves a CPU computation to GPU."""
   # Temporarily replace the context
   # pylint: disable=protected-access
   del context._context
   context._context = context.Context()
   try:
     config.set_device_policy('silent')
     config.set_soft_device_placement(True)
     cpu_tensor = constant_op.constant(1.0)
     result = cpu_tensor + cpu_tensor
     self.assertEqual(result.device,
                      '/job:localhost/replica:0/task:0/device:GPU:0')
   finally:
     # NOTE(review): a fresh context is installed rather than the old one
     # being restored -- presumably intentional for isolation; confirm.
     del context._context
     context._context = context.Context()
Example #22
0
 def testSoftPlacement(self):
     """Silent device policy + soft placement moves a CPU computation to GPU."""
     # Temporarily replace the context
     # pylint: disable=protected-access
     old_context = context.context()
     context._set_context(context.Context())
     try:
         config.set_device_policy('silent')
         config.set_soft_device_placement(True)
         # Avoid the TensorHandle cache hit.
         # TODO(b/169790439): include Context to the TensorHandle cache.
         cpu_tensor = constant_op.constant(1.1)
         result = cpu_tensor + cpu_tensor
         self.assertEqual(result.device,
                          '/job:localhost/replica:0/task:0/device:GPU:0')
     finally:
         # Restore the original context so other tests are unaffected.
         context._set_context(old_context)
Example #23
0
 def testSoftPlacement(self):
   """Silent device policy + soft placement moves a CPU computation to GPU."""
   if not context.context().num_gpus():
     self.skipTest('No GPUs found')
   # Temporarily replace the context
   # pylint: disable=protected-access
   del context._context
   context._context = context.Context()
   try:
     config.set_device_policy('silent')
     config.set_soft_device_placement(True)
     cpu_tensor = constant_op.constant(1.0)
     result = cpu_tensor + cpu_tensor
     self.assertEqual(result.device,
                      '/job:localhost/replica:0/task:0/device:GPU:0')
   finally:
     # NOTE(review): a fresh context is installed rather than the old one
     # being restored -- presumably intentional for isolation; confirm.
     del context._context
     context._context = context.Context()
Example #24
0
  def test_distribution(self, distribution):
    """Discretization bucketizes identically inside a strategy scope."""
    raw = np.array([[-1.5, 1.0, 3.4, .5], [0.0, 3.0, 1.3, 0.0]])

    # Bucket ids for boundaries [0, 1, 2]: (-inf,0)->0, [0,1)->1, etc.
    expected_output = [[0, 1, 3, 1], [0, 3, 2, 0]]
    expected_output_shape = [None, 4]

    config.set_soft_device_placement(True)

    with distribution.scope():
      feature = keras.Input(shape=(4,))
      buckets = discretization.Discretization(bins=[0., 1., 2.])(feature)
      # The layer must preserve the static output shape.
      self.assertAllEqual(expected_output_shape, buckets.shape.as_list())

      model = keras.Model(inputs=feature, outputs=buckets)
    self.assertAllEqual(expected_output, model.predict(raw))
Example #25
0
    def testVirtualGpu(self):
        """Splitting one physical GPU into two virtual (logical) GPUs.

        Also checks that the virtual-device configuration is frozen once the
        devices have been initialized.
        """
        config.set_soft_device_placement(False)
        gpus = config.list_physical_devices('GPU')
        self.assertNotEqual(len(gpus), 0)

        self.assertIsNone(config.get_logical_device_configuration(gpus[-1]))
        config.set_logical_device_configuration(gpus[-1], [
            context.LogicalDeviceConfiguration(memory_limit=10),
            context.LogicalDeviceConfiguration(memory_limit=10)
        ])
        self.assertEqual(
            len(config.get_logical_device_configuration(gpus[-1])), 2)

        logical_gpus = config.list_logical_devices('GPU')
        # Bug fix: this previously used assertTrue(x, msg), where the
        # expected count was silently treated as the failure *message*, so
        # the assertion was vacuous. The intent is an equality check: the
        # last physical GPU became two logical ones, growing the count by 1.
        self.assertEqual(len(logical_gpus), len(gpus) + 1)
        # Every logical GPU must be usable for op placement.
        for i in range(0, len(logical_gpus)):
            with ops.device('/device:GPU:' + str(i)):
                a = array_ops.identity(1.0)
                self.evaluate(a)

        # Placing past the last logical device must fail (hard placement).
        with self.assertRaisesRegex(errors.InvalidArgumentError,
                                    'Could not satisfy'):
            with ops.device('/device:GPU:' + str(len(logical_gpus))):
                a = array_ops.identity(1.0)
                self.evaluate(a)

        # Modifying the GPU configuration is not supported
        with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
            config.set_logical_device_configuration(gpus[-1], [
                context.LogicalDeviceConfiguration(memory_limit=20),
                context.LogicalDeviceConfiguration(memory_limit=20)
            ])

        with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
            config.set_logical_device_configuration(gpus[-1], [
                context.LogicalDeviceConfiguration(memory_limit=10),
                context.LogicalDeviceConfiguration(memory_limit=10),
                context.LogicalDeviceConfiguration(memory_limit=10)
            ])

        # Setting the same GPU configuration is fine
        config.set_logical_device_configuration(gpus[-1], [
            context.LogicalDeviceConfiguration(memory_limit=10),
            context.LogicalDeviceConfiguration(memory_limit=10)
        ])
Example #26
0
    def test_distribution(self, distribution):
        """CategoryCrossing over two unnamed string inputs under a strategy."""
        left = np.array([['a', 'b'], ['c', 'd']])
        right = np.array([['e', 'f'], ['g', 'h']])

        # Each left token crossed with each right token of the same row.
        expected_output = [[b'a_X_e', b'a_X_f', b'b_X_e', b'b_X_f'],
                           [b'c_X_g', b'c_X_h', b'd_X_g', b'd_X_h']]
        config.set_soft_device_placement(True)

        with distribution.scope():
            inputs = [
                keras.Input(shape=(2, ), dtype=dtypes.string),
                keras.Input(shape=(2, ), dtype=dtypes.string),
            ]
            crossed = categorical_crossing.CategoryCrossing()(inputs)
            model = keras.Model(inputs=inputs, outputs=crossed)
        output_dataset = model.predict([left, right])
        self.assertAllEqual(expected_output, output_dataset)
Example #27
0
    def test_distribution(self, distribution):
        """CategoryEncoding in BINARY (multi-hot) mode under a strategy."""
        input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])
        inp_dataset = dataset_ops.DatasetV2.from_tensor_slices(input_array)
        inp_dataset = batch_wrapper(inp_dataset, 2, distribution)

        # pyformat: disable
        # Multi-hot: position i is 1 iff token id i appears in the row.
        expected_output = [[0, 1, 1, 1, 0, 0], [1, 1, 0, 1, 0, 0]]
        # pyformat: enable
        max_tokens = 6
        config.set_soft_device_placement(True)

        with distribution.scope():
            input_data = keras.Input(shape=(4, ), dtype=dtypes.int32)
            layer = category_encoding.CategoryEncoding(
                max_tokens=max_tokens, output_mode=category_encoding.BINARY)
            int_data = layer(input_data)
            model = keras.Model(inputs=input_data, outputs=int_data)
        output_dataset = model.predict(inp_dataset)
        self.assertAllEqual(expected_output, output_dataset)
Example #28
0
    def test_dynamic_shape_with_outside_compilation_failure(self):
        """A dynamically-shaped batch with string features still fails TPU
        compilation, even with automatic outside compilation enabled."""
        # Enable automatic outside compilation.
        config.set_soft_device_placement(True)
        strategy = get_tpu_strategy()
        # drop_remainder=False leaves the batch dimension dynamic.
        dataset = dataset_ops.Dataset.from_tensors(
            ("string", 1.0)).repeat().batch(2, drop_remainder=False)
        dataset = strategy.experimental_distribute_dataset(dataset)
        iterator = iter(dataset)

        @def_function.function
        def train_fn(iterator):
            def step_fn(inputs):
                # Discard the string feature; reduce the float one.
                _, inputs = inputs
                return math_ops.reduce_sum(inputs)

            return strategy.experimental_local_results(
                strategy.run(step_fn, args=(next(iterator), )))

        with self.assertRaisesRegex(errors.InternalError,
                                    "Compilation failure"):
            logging.info(train_fn(iterator))
    def test_tpu_distribution(self, distribution):
        """Layer adapted and applied inside a TPU distribution scope."""
        vocab_data = [[
            "earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
            "and", "fire"
        ]]
        vocab_dataset = dataset_ops.Dataset.from_tensors(vocab_data)
        input_array = np.array([["earth", "wind", "and", "fire"],
                                ["fire", "and", "earth", "michigan"]])
        input_dataset = dataset_ops.Dataset.from_tensor_slices(
            input_array).batch(2, drop_remainder=True)
        # Ids start at 2 (by adapt frequency); out-of-vocab "michigan" -> 1.
        expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

        # Soft placement lets string ops without TPU kernels run on the host.
        config.set_soft_device_placement(True)

        with distribution.scope():
            input_data = keras.Input(shape=(None, ), dtype=dtypes.string)
            layer = get_layer_class()()
            layer.adapt(vocab_dataset)
            int_data = layer(input_data)
            model = keras.Model(inputs=input_data, outputs=int_data)
        output_dataset = model.predict(input_dataset)
        self.assertAllEqual(expected_output, output_dataset)
Example #30
0
    def testGpuNone(self):
        """Hiding all GPUs via set_visible_devices leaves only the CPU usable.

        Fix: `assertRaisesRegexp` is a deprecated unittest alias (removed in
        Python 3.12); use `assertRaisesRegex`, matching the sibling test.
        """
        config.set_soft_device_placement(False)
        gpus = config.list_physical_devices('GPU')
        self.assertGreater(len(gpus), 0)

        cpus = config.list_physical_devices('CPU')
        self.assertEqual(len(cpus), 1)

        self.assertEqual(len(config.get_visible_devices('CPU')), 1)
        self.assertGreater(len(config.get_visible_devices('GPU')), 0)

        # get_visible_devices filters out XLA_* devices.  list_logical_devices does
        # not, but we can't call it here because it initializes the devices and
        # calling set_visible_devices after that is disallowed.
        self.assertEqual(len(config.get_visible_devices('XLA_GPU')), 0)

        # Restrict visibility to the CPU only.
        config.set_visible_devices(cpus[0])
        self.assertEqual(len(config.get_visible_devices('CPU')), 1)
        self.assertEqual(len(config.get_visible_devices('GPU')), 0)
        self.assertEqual(len(config.list_logical_devices('XLA_GPU')), 0)

        # With soft placement off, pinning to a hidden device must fail.
        with self.assertRaisesRegex(errors.InvalidArgumentError,
                                    'Could not satisfy'):
            with ops.device('/device:GPU:0'):
                a = array_ops.identity(1.0)
                self.evaluate(a)

        with self.assertRaisesRegex(errors.InvalidArgumentError,
                                    'Could not satisfy'):
            with ops.device('/device:XLA_GPU:0'):
                a = array_ops.identity(1.0)
                self.evaluate(a)

        # Modifying the visible devices is not supported
        with self.assertRaisesRegex(RuntimeError, 'cannot be modified'):
            config.set_visible_devices(gpus)

        # Setting the same visible devices is fine
        config.set_visible_devices(cpus[0])
Example #31
0
  def test_single_tpu_jit_compile_with_outside_compilation(self):
    """jit_compile rewrite with string ops outside-compiled on one TPU core."""
    context.enable_jit_compile_rewrite()
    get_tpu_strategy(True)
    # Soft placement enables automatic outside compilation for string ops.
    config.set_soft_device_placement(True)
    with ops.device("/device:TPU:0"):
      a = variables.Variable(1)

    def get_a_plus_one():
      return a + 1

    @def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
    def foo(x):
      b = x + get_a_plus_one()  # 1 + 2 = 3
      my_str = string_ops.as_string(b)  # "3"
      new_str = my_str + "0"  # "30"
      c = string_ops.string_to_number(new_str, out_type=dtypes.int32)
      logging_ops.print_v2(c)
      b = c + get_a_plus_one()  # 30 + 2 = 32
      return b + 1  # 33

    with ops.device("/device:TPU:0"):
      result = foo(a)
    self.assertAllEqual(33, result)
Example #32
0
  def testEnableSoftPlacement(self):
    """Soft placement can be toggled until the context is initialized."""
    # This test assumes soft placement starts disabled.
    self.assertEqual(config.get_soft_device_placement(), False)

    config.set_soft_device_placement(True)
    self.assertEqual(config.get_soft_device_placement(), True)
    # The python-level getter must mirror the context's own flag.
    self.assertEqual(
        config.get_soft_device_placement(),
        context.context().soft_device_placement)

    config.set_soft_device_placement(False)
    self.assertEqual(config.get_soft_device_placement(), False)
    self.assertEqual(
        config.get_soft_device_placement(),
        context.context().soft_device_placement)

    # Creating a tensor initializes the context; after that the flag is
    # frozen and any further change raises RuntimeError.
    constant_op.constant(1)
    with self.assertRaises(RuntimeError):
      config.set_soft_device_placement(True)
    with self.assertRaises(RuntimeError):
      config.set_soft_device_placement(False)