  def test_optimizer_errors(self):
    # A non-optimizer argument must be rejected, and the rewrite must stay off.
    opt = 1
    if tf2.enabled():
      expected_regex = ('"opt" must be an instance of a '
                        'tf.keras.optimizers.Optimizer, but got')
    else:
      expected_regex = ('"opt" must be an instance of a tf.train.Optimizer or '
                        'a tf.keras.optimizers.Optimizer, but got')
    with self.assertRaisesRegex(ValueError, expected_regex):
      enable_mixed_precision_graph_rewrite(opt)
    self.assertFalse(config.get_optimizer_experimental_options()
                     .get('auto_mixed_precision', False))

    opt = gradient_descent_v1.GradientDescentOptimizer(1.0)
    opt = loss_scale_optimizer_v1.MixedPrecisionLossScaleOptimizer(opt,
                                                                   'dynamic')
    with self.assertRaisesRegex(ValueError,
                                '"opt" must not already be an instance of a '
                                'MixedPrecisionLossScaleOptimizer.'):
      enable_mixed_precision_graph_rewrite(opt)
    self.assertFalse(config.get_optimizer_experimental_options()
                     .get('auto_mixed_precision', False))

    opt = gradient_descent_v2.SGD(1.0)
    opt = loss_scale_optimizer_v2.LossScaleOptimizer(opt, 'dynamic')
    with self.assertRaisesRegex(ValueError,
                                '"opt" must not already be an instance of a '
                                'LossScaleOptimizer.'):
      enable_mixed_precision_graph_rewrite(opt)
    self.assertFalse(config.get_optimizer_experimental_options()
                     .get('auto_mixed_precision', False))
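
For contrast with the error cases above, here is a minimal sketch of the supported call path, assuming the public endpoint tf.train.experimental.enable_mixed_precision_graph_rewrite (since deprecated and removed in recent TF releases): wrapping a plain Keras optimizer succeeds and switches the grappler 'auto_mixed_precision' option on.

import tensorflow as tf

opt = tf.keras.optimizers.SGD(1.0)
# Wrapping a plain (unwrapped) optimizer is the supported path; the call
# returns a loss-scaling wrapper and enables the graph rewrite.
opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(opt)
assert tf.config.optimizer.get_experimental_options().get(
    'auto_mixed_precision', False)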
Example #2
  def testOptimizerBoolOption(self, field):
    # TODO(b/128531235): Improve testing of option
    options = config.get_optimizer_experimental_options()
    self.assertFalse(options.get(field))

    config.set_optimizer_experimental_options({field: True})
    options[field] = True
    self.assertDictEqual(config.get_optimizer_experimental_options(), options)
    self.assertDictEqual(
        context.context().get_optimizer_experimental_options(), options)

    config.set_optimizer_experimental_options({field: False})
    options[field] = False
    self.assertDictEqual(config.get_optimizer_experimental_options(), options)
    self.assertDictEqual(
        context.context().get_optimizer_experimental_options(), options)
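
Outside the test harness the same toggle is reachable through the public tf.config.optimizer API; a minimal sketch, using 'constant_folding' as an illustrative boolean field (the test above is parameterized over such fields via its field argument):

import tensorflow as tf

# Setting an option merges it into the current option dict ...
tf.config.optimizer.set_experimental_options({'constant_folding': True})
assert tf.config.optimizer.get_experimental_options()['constant_folding']

# ... and flipping it back leaves the key present with value False.
tf.config.optimizer.set_experimental_options({'constant_folding': False})
assert not tf.config.optimizer.get_experimental_options()['constant_folding']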
Example #3
    def testOptimizerToggleOption(self, field):
        # TODO(b/128531235): Improve testing of option
        options = config.get_optimizer_experimental_options()
        self.assertIsNone(options.get(field))

        config.set_optimizer_experimental_options({field: True})
        options[field] = True
        self.assertDictEqual(config.get_optimizer_experimental_options(),
                             options)
        self.assertDictEqual(
            context.context().get_optimizer_experimental_options(), options)

        config.set_optimizer_experimental_options({field: False})
        options[field] = False
        self.assertDictEqual(config.get_optimizer_experimental_options(),
                             options)
        self.assertDictEqual(
            context.context().get_optimizer_experimental_options(), options)
Example #4
  def test_optimizer_errors(self):
    opt = gradient_descent_v2.SGD(1.0)
    opt = loss_scale_optimizer_v2.LossScaleOptimizerV1(opt, 'dynamic')
    with self.assertRaisesRegex(
        ValueError, '"opt" must not already be an instance of a '
        'LossScaleOptimizer.'):
      enable_mixed_precision_graph_rewrite(opt)
    self.assertFalse(config.get_optimizer_experimental_options().get(
        'auto_mixed_precision', False))
Example #5
  def test_optimizer_errors(self):
    opt = 1
    expected_regex = ('"opt" must be an instance of a tf.train.Optimizer or '
                      'a tf.keras.optimizers.Optimizer, but got')
    with self.assertRaisesRegex(ValueError, expected_regex):
      mixed_precision.enable_mixed_precision_graph_rewrite_v1(opt)
    self.assertFalse(config.get_optimizer_experimental_options()
                     .get('auto_mixed_precision', False))

    opt = gradient_descent_v1.GradientDescentOptimizer(1.0)
    opt = loss_scale_optimizer_v1.MixedPrecisionLossScaleOptimizer(opt,
                                                                   'dynamic')
    with self.assertRaisesRegex(
        ValueError, '"opt" must not already be an instance of a '
        'MixedPrecisionLossScaleOptimizer.'):
      mixed_precision.enable_mixed_precision_graph_rewrite_v1(opt)
    self.assertFalse(config.get_optimizer_experimental_options()
                     .get('auto_mixed_precision', False))
Example #6
    def testOptimizerToggleOptionPinToHost(self):
        options = config.get_optimizer_experimental_options()
        self.assertIsNone(options.get('pin_to_host_optimization'))

        @def_function.function
        def fun():
            op = test_ops.device_placement_op()
            return op

        # Force optimizer to run for all graphs
        config.set_optimizer_experimental_options({'min_graph_nodes': -1})
        options['min_graph_nodes'] = -1

        # Since pin to host is disabled, the operation should go on GPU
        gpu = self.evaluate(fun())
        self.assertIn(compat.as_bytes('GPU'), gpu)

        config.set_optimizer_experimental_options(
            {'pin_to_host_optimization': True})
        options['pin_to_host_optimization'] = True
        self.assertDictEqual(config.get_optimizer_experimental_options(),
                             options)
        self.assertDictEqual(
            context.context().get_optimizer_experimental_options(), options)

        # Since pin to host is enabled, the operation should go on CPU
        cpu = self.evaluate(fun())
        self.assertIn(compat.as_bytes('CPU'), cpu)

        config.set_optimizer_experimental_options(
            {'pin_to_host_optimization': False})
        options['pin_to_host_optimization'] = False
        self.assertDictEqual(config.get_optimizer_experimental_options(),
                             options)
        self.assertDictEqual(
            context.context().get_optimizer_experimental_options(), options)

        # Since pin to host is disabled again, the operation should go on GPU
        gpu2 = self.evaluate(fun())
        self.assertIn(compat.as_bytes('GPU'), gpu2)
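
The device-placement check above needs a GPU and internal test ops, but the option plumbing itself is public; a minimal sketch of just the toggle through tf.config.optimizer:

import tensorflow as tf

# Force grappler to run even on tiny graphs, then pin host-friendly ops to CPU.
tf.config.optimizer.set_experimental_options({'min_graph_nodes': -1})
tf.config.optimizer.set_experimental_options({'pin_to_host_optimization': True})
opts = tf.config.optimizer.get_experimental_options()
assert opts['pin_to_host_optimization'] and opts['min_graph_nodes'] == -1

# Turning it off leaves the key present with value False, as the test asserts.
tf.config.optimizer.set_experimental_options({'pin_to_host_optimization': False})
assert not tf.config.optimizer.get_experimental_options()['pin_to_host_optimization']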