Example #1
 def _get_squad_model():
     """Get Squad model and optimizer."""
     squad_model, core_model = bert_models.squad_model(
         bert_config,
         max_seq_length,
         float_type=tf.float16 if use_float16 else tf.float32)
     squad_model.optimizer = optimization.create_optimizer(
         FLAGS.learning_rate, steps_per_epoch * epochs, warmup_steps)
     if use_float16:
         # Wraps optimizer with a LossScaleOptimizer. This is done automatically
         # in compile() with the "mixed_float16" policy, but since we do not call
         # compile(), we must wrap the optimizer manually.
         squad_model.optimizer = (
             tf.keras.mixed_precision.experimental.LossScaleOptimizer(
                 squad_model.optimizer,
                 loss_scale=common_flags.get_loss_scale()))
     if FLAGS.fp16_implementation == 'graph_rewrite':
         # Note: when FLAGS.fp16_implementation == 'graph_rewrite', the dtype
         # resolves to 'float32' (so use_float16 is False), which ensures that
         # Keras mixed precision and
         # tf.train.experimental.enable_mixed_precision_graph_rewrite are not
         # both applied.
         squad_model.optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(
             squad_model.optimizer)
     return squad_model, core_model
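
The tf.keras.mixed_precision.experimental API used above was removed in later TensorFlow releases. A minimal sketch of the equivalent manual wrapping on TF 2.4+, assuming a hypothetical make_base_optimizer() in place of optimization.create_optimizer():

    import tensorflow as tf

    # Hypothetical stand-in for optimization.create_optimizer() above.
    def make_base_optimizer():
        return tf.keras.optimizers.Adam(learning_rate=5e-5)

    # The global policy replaces the float_type plumbing in the examples;
    # layers built afterwards compute in float16 with float32 variables.
    tf.keras.mixed_precision.set_global_policy('mixed_float16')

    # The stable LossScaleOptimizer defaults to dynamic loss scaling,
    # so no explicit loss_scale argument is needed.
    optimizer = tf.keras.mixed_precision.LossScaleOptimizer(make_base_optimizer())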
Example #2
 def _get_squad_model():
     """Get Squad model and optimizer."""
     squad_model, core_model = bert_models.squad_model(
         bert_config,
         max_seq_length,
         float_type=tf.float16 if use_float16 else tf.float32)
     squad_model.optimizer = optimization.create_optimizer(
         FLAGS.learning_rate, steps_per_epoch * epochs, warmup_steps)
     if use_float16:
         squad_model.optimizer = (
             tf.keras.mixed_precision.experimental.LossScaleOptimizer(
                 squad_model.optimizer,
                 loss_scale=common_flags.get_loss_scale()))
     return squad_model, core_model
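
Since none of these examples call compile(), the wrapped optimizer is presumably driven by a custom training loop, where the loss must be scaled by hand. A minimal sketch of such a step, with model, optimizer, and loss_fn as assumed placeholders; get_scaled_loss() and get_unscaled_gradients() are the methods LossScaleOptimizer actually provides for this:

    import tensorflow as tf

    @tf.function
    def train_step(model, optimizer, features, labels, loss_fn):
        """One custom training step with explicit loss scaling."""
        with tf.GradientTape() as tape:
            predictions = model(features, training=True)
            loss = loss_fn(labels, predictions)
            # Scale the loss up before differentiation so float16
            # gradients do not underflow.
            scaled_loss = optimizer.get_scaled_loss(loss)
        scaled_grads = tape.gradient(scaled_loss, model.trainable_variables)
        # Undo the scaling before the optimizer applies the update.
        grads = optimizer.get_unscaled_gradients(scaled_grads)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        return loss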
Example #3
 def _get_squad_model():
   """Get Squad model and optimizer."""
   squad_model, core_model = bert_models.squad_model(
       bert_config,
       max_seq_length,
       float_type=tf.float16 if use_float16 else tf.float32)
   squad_model.optimizer = optimization.create_optimizer(
       FLAGS.learning_rate, steps_per_epoch * epochs, warmup_steps)
   if use_float16:
     # Wraps optimizer with a LossScaleOptimizer. This is done automatically
     # in compile() with the "mixed_float16" policy, but since we do not call
     # compile(), we must wrap the optimizer manually.
     squad_model.optimizer = (
         tf.keras.mixed_precision.experimental.LossScaleOptimizer(
             squad_model.optimizer, loss_scale=common_flags.get_loss_scale()))
   return squad_model, core_model
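
For completeness: a factory like _get_squad_model is normally invoked inside a tf.distribute strategy scope so that the model and optimizer variables are created on the right devices. The call site below is a sketch under that assumption, not code from the original project:

    import tensorflow as tf

    # MirroredStrategy is an assumption here; any tf.distribute
    # strategy would work the same way.
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        # Variables are created under the strategy scope.
        squad_model, core_model = _get_squad_model()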