Ejemplo n.º 1
0
def get_callbacks(model_checkpoint: bool = True,
                  include_tensorboard: bool = True,
                  time_history: bool = True,
                  track_lr: bool = True,
                  write_model_weights: bool = True,
                  initial_step: int = 0,
                  batch_size: int = 0,
                  log_steps: int = 0,
                  model_dir: str = None) -> List[tf.keras.callbacks.Callback]:
    """Build the list of training callbacks selected by the boolean flags.

    Each of ``model_checkpoint``, ``include_tensorboard`` and
    ``time_history`` toggles one callback; the remaining arguments are
    forwarded to the corresponding callback constructor.
    """
    if not model_dir:
        model_dir = ''
    selected = []
    if model_checkpoint:
        # Checkpoint filenames are templated on the epoch number.
        selected.append(
            tf.keras.callbacks.ModelCheckpoint(
                os.path.join(model_dir, 'model.ckpt-{epoch:04d}'),
                save_weights_only=True,
                verbose=1))
    if include_tensorboard:
        selected.append(
            CustomTensorBoard(log_dir=model_dir,
                              track_lr=track_lr,
                              initial_step=initial_step,
                              write_images=write_model_weights))
    if time_history:
        # TimeHistory only gets a log dir when TensorBoard is also enabled.
        time_logdir = model_dir if include_tensorboard else None
        selected.append(
            keras_utils.TimeHistory(batch_size, log_steps, logdir=time_logdir))
    return selected
Ejemplo n.º 2
0
def get_callbacks(steps_per_epoch,
                  pruning_method=None,
                  enable_checkpoint_and_export=False,
                  model_dir=None):
    """Assemble the common Keras callbacks for training.

    A TimeHistory callback is always present; TensorBoard/hparams,
    examples-per-second, profiler, pruning and checkpoint callbacks are
    appended depending on FLAGS and the arguments.
    """
    callbacks = [
        keras_utils.TimeHistory(
            FLAGS.batch_size,
            FLAGS.log_steps,
            logdir=FLAGS.model_dir if FLAGS.enable_tensorboard else None)
    ]

    if FLAGS.enable_tensorboard:
        callbacks.append(
            TensorBoardWithHParamsV1(FLAGS.flag_values_dict(),
                                     log_dir=FLAGS.model_dir,
                                     update_freq=FLAGS.log_steps))
        callbacks.append(
            ExamplesPerSecondKerasHook(output_dir=FLAGS.model_dir,
                                       every_n_steps=FLAGS.log_steps))

    if FLAGS.profile_steps:
        callbacks.append(
            keras_utils.get_profiler_callback(FLAGS.model_dir,
                                              FLAGS.profile_steps,
                                              FLAGS.enable_tensorboard,
                                              steps_per_epoch))

    if pruning_method is not None:
        # Pruning always needs the step-update callback; summaries only
        # when there is somewhere to write them.
        callbacks.append(tfmot.sparsity.keras.UpdatePruningStep())
        if model_dir is not None:
            callbacks.append(
                tfmot.sparsity.keras.PruningSummaries(log_dir=model_dir,
                                                      profile_batch=0))

    if enable_checkpoint_and_export and model_dir is not None:
        callbacks.append(
            tf.keras.callbacks.ModelCheckpoint(
                os.path.join(model_dir, 'model.ckpt-{epoch:04d}'),
                save_weights_only=True))
    return callbacks
Ejemplo n.º 3
0
def get_callbacks(steps_per_epoch,
                  pruning_method=None,
                  enable_checkpoint_and_export=False,
                  model_dir=None):
    """Build the common callback list for training.

    TimeHistory is unconditional; TensorBoard, profiler, pruning and
    checkpoint callbacks depend on FLAGS and the arguments.
    """
    callbacks = [
        keras_utils.TimeHistory(
            FLAGS.batch_size,
            FLAGS.log_steps,
            logdir=FLAGS.model_dir if FLAGS.enable_tensorboard else None)
    ]

    if FLAGS.enable_tensorboard:
        callbacks.append(
            tf.keras.callbacks.TensorBoard(log_dir=FLAGS.model_dir))

    if FLAGS.profile_steps:
        callbacks.append(
            keras_utils.get_profiler_callback(FLAGS.model_dir,
                                              FLAGS.profile_steps,
                                              FLAGS.enable_tensorboard,
                                              steps_per_epoch))

    if pruning_method is not None:
        # The step-update callback is required whenever pruning is on.
        callbacks.append(tfmot.sparsity.keras.UpdatePruningStep())
        if model_dir is not None:
            callbacks.append(
                tfmot.sparsity.keras.PruningSummaries(log_dir=model_dir,
                                                      profile_batch=0))

    if enable_checkpoint_and_export and model_dir is not None:
        callbacks.append(
            tf.keras.callbacks.ModelCheckpoint(
                os.path.join(model_dir, 'model.ckpt-{epoch:04d}'),
                save_weights_only=True))
    return callbacks
Ejemplo n.º 4
0
def run(flags_obj):
    """Run ResNet ImageNet training and eval loop using custom training loops.

    Args:
      flags_obj: An object containing parsed flag values.

    Raises:
      ValueError: If fp16 is passed as it is not currently supported.

    Returns:
      Dictionary of training and eval stats.
    """

    keras_utils.set_session_config(
        enable_eager=flags_obj.enable_eager,
        enable_xla=flags_obj.enable_xla,
        enable_scoped_allocator=flags_obj.enable_scoped_allocator)
    # Enable habana bf16 conversion pass only if native keras mixed precision
    # is disabled.
    if flags.FLAGS.dtype == 'bf16' and not flags.FLAGS.use_keras_mixed_precision:
        performance.set_mixed_precision_policy(tf.float32)
        os.environ['TF_BF16_CONVERSION'] = flags.FLAGS.bf16_config_path
    else:
        performance.set_mixed_precision_policy(
            flags_core.get_tf_dtype(flags_obj))

    os.environ.setdefault("TF_DISABLE_MKL", "1")
    os.environ.setdefault("TF_ALLOW_CONTROL_EDGES_IN_HABANA_OPS", "1")

    # This only affects GPU.
    common.set_cudnn_batchnorm_mode()

    # TODO(anj-s): Set data_format without using Keras.
    data_format = flags_obj.data_format
    if data_format is None:
        data_format = ('channels_first'
                       if tf.test.is_built_with_cuda() else 'channels_last')
    tf.keras.backend.set_image_data_format(data_format)
    batch_size = adjust_batch_size(flags_obj.batch_size)

    # Give every Horovod worker its own subdirectory so checkpoints and
    # summaries from different ranks do not collide.
    if horovod_enabled():
        model_dir = os.path.join(flags_obj.model_dir,
                                 "worker_" + str(hvd.rank()))
    else:
        model_dir = flags_obj.model_dir

    # Build a "host:port,host:port,..." worker spec, spreading the MPI ranks
    # evenly over the addresses listed in MULTI_HLS_IPS.
    hls_addresses = str(os.environ.get("MULTI_HLS_IPS",
                                       "127.0.0.1")).split(",")
    TF_BASE_PORT = 2410
    mpi_rank = comm_rank()
    mpi_size = comm_size()

    worker_hosts = ",".join([
        ",".join([
            address + ':' + str(TF_BASE_PORT + rank)
            for rank in range(mpi_size // len(hls_addresses))
        ]) for address in hls_addresses
    ])
    task_index = mpi_rank

    # Configures cluster spec for distribution strategy.
    _ = distribution_utils.configure_cluster(worker_hosts, task_index)

    strategy = distribution_utils.get_distribution_strategy(
        distribution_strategy=flags_obj.distribution_strategy,
        num_gpus=flags_obj.num_gpus,
        all_reduce_alg=flags_obj.all_reduce_alg,
        num_packs=flags_obj.num_packs,
        tpu_address=flags_obj.tpu)

    train_writer, eval_writer = None, None
    if flags_obj.enable_tensorboard:
        train_writer = tf.summary.create_file_writer(model_dir)
        eval_writer = tf.summary.create_file_writer(
            os.path.join(model_dir, 'eval'))
        hparams = flags_obj.flag_values_dict()
        hparams.setdefault('precision', flags_obj.dtype)
        write_hparams_v2(train_writer, hparams)

    per_epoch_steps, train_epochs, eval_steps = get_num_train_iterations(
        flags_obj)
    steps_per_loop = min(flags_obj.steps_per_loop, per_epoch_steps)
    train_steps = train_epochs * per_epoch_steps

    logging.info(
        'Training %d epochs, each epoch has %d steps, '
        'total steps: %d; Eval %d steps', train_epochs, per_epoch_steps,
        train_steps, eval_steps)

    time_callback = keras_utils.TimeHistory(
        batch_size,
        flags_obj.log_steps,
        summary_writer=train_writer,
        batch_size_per_node=flags_obj.batch_size)
    profiler_callback = None
    if flags_obj.profile_steps is not None:
        profiler_callback = keras_utils.get_profiler_callback(
            model_dir, flags_obj.profile_steps, flags_obj.enable_tensorboard,
            per_epoch_steps)
    with distribution_utils.get_strategy_scope(strategy):
        runnable = resnet_runnable.ResnetRunnable(flags_obj, time_callback,
                                                  train_steps, per_epoch_steps,
                                                  profiler_callback)

    eval_interval = flags_obj.epochs_between_evals * per_epoch_steps
    checkpoint_interval = (per_epoch_steps
                           if flags_obj.enable_checkpoint_and_export else None)
    summary_interval = flags_obj.log_steps if flags_obj.enable_tensorboard else None

    checkpoint_manager = tf.train.CheckpointManager(
        runnable.checkpoint,
        directory=model_dir,
        max_to_keep=10,
        step_counter=runnable.global_step,
        checkpoint_interval=checkpoint_interval)

    # NOTE: train_steps was already computed above (train_epochs *
    # per_epoch_steps); the previous duplicate reassignment was removed.
    resnet_controller = controller.Controller(
        strategy,
        runnable.train,
        runnable.evaluate,
        global_step=runnable.global_step,
        steps_per_loop=steps_per_loop,
        train_steps=train_steps,
        checkpoint_manager=checkpoint_manager,
        summary_interval=summary_interval,
        eval_steps=eval_steps,
        eval_interval=eval_interval,
        train_summary_writer=train_writer,
        eval_summary_writer=eval_writer)

    time_callback.on_train_begin()
    resnet_controller.train(evaluate=not flags_obj.skip_eval)
    time_callback.on_train_end()

    stats = build_stats(runnable, time_callback)
    return stats
Ejemplo n.º 5
0
def run(flags_obj):
    """Run ResNet ImageNet training and eval loop using custom training loops.

    Args:
      flags_obj: An object containing parsed flag values.

    Raises:
      ValueError: If fp16 is passed as it is not currently supported.

    Returns:
      Dictionary of training and eval stats.
    """
    # Keep TF's own log records out of the root logger so they do not
    # interleave with the MLPerf compliance log.
    tf.get_logger().propagate = False
    output_dir = None
    if "LOG_DIR" in os.environ:
        output_dir = os.environ["LOG_DIR"]
    # MLPerf compliance logging: submission metadata and INIT_START must be
    # emitted before any initialization work. The ordering of these events
    # is checked by the MLPerf result checker — do not reorder.
    mlperf_mlloger, mlperf_mllog = get_mllog_mlloger(output_dir)
    mlperf_mlloger.event(key=mlperf_mllog.constants.CACHE_CLEAR, value=True)
    mlperf_mlloger.start(key=mlperf_mllog.constants.INIT_START, value=None)
    mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_BENCHMARK,
                         value=mlperf_mllog.constants.RESNET)
    mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_ORG,
                         value='Habana')
    mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_DIVISION,
                         value='closed')
    mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_PLATFORM,
                         value='gaudi-{}'.format(flags_obj.num_gpus))
    mlperf_mlloger.event(key=mlperf_mllog.constants.SUBMISSION_STATUS,
                         value='onprem')

    keras_utils.set_session_config(enable_eager=flags_obj.enable_eager,
                                   enable_xla=flags_obj.enable_xla)
    performance.set_mixed_precision_policy(flags_core.get_tf_dtype(flags_obj))

    # This only affects GPU.
    common.set_cudnn_batchnorm_mode()

    # TODO(anj-s): Set data_format without using Keras.
    data_format = flags_obj.data_format
    if data_format is None:
        data_format = ('channels_first'
                       if tf.test.is_built_with_cuda() else 'channels_last')
    tf.keras.backend.set_image_data_format(data_format)

    # Each Horovod worker gets its own subdirectory so checkpoints and
    # summaries from different ranks do not collide.
    if horovod_enabled():
        model_dir = os.path.join(flags_obj.model_dir,
                                 "worker_" + str(hvd.rank()))
    else:
        model_dir = flags_obj.model_dir

    global_batch_size = get_global_batch_size(flags_obj.batch_size)

    strategy = distribution_utils.get_distribution_strategy(
        distribution_strategy=flags_obj.distribution_strategy,
        num_gpus=flags_obj.num_gpus,
        all_reduce_alg=flags_obj.all_reduce_alg,
        num_packs=flags_obj.num_packs,
        tpu_address=flags_obj.tpu)

    mlperf_mlloger.event(key=mlperf_mllog.constants.GLOBAL_BATCH_SIZE,
                         value=global_batch_size)
    mlperf_mlloger.event(key=mlperf_mllog.constants.TRAIN_SAMPLES,
                         value=imagenet_preprocessing.NUM_IMAGES['train'])
    mlperf_mlloger.event(key=mlperf_mllog.constants.EVAL_SAMPLES,
                         value=imagenet_preprocessing.NUM_IMAGES['validation'])
    # With a group size of 1 the reported BN span is the per-worker batch
    # size, i.e. BN statistics are not aggregated across devices.
    group_batch_norm = 1
    mlperf_mlloger.event(key=mlperf_mllog.constants.MODEL_BN_SPAN,
                         value=flags_obj.batch_size * group_batch_norm)

    train_writer, eval_writer = None, None
    if flags_obj.enable_tensorboard:
        train_writer = tf.summary.create_file_writer(model_dir)
        eval_writer = tf.summary.create_file_writer(
            os.path.join(model_dir, 'eval'))
        hparams = flags_obj.flag_values_dict()
        write_hparams_v2(train_writer, hparams)

    per_epoch_steps, train_epochs, eval_steps = get_num_train_iterations(
        flags_obj)
    steps_per_loop = min(flags_obj.steps_per_loop, per_epoch_steps)
    train_steps = train_epochs * per_epoch_steps

    logging.info(
        'Training %d epochs, each epoch has %d steps, '
        'total steps: %d; Eval %d steps', train_epochs, per_epoch_steps,
        train_steps, eval_steps)

    time_callback = keras_utils.TimeHistory(
        global_batch_size,
        flags_obj.log_steps,
        summary_writer=train_writer,
        batch_size_per_node=flags_obj.batch_size)
    profiler_callback = None
    if flags_obj.profile_steps is not None:
        profiler_callback = keras_utils.get_profiler_callback(
            model_dir, flags_obj.profile_steps, flags_obj.enable_tensorboard,
            per_epoch_steps)
    with distribution_utils.get_strategy_scope(strategy):
        runnable = resnet_runnable.ResnetRunnable(flags_obj, time_callback,
                                                  train_steps, per_epoch_steps,
                                                  profiler_callback,
                                                  mlperf_mlloger, mlperf_mllog)

    eval_interval = flags_obj.epochs_between_evals * per_epoch_steps
    # First eval happens at eval_offset_epochs, later evals every
    # epochs_between_evals; the subtraction pre-compensates for the interval
    # the controller adds before the first eval.
    # NOTE(review): confirm against controller.Controller's eval scheduling.
    eval_offset = flags_obj.eval_offset_epochs * per_epoch_steps
    if eval_offset != 0:
        eval_offset -= eval_interval
    checkpoint_interval = (per_epoch_steps
                           if flags_obj.enable_checkpoint_and_export else None)
    summary_interval = per_epoch_steps if flags_obj.enable_tensorboard else None

    checkpoint_manager = tf.train.CheckpointManager(
        runnable.checkpoint,
        directory=model_dir,
        max_to_keep=10,
        step_counter=runnable.global_step,
        checkpoint_interval=checkpoint_interval)

    device_warmup_steps = (flags_obj.device_warmup_steps
                           if flags_obj.enable_device_warmup else 0)
    if flags_obj.enable_device_warmup:
        logging.info('Warmup for %d steps.', device_warmup_steps)

    # Same value as computed above; kept for parity with the original flow.
    train_steps = per_epoch_steps * train_epochs

    resnet_controller = controller.Controller(
        strategy,
        runnable.train,
        runnable.evaluate,
        runnable.warmup,
        global_step=runnable.global_step,
        steps_per_loop=steps_per_loop,
        train_steps=train_steps,
        checkpoint_manager=checkpoint_manager,
        summary_interval=summary_interval,
        eval_steps=eval_steps,
        eval_interval=eval_interval,
        eval_offset=eval_offset,
        device_warmup_steps=device_warmup_steps,
        train_summary_writer=train_writer,
        eval_summary_writer=eval_writer)

    # Warmup runs before INIT_STOP so compilation/warmup time is excluded
    # from the timed MLPerf run.
    if flags_obj.enable_device_warmup:
        resnet_controller.warmup()

    mlperf_mlloger.end(key=mlperf_mllog.constants.INIT_STOP)

    # NOTE(review): the broadcast appears to act as a cross-worker barrier so
    # all ranks start the timed region together — relies on hvd.broadcast
    # blocking until every rank participates; confirm.
    hvd.broadcast(0, 0)
    time_callback.on_train_begin()
    mlperf_mlloger.start(key=mlperf_mllog.constants.RUN_START)
    # The first BLOCK covers the epochs up to the first evaluation.
    mlperf_mlloger.start(
        key=mlperf_mllog.constants.BLOCK_START,
        value=None,
        metadata={
            'first_epoch_num':
            1,
            'epoch_count':
            (flags_obj.eval_offset_epochs if flags_obj.eval_offset_epochs > 0
             else flags_obj.epochs_between_evals)
        })
    resnet_controller.train(evaluate=not flags_obj.skip_eval)
    if not flags_obj.skip_eval:
        # MLPerf result status depends on whether the target accuracy was hit.
        eval_accuracy = resnet_controller.last_eval_output['test_accuracy']
        if eval_accuracy >= flags_obj.target_accuracy:
            mlperf_mlloger.end(key=mlperf_mllog.constants.RUN_STOP,
                               value=None,
                               metadata={'status': 'success'})
        else:
            mlperf_mlloger.end(key=mlperf_mllog.constants.RUN_STOP,
                               value=None,
                               metadata={'status': 'fail'})
    time_callback.on_train_end()

    stats = build_stats(runnable, time_callback)
    return stats
def run(flags_obj):
  """Run ResNet ImageNet training and eval loop using custom training loops.

  Args:
    flags_obj: An object containing parsed flag values.

  Raises:
    ValueError: If fp16 is passed as it is not currently supported.

  Returns:
    Dictionary of training and eval stats.
  """
  keras_utils.set_session_config(
      enable_eager=flags_obj.enable_eager,
      enable_xla=flags_obj.enable_xla)
  performance.set_mixed_precision_policy(flags_core.get_tf_dtype(flags_obj))

  # This only affects GPU.
  common.set_cudnn_batchnorm_mode()

  # Pick a channel layout when none was forced; CUDA builds prefer
  # channels_first.
  data_format = flags_obj.data_format
  if data_format is None:
    if tf.test.is_built_with_cuda():
      data_format = 'channels_first'
    else:
      data_format = 'channels_last'
  tf.keras.backend.set_image_data_format(data_format)

  strategy = distribution_utils.get_distribution_strategy(
      distribution_strategy=flags_obj.distribution_strategy,
      num_gpus=flags_obj.num_gpus,
      all_reduce_alg=flags_obj.all_reduce_alg,
      num_packs=flags_obj.num_packs,
      tpu_address=flags_obj.tpu)

  steps_per_epoch, train_epochs, eval_steps = get_num_train_iterations(
      flags_obj)
  loop_steps = min(flags_obj.steps_per_loop, steps_per_epoch)
  total_train_steps = train_epochs * steps_per_epoch

  logging.info(
      'Training %d epochs, each epoch has %d steps, '
      'total steps: %d; Eval %d steps', train_epochs, steps_per_epoch,
      total_train_steps, eval_steps)

  time_cb = keras_utils.TimeHistory(
      flags_obj.batch_size,
      flags_obj.log_steps,
      logdir=flags_obj.model_dir if flags_obj.enable_tensorboard else None)
  profiler_cb = None
  if flags_obj.profile_steps is not None:
    profiler_cb = keras_utils.get_profiler_callback(
        flags_obj.model_dir, flags_obj.profile_steps,
        flags_obj.enable_tensorboard, steps_per_epoch)
  # The model must be created under the strategy scope so its variables are
  # placed/mirrored by the distribution strategy.
  with distribution_utils.get_strategy_scope(strategy):
    runnable = resnet_runnable.ResnetRunnable(flags_obj, time_cb,
                                              steps_per_epoch, profiler_cb)

  eval_interval = flags_obj.epochs_between_evals * steps_per_epoch
  checkpoint_interval = (
      steps_per_epoch if flags_obj.enable_checkpoint_and_export else None)
  summary_interval = steps_per_epoch if flags_obj.enable_tensorboard else None

  ckpt_manager = tf.train.CheckpointManager(
      runnable.checkpoint,
      directory=flags_obj.model_dir,
      max_to_keep=10,
      step_counter=runnable.global_step,
      checkpoint_interval=checkpoint_interval)

  resnet_controller = controller.Controller(
      strategy,
      runnable.train,
      runnable.evaluate,
      global_step=runnable.global_step,
      steps_per_loop=loop_steps,
      train_steps=total_train_steps,
      checkpoint_manager=ckpt_manager,
      summary_interval=summary_interval,
      eval_steps=eval_steps,
      eval_interval=eval_interval)

  time_cb.on_train_begin()
  resnet_controller.train(evaluate=not flags_obj.skip_eval)
  time_cb.on_train_end()

  return build_stats(runnable, time_cb)