Example 1
  def _basic_metrics(self):
    """Builds per-output metric collections, keyed by prediction type."""
    # 'probs' and 'certs' share the same metric suite; fresh instances are
    # created for each key so their accumulator states stay independent.
    def _shared_metrics():
      return [
          tf.keras.metrics.SparseCategoricalAccuracy(name='acc'),
          um.ExpectedCalibrationError(num_bins=self.FLAGS.ece_bins, name='ece'),
          nll(name='nll'),
          BrierScore(name='brier'),
      ]

    metrics_basic = {
        'logits': [],
        'probs': _shared_metrics(),
        'certs': _shared_metrics(),
        'logits_from_certs': [],
    }
    return metrics_basic
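A minimal consumption sketch (an assumption, not part of the source; `nll` and `BrierScore` are custom metric classes defined elsewhere in this codebase): only the 'probs' and 'certs' entries hold Keras-style metric objects, so an evaluation loop can update them uniformly given `labels`, `probs`, and `certs` tensors:

metrics_basic = self._basic_metrics()
for metric in metrics_basic['probs']:
  metric.update_state(labels, probs)
for metric in metrics_basic['certs']:
  metric.update_state(labels, certs)
results = {m.name: m.result() for m in metrics_basic['probs']}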
Example 2
def get_diabetic_retinopathy_base_metrics(use_tpu, num_bins):
  """Initialize base metrics for non-ensemble Diabetic Retinopathy predictors.

  Should be called within the distribution strategy scope (e.g. see
  deterministic.py script).

  Note:
    We exclude AUC in the non-TPU case; it must be defined and added to this
    dict outside the strategy scope.
    We exclude ECE in the TPU case; it currently throws an XLA error on TPU.

  Args:
    use_tpu: bool, whether the run uses a TPU.
    num_bins: number of ECE bins.

  Returns:
    dict, metrics
  """
  metrics = {
      'train/negative_log_likelihood': tf.keras.metrics.Mean(),
      'train/accuracy': tf.keras.metrics.BinaryAccuracy(),
      'train/loss': tf.keras.metrics.Mean(),  # NLL + L2
      'validation/negative_log_likelihood': tf.keras.metrics.Mean(),
      'validation/accuracy': tf.keras.metrics.BinaryAccuracy(),
      'test/negative_log_likelihood': tf.keras.metrics.Mean(),
      'test/accuracy': tf.keras.metrics.BinaryAccuracy(),
  }

  if use_tpu:
    # AUC does not yet work within GPU strategy scope, but does for TPU
    metrics.update({
        'train/auc': tf.keras.metrics.AUC(),
        'validation/auc': tf.keras.metrics.AUC(),
        'test/auc': tf.keras.metrics.AUC(),
    })
  else:
    # ECE does not yet work on TPU
    metrics.update({
        'train/ece': um.ExpectedCalibrationError(num_bins=num_bins),
        'validation/ece': um.ExpectedCalibrationError(num_bins=num_bins),
        'test/ece': um.ExpectedCalibrationError(num_bins=num_bins),
    })

  return metrics
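A usage sketch (assumed, mirroring the docstring's note; `num_bins=15` is an arbitrary choice): build the dict inside the strategy scope and, on GPU, add AUC outside the scope.

strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
  metrics = get_diabetic_retinopathy_base_metrics(use_tpu=False, num_bins=15)
# Per the docstring note, AUC is created and added outside the scope on GPU.
metrics.update({
    'train/auc': tf.keras.metrics.AUC(),
    'validation/auc': tf.keras.metrics.AUC(),
    'test/auc': tf.keras.metrics.AUC(),
})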
Example 3
  def testECEBinaryClassification(self):
    num_bins = 10
    pred_probs = np.array([0.51, 0.45, 0.39, 0.66, 0.68, 0.29, 0.81, 0.85])
    # max_pred_probs: [0.51, 0.55, 0.61, 0.66, 0.68, 0.71, 0.81, 0.85]
    # pred_class: [1, 0, 0, 1, 1, 0, 1, 1]
    labels = np.array([0., 0., 0., 1., 0., 1., 1., 1.])
    n = len(pred_probs)

    # Bins for the max predicted probabilities are [0, 0.1), [0.1, 0.2), ...,
    # [0.9, 1) and are numbered starting at zero.
    bin_counts = np.array([0, 0, 0, 0, 0, 2, 3, 1, 2, 0])
    bin_correct_sums = np.array([0, 0, 0, 0, 0, 1, 2, 0, 2, 0])
    bin_prob_sums = np.array([0, 0, 0, 0, 0, 0.51 + 0.55, 0.61 + 0.66 + 0.68,
                              0.71, 0.81 + 0.85, 0])

    correct_ece = 0.
    bin_accs = np.array([0.] * num_bins)
    bin_confs = np.array([0.] * num_bins)
    for i in range(num_bins):
      if bin_counts[i] > 0:
        bin_accs[i] = bin_correct_sums[i] / bin_counts[i]
        bin_confs[i] = bin_prob_sums[i] / bin_counts[i]
        correct_ece += bin_counts[i] / n * abs(bin_accs[i] - bin_confs[i])

    metric = um.ExpectedCalibrationError(
        num_bins, name='ECE', dtype=tf.float64)
    self.assertLen(metric.variables, 3)

    ece1 = metric(labels, pred_probs)
    self.assertAllClose(ece1, correct_ece)

    actual_bin_counts = tf.convert_to_tensor(metric.counts)
    actual_bin_correct_sums = tf.convert_to_tensor(metric.correct_sums)
    actual_bin_prob_sums = tf.convert_to_tensor(metric.prob_sums)
    self.assertAllEqual(bin_counts, actual_bin_counts)
    self.assertAllEqual(bin_correct_sums, actual_bin_correct_sums)
    self.assertAllClose(bin_prob_sums, actual_bin_prob_sums)

    # Test various types of input shapes.
    metric.reset_states()
    metric.update_state(labels[:2], pred_probs[:2])
    metric.update_state(labels[2:6].reshape(2, 2),
                        pred_probs[2:6].reshape(2, 2))
    metric.update_state(labels[6:7], pred_probs[6:7])
    ece2 = metric(labels[7:, np.newaxis], pred_probs[7:, np.newaxis])
    ece3 = metric.result()
    self.assertAllClose(ece2, ece3)
    self.assertAllClose(ece3, correct_ece)

    actual_bin_counts = tf.convert_to_tensor(metric.counts)
    actual_bin_correct_sums = tf.convert_to_tensor(metric.correct_sums)
    actual_bin_prob_sums = tf.convert_to_tensor(metric.prob_sums)
    self.assertAllEqual(bin_counts, actual_bin_counts)
    self.assertAllEqual(bin_correct_sums, actual_bin_correct_sums)
    self.assertAllClose(bin_prob_sums, actual_bin_prob_sums)
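The loop above implements the standard binned ECE estimator. With n_b, acc(b), and conf(b) the count, accuracy, and mean confidence of bin b over N examples, it computes

  ECE = \sum_{b=1}^{B} \frac{n_b}{N} \left| \mathrm{acc}(b) - \mathrm{conf}(b) \right|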
Example 4
  def testECEMulticlassClassification(self):
    num_bins = 10
    pred_probs = [
        [0.31, 0.32, 0.27],
        [0.37, 0.33, 0.30],
        [0.30, 0.31, 0.39],
        [0.61, 0.38, 0.01],
        [0.10, 0.65, 0.25],
        [0.91, 0.05, 0.04],
    ]
    # max_pred_probs: [0.32, 0.37, 0.39, 0.61, 0.65, 0.91]
    # pred_class: [1, 0, 2, 0, 1, 0]
    labels = [1., 0., 0., 1., 0., 0.]
    n = len(pred_probs)

    # Bins for the max predicted probabilities are [0, 0.1), [0.1, 0.2), ...,
    # [0.9, 1) and are numbered starting at zero.
    bin_counts = [0, 0, 0, 3, 0, 0, 2, 0, 0, 1]
    bin_correct_sums = [0, 0, 0, 2, 0, 0, 0, 0, 0, 1]
    bin_prob_sums = [0, 0, 0, 0.32 + 0.37 + 0.39, 0, 0, 0.61 + 0.65, 0, 0, 0.91]

    correct_ece = 0.
    bin_accs = [0.] * num_bins
    bin_confs = [0.] * num_bins
    for i in range(num_bins):
      if bin_counts[i] > 0:
        bin_accs[i] = bin_correct_sums[i] / bin_counts[i]
        bin_confs[i] = bin_prob_sums[i] / bin_counts[i]
        correct_ece += bin_counts[i] / n * abs(bin_accs[i] - bin_confs[i])

    metric = um.ExpectedCalibrationError(
        num_bins, name='ECE', dtype=tf.float64)
    self.assertLen(metric.variables, 3)

    metric.update_state(labels[:4], pred_probs[:4])
    ece1 = metric(labels[4:], pred_probs[4:])
    ece2 = metric.result()
    self.assertAllClose(ece1, ece2)
    self.assertAllClose(ece2, correct_ece)

    actual_bin_counts = tf.convert_to_tensor(metric.counts)
    actual_bin_correct_sums = tf.convert_to_tensor(metric.correct_sums)
    actual_bin_prob_sums = tf.convert_to_tensor(metric.prob_sums)
    self.assertAllEqual(bin_counts, actual_bin_counts)
    self.assertAllEqual(bin_correct_sums, actual_bin_correct_sums)
    self.assertAllClose(bin_prob_sums, actual_bin_prob_sums)
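For this multiclass example the expected value can be checked by hand: only bins 3, 6, and 9 are populated, so

  ECE = (3/6)|2/3 - 1.08/3| + (2/6)|0/2 - 1.26/2| + (1/6)|1/1 - 0.91|
      = 0.5 * 0.3067 + (1/3) * 0.63 + (1/6) * 0.09
      = 0.1533 + 0.2100 + 0.0150 = 0.3783

which is the value `correct_ece` accumulates above.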
Example 5
  def testECEBinaryClassificationKerasModel(self):
    num_bins = 10
    pred_probs = np.array([0.51, 0.45, 0.39, 0.66, 0.68, 0.29, 0.81, 0.85])
    # max_pred_probs: [0.51, 0.55, 0.61, 0.66, 0.68, 0.71, 0.81, 0.85]
    # pred_class: [1, 0, 0, 1, 1, 0, 1, 1]
    labels = np.array([0., 0., 0., 1., 0., 1., 1., 1.])
    n = len(pred_probs)

    # Bins for the max predicted probabilities are [0, 0.1), [0.1, 0.2), ...,
    # [0.9, 1) and are numbered starting at zero.
    bin_counts = [0, 0, 0, 0, 0, 2, 3, 1, 2, 0]
    bin_correct_sums = [0, 0, 0, 0, 0, 1, 2, 0, 2, 0]
    bin_prob_sums = [0, 0, 0, 0, 0, 0.51 + 0.55, 0.61 + 0.66 + 0.68, 0.71,
                     0.81 + 0.85, 0]

    correct_ece = 0.
    bin_accs = [0.] * num_bins
    bin_confs = [0.] * num_bins
    for i in range(num_bins):
      if bin_counts[i] > 0:
        bin_accs[i] = bin_correct_sums[i] / bin_counts[i]
        bin_confs[i] = bin_prob_sums[i] / bin_counts[i]
        correct_ece += bin_counts[i] / n * abs(bin_accs[i] - bin_confs[i])

    metric = um.ExpectedCalibrationError(num_bins, name='ECE')
    self.assertLen(metric.variables, 3)

    model = tf.keras.models.Sequential([tf.keras.layers.Lambda(lambda x: 1*x)])
    model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[metric])
    outputs = model.predict(pred_probs)
    self.assertAllClose(pred_probs.reshape([n, 1]), outputs)
    _, ece = model.evaluate(pred_probs, labels)
    self.assertAllClose(ece, correct_ece)

    actual_bin_counts = tf.convert_to_tensor(metric.counts)
    actual_bin_correct_sums = tf.convert_to_tensor(metric.correct_sums)
    actual_bin_prob_sums = tf.convert_to_tensor(metric.prob_sums)
    self.assertAllEqual(bin_counts, actual_bin_counts)
    self.assertAllEqual(bin_correct_sums, actual_bin_correct_sums)
    self.assertAllClose(bin_prob_sums, actual_bin_prob_sums)
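Because the metric keeps its accumulator variables between calls, the Keras result can be cross-checked standalone; a sketch reusing the names above:

metric.reset_states()
ece_direct = metric(labels, pred_probs)
# ece_direct should again match correct_ece.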
Example 6
def main(argv):
  del argv  # unused arg
  if not FLAGS.use_gpu:
    raise ValueError('Only GPU is currently supported.')
  if FLAGS.num_cores > 1:
    raise ValueError('Only a single accelerator is currently supported.')
  tf.random.set_seed(FLAGS.seed)
  tf.io.gfile.makedirs(FLAGS.output_dir)

  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

  builder = utils.ImageNetInput(data_dir=FLAGS.data_dir,
                                use_bfloat16=False)
  clean_test_dataset = builder.as_dataset(split=tfds.Split.TEST,
                                          batch_size=batch_size)
  test_datasets = {'clean': clean_test_dataset}
  corruption_types, max_intensity = utils.load_corrupted_test_info()
  for name in corruption_types:
    for intensity in range(1, max_intensity + 1):
      dataset_name = '{0}_{1}'.format(name, intensity)
      test_datasets[dataset_name] = utils.load_corrupted_test_dataset(
          corruption_name=name,
          corruption_intensity=intensity,
          batch_size=batch_size,
          drop_remainder=True,
          use_bfloat16=False)

  model = ub.models.resnet50_deterministic(input_shape=(224, 224, 3),
                                           num_classes=NUM_CLASSES)

  logging.info('Model input shape: %s', model.input_shape)
  logging.info('Model output shape: %s', model.output_shape)
  logging.info('Model number of weights: %s', model.count_params())
  # Search for checkpoints from their index file; then remove the index suffix.
  ensemble_filenames = tf.io.gfile.glob(os.path.join(FLAGS.checkpoint_dir,
                                                     '**/*.index'))
  ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
  ensemble_size = len(ensemble_filenames)
  logging.info('Ensemble size: %s', ensemble_size)
  logging.info('Ensemble number of weights: %s',
               ensemble_size * model.count_params())
  logging.info('Ensemble filenames: %s', str(ensemble_filenames))
  checkpoint = tf.train.Checkpoint(model=model)

  # Write model predictions to files.
  num_datasets = len(test_datasets)
  for m, ensemble_filename in enumerate(ensemble_filenames):
    checkpoint.restore(ensemble_filename)
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      if not tf.io.gfile.exists(filename):
        logits = []
        test_iterator = iter(test_dataset)
        for _ in range(steps_per_eval):
          features, _ = next(test_iterator)  # pytype: disable=attribute-error
          logits.append(model(features, training=False))

        logits = tf.concat(logits, axis=0)
        with tf.io.gfile.GFile(filename, 'w') as f:
          np.save(f, logits.numpy())
      percent = (m * num_datasets + (n + 1)) / (ensemble_size * num_datasets)
      message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                 'Dataset {:d}/{:d}'.format(percent,
                                            m + 1,
                                            ensemble_size,
                                            n + 1,
                                            num_datasets))
      logging.info(message)

  metrics = {
      'test/negative_log_likelihood': tf.keras.metrics.Mean(),
      'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
      'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
      'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
  }
  corrupt_metrics = {}
  for name in test_datasets:
    corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
    corrupt_metrics['test/accuracy_{}'.format(name)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())
    corrupt_metrics['test/ece_{}'.format(
        name)] = um.ExpectedCalibrationError(num_bins=FLAGS.num_bins)

  # Evaluate model predictions.
  for n, (name, test_dataset) in enumerate(test_datasets.items()):
    logits_dataset = []
    for m in range(ensemble_size):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      with tf.io.gfile.GFile(filename, 'rb') as f:
        logits_dataset.append(np.load(f))

    logits_dataset = tf.convert_to_tensor(logits_dataset)
    test_iterator = iter(test_dataset)
    for step in range(steps_per_eval):
      _, labels = next(test_iterator)  # pytype: disable=attribute-error
      logits = logits_dataset[:, (step*batch_size):((step+1)*batch_size)]
      labels = tf.cast(tf.reshape(labels, [-1]), tf.int32)
      negative_log_likelihood = um.ensemble_cross_entropy(labels, logits)
      per_probs = tf.nn.softmax(logits)
      probs = tf.reduce_mean(per_probs, axis=0)
      if name == 'clean':
        gibbs_ce = um.gibbs_cross_entropy(labels, logits)
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)
      else:
        corrupt_metrics['test/nll_{}'.format(name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(name)].update_state(
            labels, probs)

    message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
        (n + 1) / num_datasets, n + 1, num_datasets))
    logging.info(message)

  corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                    corruption_types,
                                                    max_intensity,
                                                    FLAGS.alexnet_errors_path)
  total_results = {name: metric.result() for name, metric in metrics.items()}
  total_results.update(corrupt_results)
  logging.info('Metrics: %s', total_results)
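The two ensemble losses logged above differ only in where the average over the M members is taken. With p_m(y|x) the m-th member's predicted probability of the true label:

  \mathrm{NLL}_{\mathrm{ens}} = -\log\Bigl(\frac{1}{M}\sum_{m=1}^{M} p_m(y \mid x)\Bigr),
  \qquad
  \mathrm{CE}_{\mathrm{Gibbs}} = -\frac{1}{M}\sum_{m=1}^{M} \log p_m(y \mid x)

By Jensen's inequality the Gibbs cross-entropy upper-bounds the ensemble negative log-likelihood.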
Example 7
def main(argv):
  del argv  # unused arg
  if not FLAGS.use_gpu:
    raise ValueError('Only GPU is currently supported.')
  if FLAGS.num_cores > 1:
    raise ValueError('Only a single accelerator is currently supported.')

  tf.random.set_seed(FLAGS.seed)
  logging.info('Model checkpoint will be saved at %s', FLAGS.output_dir)
  tf.io.gfile.makedirs(FLAGS.output_dir)

  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  test_batch_size = batch_size
  data_buffer_size = batch_size * 10

  ind_dataset_builder = ds.WikipediaToxicityDataset(
      split='test',
      data_dir=FLAGS.in_dataset_dir,
      shuffle_buffer_size=data_buffer_size)
  ood_dataset_builder = ds.CivilCommentsDataset(
      split='test',
      data_dir=FLAGS.ood_dataset_dir,
      shuffle_buffer_size=data_buffer_size)
  ood_identity_dataset_builder = ds.CivilCommentsIdentitiesDataset(
      split='test',
      data_dir=FLAGS.identity_dataset_dir,
      shuffle_buffer_size=data_buffer_size)

  test_dataset_builders = {
      'ind': ind_dataset_builder,
      'ood': ood_dataset_builder,
      'ood_identity': ood_identity_dataset_builder,
  }

  class_weight = utils.create_class_weight(
      test_dataset_builders=test_dataset_builders)
  logging.info('class_weight: %s', str(class_weight))

  ds_info = ind_dataset_builder.tfds_info
  # Positive and negative classes.
  num_classes = ds_info.metadata['num_classes']

  test_datasets = {}
  steps_per_eval = {}
  for dataset_name, dataset_builder in test_dataset_builders.items():
    test_datasets[dataset_name] = dataset_builder.load(
        batch_size=test_batch_size)
    steps_per_eval[dataset_name] = (
        dataset_builder.num_examples // test_batch_size)

  logging.info('Building %s model', FLAGS.model_family)

  bert_config_dir, _ = utils.resolve_bert_ckpt_and_config_dir(
      FLAGS.bert_model_type, FLAGS.bert_dir, FLAGS.bert_config_dir,
      FLAGS.bert_ckpt_dir)
  bert_config = utils.create_config(bert_config_dir)

  gp_layer_kwargs = dict(
      num_inducing=FLAGS.gp_hidden_dim,
      gp_kernel_scale=FLAGS.gp_scale,
      gp_output_bias=FLAGS.gp_bias,
      normalize_input=FLAGS.gp_input_normalization,
      gp_cov_momentum=FLAGS.gp_cov_discount_factor,
      gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty)
  spec_norm_kwargs = dict(
      iteration=FLAGS.spec_norm_iteration,
      norm_multiplier=FLAGS.spec_norm_bound)

  model, _ = ub.models.SngpBertBuilder(
      num_classes=num_classes,
      bert_config=bert_config,
      gp_layer_kwargs=gp_layer_kwargs,
      spec_norm_kwargs=spec_norm_kwargs,
      use_gp_layer=FLAGS.use_gp_layer,
      use_spec_norm_att=FLAGS.use_spec_norm_att,
      use_spec_norm_ffn=FLAGS.use_spec_norm_ffn,
      use_layer_norm_att=FLAGS.use_layer_norm_att,
      use_layer_norm_ffn=FLAGS.use_layer_norm_ffn,
      use_spec_norm_plr=FLAGS.use_spec_norm_plr)

  logging.info('Model input shape: %s', model.input_shape)
  logging.info('Model output shape: %s', model.output_shape)
  logging.info('Model number of weights: %s', model.count_params())

  # Search for checkpoints from their index file; then remove the index suffix.
  ensemble_filenames = tf.io.gfile.glob(
      os.path.join(FLAGS.checkpoint_dir, '**/*.index'))
  ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
  if FLAGS.num_models > len(ensemble_filenames):
    raise ValueError('Number of models to be included in the ensemble '
                     'should not exceed the total number of models in '
                     'the checkpoint_dir.')
  ensemble_filenames = ensemble_filenames[:FLAGS.num_models]
  ensemble_size = len(ensemble_filenames)
  logging.info('Ensemble size: %s', ensemble_size)
  logging.info('Ensemble number of weights: %s',
               ensemble_size * model.count_params())
  logging.info('Ensemble filenames: %s', str(ensemble_filenames))
  checkpoint = tf.train.Checkpoint(model=model)

  # Write model predictions to files.
  num_datasets = len(test_datasets)
  for m, ensemble_filename in enumerate(ensemble_filenames):
    checkpoint.restore(ensemble_filename).assert_existing_objects_matched()
    for n, (dataset_name, test_dataset) in enumerate(test_datasets.items()):
      filename = '{dataset}_{member}.npy'.format(dataset=dataset_name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      if not tf.io.gfile.exists(filename):
        logits_list = []
        test_iterator = iter(test_dataset)
        for step in range(steps_per_eval[dataset_name]):
          try:
            inputs = next(test_iterator)
          except StopIteration:
            continue
          features, labels, _ = utils.create_feature_and_label(inputs)
          logits = model(features, training=False)

          if isinstance(logits, (list, tuple)):
            # If model returns a tuple of (logits, covmat), extract both.
            logits, covmat = logits
          else:
            covmat = tf.eye(test_batch_size)

          if FLAGS.use_bfloat16:
            logits = tf.cast(logits, tf.float32)
            covmat = tf.cast(covmat, tf.float32)

          logits = ed.layers.utils.mean_field_logits(
              logits, covmat,
              mean_field_factor=FLAGS.gp_mean_field_factor_ensemble)

          logits_list.append(logits)

        logits_all = tf.concat(logits_list, axis=0)
        with tf.io.gfile.GFile(filename, 'w') as f:
          np.save(f, logits_all.numpy())
      percent = (m * num_datasets + (n + 1)) / (ensemble_size * num_datasets)
      message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                 'Dataset {:d}/{:d}'.format(percent, m + 1, ensemble_size,
                                            n + 1, num_datasets))
      logging.info(message)

  metrics = {
      'test/negative_log_likelihood': tf.keras.metrics.Mean(),
      'test/auroc': tf.keras.metrics.AUC(curve='ROC'),
      'test/aupr': tf.keras.metrics.AUC(curve='PR'),
      'test/brier': tf.keras.metrics.MeanSquaredError(),
      'test/brier_weighted': tf.keras.metrics.MeanSquaredError(),
      'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
      'test/acc': tf.keras.metrics.Accuracy(),
      'test/acc_weighted': tf.keras.metrics.Accuracy(),
      'test/precision': tf.keras.metrics.Precision(),
      'test/recall': tf.keras.metrics.Recall(),
      'test/f1': tfa_metrics.F1Score(
          num_classes=num_classes, average='micro',
          threshold=FLAGS.ece_label_threshold)
  }
  for fraction in FLAGS.fractions:
    metrics.update({
        'test_collab_acc/collab_acc_{}'.format(fraction):
            um.OracleCollaborativeAccuracy(
                fraction=float(fraction), num_bins=FLAGS.num_bins)
    })
  for dataset_name, test_dataset in test_datasets.items():
    if dataset_name != 'ind':
      metrics.update({
          'test/nll_{}'.format(dataset_name):
              tf.keras.metrics.Mean(),
          'test/auroc_{}'.format(dataset_name):
              tf.keras.metrics.AUC(curve='ROC'),
          'test/aupr_{}'.format(dataset_name):
              tf.keras.metrics.AUC(curve='PR'),
          'test/brier_{}'.format(dataset_name):
              tf.keras.metrics.MeanSquaredError(),
          'test/brier_weighted_{}'.format(dataset_name):
              tf.keras.metrics.MeanSquaredError(),
          'test/ece_{}'.format(dataset_name):
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
          'test/acc_weighted_{}'.format(dataset_name):
              tf.keras.metrics.Accuracy(),
          'test/acc_{}'.format(dataset_name):
              tf.keras.metrics.Accuracy(),
          'test/precision_{}'.format(dataset_name):
              tf.keras.metrics.Precision(),
          'test/recall_{}'.format(dataset_name):
              tf.keras.metrics.Recall(),
          'test/f1_{}'.format(dataset_name):
              tfa_metrics.F1Score(
                  num_classes=num_classes, average='micro',
                  threshold=FLAGS.ece_label_threshold)
      })
      for fraction in FLAGS.fractions:
        metrics.update({
            'test_collab_acc/collab_acc_{}_{}'.format(fraction, dataset_name):
                um.OracleCollaborativeAccuracy(
                    fraction=float(fraction), num_bins=FLAGS.num_bins)
        })

  @tf.function
  def generate_sample_weight(labels, class_weight, label_threshold=0.7):
    """Generate sample weight for weighted accuracy calculation."""
    if label_threshold != 0.7:
      logging.warning('The class weights were computed with `label_threshold` '
                      '= 0.7, the value recommended by the Jigsaw Conversation '
                      'AI team; weighted accuracy/brier will be meaningless '
                      'for any other `label_threshold`.')
    labels_int = tf.cast(labels > label_threshold, tf.int32)
    sample_weight = tf.gather(class_weight, labels_int)
    return sample_weight
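  # A hypothetical illustration (names not from the source): with
  # class_weight = [w_neg, w_pos] and labels = [0.2, 0.9], the 0.7 threshold
  # gives labels_int = [0, 1], so tf.gather returns
  # sample_weight = [w_neg, w_pos].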

  # Evaluate model predictions.
  for n, (dataset_name, test_dataset) in enumerate(test_datasets.items()):
    logits_dataset = []
    for m in range(ensemble_size):
      filename = '{dataset}_{member}.npy'.format(dataset=dataset_name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      with tf.io.gfile.GFile(filename, 'rb') as f:
        logits_dataset.append(np.load(f))

    logits_dataset = tf.convert_to_tensor(logits_dataset)
    test_iterator = iter(test_dataset)
    texts_list = []
    logits_list = []
    labels_list = []
    # Use a dict to collect additional labels specified by the additional
    # label names. We use an `OrderedDict` for consistent ordering so the
    # predictions for each identity label can be retrieved in Colab.
    for step in range(steps_per_eval[dataset_name]):
      try:
        inputs = next(test_iterator)  # type: Mapping[Text, tf.Tensor]  # pytype: disable=annotation-type-mismatch
      except StopIteration:
        continue
      features, labels, additional_labels = (
          utils.create_feature_and_label(inputs))
      logits = logits_dataset[:, (step * batch_size):((step + 1) * batch_size)]
      loss_logits = tf.squeeze(logits, axis=-1)
      negative_log_likelihood = um.ensemble_cross_entropy(
          labels, loss_logits, binary=True)

      per_probs = tf.nn.sigmoid(logits)
      probs = tf.reduce_mean(per_probs, axis=0)
      # Cast labels to discrete for ECE computation
      ece_labels = tf.cast(labels > FLAGS.ece_label_threshold, tf.float32)
      one_hot_labels = tf.one_hot(tf.cast(ece_labels, tf.int32),
                                  depth=num_classes)
      ece_probs = tf.concat([1. - probs, probs], axis=1)
      pred_labels = tf.math.argmax(ece_probs, axis=-1)
      auc_probs = tf.squeeze(probs, axis=1)

      texts_list.append(inputs['input_ids'])
      logits_list.append(logits)
      labels_list.append(labels)
      if 'identity' in dataset_name:
        for identity_label_name in utils.IDENTITY_LABELS:
          if identity_label_name not in additional_labels_dict:
            additional_labels_dict[identity_label_name] = []
          additional_labels_dict[identity_label_name].append(
              additional_labels[identity_label_name].numpy())

      sample_weight = generate_sample_weight(
          labels, class_weight['test/{}'.format(dataset_name)],
          FLAGS.ece_label_threshold)
      if dataset_name == 'ind':
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/auroc'].update_state(labels, auc_probs)
        metrics['test/aupr'].update_state(labels, auc_probs)
        metrics['test/brier'].update_state(labels, auc_probs)
        metrics['test/brier_weighted'].update_state(
            tf.expand_dims(labels, -1), probs, sample_weight=sample_weight)
        metrics['test/ece'].add_batch(ece_probs, label=ece_labels)
        metrics['test/acc'].update_state(ece_labels, pred_labels)
        metrics['test/acc_weighted'].update_state(
            ece_labels, pred_labels, sample_weight=sample_weight)
        metrics['test/precision'].update_state(ece_labels, pred_labels)
        metrics['test/recall'].update_state(ece_labels, pred_labels)
        metrics['test/f1'].update_state(one_hot_labels, ece_probs)
        for fraction in FLAGS.fractions:
          metrics['test_collab_acc/collab_acc_{}'.format(
              fraction)].update_state(ece_labels, ece_probs)
      else:
        metrics['test/nll_{}'.format(dataset_name)].update_state(
            negative_log_likelihood)
        metrics['test/auroc_{}'.format(dataset_name)].update_state(
            labels, auc_probs)
        metrics['test/aupr_{}'.format(dataset_name)].update_state(
            labels, auc_probs)
        metrics['test/brier_{}'.format(dataset_name)].update_state(
            labels, auc_probs)
        metrics['test/brier_weighted_{}'.format(dataset_name)].update_state(
            tf.expand_dims(labels, -1), probs, sample_weight=sample_weight)
        metrics['test/ece_{}'.format(dataset_name)].add_batch(
            ece_probs, label=ece_labels)
        metrics['test/acc_{}'.format(dataset_name)].update_state(
            ece_labels, pred_labels)
        metrics['test/acc_weighted_{}'.format(dataset_name)].update_state(
            ece_labels, pred_labels, sample_weight=sample_weight)
        metrics['test/precision_{}'.format(dataset_name)].update_state(
            ece_labels, pred_labels)
        metrics['test/recall_{}'.format(dataset_name)].update_state(
            ece_labels, pred_labels)
        metrics['test/f1_{}'.format(dataset_name)].update_state(
            one_hot_labels, ece_probs)
        for fraction in FLAGS.fractions:
          metrics['test_collab_acc/collab_acc_{}_{}'.format(
              fraction, dataset_name)].update_state(ece_labels, ece_probs)

    texts_all = tf.concat(texts_list, axis=0)
    logits_all = tf.concat(logits_list, axis=1)
    labels_all = tf.concat(labels_list, axis=0)
    additional_labels_all = []
    if additional_labels_dict:
      additional_labels_all = list(additional_labels_dict.values())

    utils.save_prediction(
        texts_all.numpy(),
        path=os.path.join(FLAGS.output_dir, 'texts_{}'.format(dataset_name)))
    utils.save_prediction(
        labels_all.numpy(),
        path=os.path.join(FLAGS.output_dir, 'labels_{}'.format(dataset_name)))
    utils.save_prediction(
        logits_all.numpy(),
        path=os.path.join(FLAGS.output_dir, 'logits_{}'.format(dataset_name)))
    if 'identity' in dataset_name:
      utils.save_prediction(
          np.array(additional_labels_all),
          path=os.path.join(FLAGS.output_dir,
                            'additional_labels_{}'.format(dataset_name)))

    message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
        (n + 1) / num_datasets, n + 1, num_datasets))
    logging.info(message)

  total_results = {name: metric.result() for name, metric in metrics.items()}
  # Metrics from Robustness Metrics (like ECE) will return a dict with a
  # single key/value, instead of a scalar.
  total_results = {
      k: (list(v.values())[0] if isinstance(v, dict) else v)
      for k, v in total_results.items()
  }
  logging.info('Metrics: %s', total_results)
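A sketch of what `ed.layers.utils.mean_field_logits` does in the prediction loop above: with mean-field factor λ (FLAGS.gp_mean_field_factor_ensemble) and per-example predictive variance σ² taken from the diagonal of `covmat`, each logit is tempered as

  \tilde{z} = z / \sqrt{1 + \lambda \sigma^2}

so larger GP uncertainty pulls the sigmoid probabilities toward 0.5 before the members are averaged.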
Example 8
def main(argv):
  del argv  # Unused arg.

  tf.random.set_seed(FLAGS.seed)

  if FLAGS.version2:
    per_core_bs_train = FLAGS.per_core_batch_size // (FLAGS.ensemble_size *
                                                      FLAGS.num_train_samples)
    per_core_bs_eval = FLAGS.per_core_batch_size // (FLAGS.ensemble_size *
                                                     FLAGS.num_eval_samples)
  else:
    per_core_bs_train = FLAGS.per_core_batch_size // FLAGS.num_train_samples
    per_core_bs_eval = FLAGS.per_core_batch_size // FLAGS.num_eval_samples
  batch_size_train = per_core_bs_train * FLAGS.num_cores
  batch_size_eval = per_core_bs_eval * FLAGS.num_cores

  logging.info('Saving checkpoints at %s', FLAGS.output_dir)

  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

  train_dataset = utils.load_dataset(
      split=tfds.Split.TRAIN,
      name=FLAGS.dataset,
      batch_size=batch_size_train,
      use_bfloat16=FLAGS.use_bfloat16,
      normalize=False)
  clean_test_dataset = utils.load_dataset(
      split=tfds.Split.TEST,
      name=FLAGS.dataset,
      batch_size=batch_size_eval,
      use_bfloat16=FLAGS.use_bfloat16,
      normalize=False)
  train_dataset = strategy.experimental_distribute_dataset(train_dataset)
  test_datasets = {
      'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
  }
  if FLAGS.corruptions_interval > 0:
    if FLAGS.dataset == 'cifar10':
      load_c_dataset = utils.load_cifar10_c
    else:
      load_c_dataset = functools.partial(utils.load_cifar100_c,
                                         path=FLAGS.cifar100_c_path)
    corruption_types, max_intensity = utils.load_corrupted_test_info(
        FLAGS.dataset)
    for corruption in corruption_types:
      for intensity in range(1, max_intensity + 1):
        dataset = load_c_dataset(
            corruption_name=corruption,
            corruption_intensity=intensity,
            batch_size=batch_size_eval,
            use_bfloat16=FLAGS.use_bfloat16,
            normalize=False)
        test_datasets['{0}_{1}'.format(corruption, intensity)] = (
            strategy.experimental_distribute_dataset(dataset))

  ds_info = tfds.builder(FLAGS.dataset).info
  train_dataset_size = ds_info.splits['train'].num_examples
  test_dataset_size = ds_info.splits['test'].num_examples
  num_classes = ds_info.features['label'].num_classes

  steps_per_epoch = train_dataset_size // batch_size_train
  steps_per_eval = test_dataset_size // batch_size_eval

  if FLAGS.use_bfloat16:
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
    tf.keras.mixed_precision.experimental.set_policy(policy)

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  with strategy.scope():
    logging.info('Building Keras ResNet-32 model')
    model = resnet_cifar_model.rank1_resnet_v1(
        input_shape=ds_info.features['image'].shape,
        depth=32,
        num_classes=num_classes,
        width_multiplier=4,
        alpha_initializer=FLAGS.alpha_initializer,
        gamma_initializer=FLAGS.gamma_initializer,
        alpha_regularizer=FLAGS.alpha_regularizer,
        gamma_regularizer=FLAGS.gamma_regularizer,
        use_additive_perturbation=FLAGS.use_additive_perturbation,
        ensemble_size=FLAGS.ensemble_size,
        random_sign_init=FLAGS.random_sign_init,
        dropout_rate=FLAGS.dropout_rate)
    model.summary(print_fn=logging.info)
    base_lr = FLAGS.base_learning_rate * batch_size_train / 128
    lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                       for start_epoch_str in FLAGS.lr_decay_epochs]
    lr_schedule = utils.LearningRateSchedule(
        steps_per_epoch,
        base_lr,
        decay_ratio=FLAGS.lr_decay_ratio,
        decay_epochs=lr_decay_epochs,
        warmup_epochs=FLAGS.lr_warmup_epochs)
    optimizer = tf.keras.optimizers.SGD(
        lr_schedule, momentum=0.9, nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/loss': tf.keras.metrics.Mean(),
    }
    if FLAGS.corruptions_interval > 0:
      corrupt_metrics = {}
      for intensity in range(1, max_intensity + 1):
        for corruption in corruption_types:
          dataset_name = '{0}_{1}'.format(corruption, intensity)
          corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

    test_diversity = {}
    training_diversity = {}
    if FLAGS.ensemble_size > 1:
      for i in range(FLAGS.ensemble_size):
        metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
        metrics['test/accuracy_member_{}'.format(i)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())
      test_diversity = {
          'test/disagreement': tf.keras.metrics.Mean(),
          'test/average_kl': tf.keras.metrics.Mean(),
          'test/cosine_similarity': tf.keras.metrics.Mean(),
      }
      training_diversity = {
          'train/disagreement': tf.keras.metrics.Mean(),
          'train/average_kl': tf.keras.metrics.Mean(),
          'train/cosine_similarity': tf.keras.metrics.Mean(),
      }

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  @tf.function
  def train_step(iterator):
    """Training StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      if FLAGS.version2 and FLAGS.ensemble_size > 1:
        images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
        if not (FLAGS.member_sampling or FLAGS.expected_probs):
          labels = tf.tile(labels, [FLAGS.ensemble_size])

      if FLAGS.num_train_samples > 1:
        images = tf.tile(images, [FLAGS.num_train_samples, 1, 1, 1])

      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        probs = tf.nn.softmax(logits)
        # Diversity evaluation.
        if FLAGS.version2 and FLAGS.ensemble_size > 1:
          per_probs = tf.reshape(
              probs, tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]], 0))

          diversity_results = um.average_pairwise_diversity(
              per_probs, FLAGS.ensemble_size)

        if FLAGS.num_train_samples > 1:
          probs = tf.reshape(probs,
                             tf.concat([[FLAGS.num_train_samples, -1],
                                        probs.shape[1:]], 0))
          probs = tf.reduce_mean(probs, 0)

        if FLAGS.member_sampling and FLAGS.version2 and FLAGS.ensemble_size > 1:
          idx = tf.random.uniform([], maxval=FLAGS.ensemble_size,
                                  dtype=tf.int64)
          idx_one_hot = tf.expand_dims(tf.one_hot(idx, FLAGS.ensemble_size,
                                                  dtype=probs.dtype), 0)
          probs_shape = probs.shape
          probs = tf.reshape(probs, [FLAGS.ensemble_size, -1])
          probs = tf.matmul(idx_one_hot, probs)
          probs = tf.reshape(probs, tf.concat([[-1], probs_shape[1:]], 0))

        elif FLAGS.expected_probs and FLAGS.version2 and FLAGS.ensemble_size > 1:
          probs = tf.reshape(probs,
                             tf.concat([[FLAGS.ensemble_size, -1],
                                        probs.shape[1:]], 0))
          probs = tf.reduce_mean(probs, 0)

        negative_log_likelihood = tf.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(labels, probs))

        filtered_variables = []
        for var in model.trainable_variables:
          # Apply L2 to the slow weights and bias terms. This excludes BN
          # parameters and fast-weight approximate posterior/prior parameters,
          # but be cautious about their naming scheme.
          if 'kernel' in var.name or 'bias' in var.name:
            filtered_variables.append(tf.reshape(var, (-1,)))

        l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
            tf.concat(filtered_variables, axis=0))
        kl = sum(model.losses) / train_dataset_size
        kl_scale = tf.cast(optimizer.iterations + 1, kl.dtype)
        kl_scale /= FLAGS.kl_annealing_steps
        kl_scale = tf.minimum(1., kl_scale)
        kl_loss = kl_scale * kl

        # Scale the loss, since the distribution strategy will reduce-sum
        # gradients across replicas.
        loss = negative_log_likelihood + l2_loss + kl_loss
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)

      # Separate learning rate implementation.
      grad_list = []
      if FLAGS.fast_weight_lr_multiplier != 1.0:
        grads_and_vars = list(zip(grads, model.trainable_variables))
        for vec, var in grads_and_vars:
          # Apply a different learning rate to the fast-weight approximate
          # posterior/prior parameters. This excludes BN and slow weights,
          # but be cautious about the naming scheme.
          if ('batch_norm' not in var.name and 'kernel' not in var.name):
            grad_list.append((vec * FLAGS.fast_weight_lr_multiplier, var))
          else:
            grad_list.append((vec, var))
        optimizer.apply_gradients(grad_list)
      else:
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

      metrics['train/ece'].update_state(labels, probs)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(labels, probs)
      if FLAGS.version2 and FLAGS.ensemble_size > 1:
        for k, v in diversity_results.items():
          training_diversity['train/' + k].update_state(v)

    strategy.run(step_fn, args=(next(iterator),))

  @tf.function
  def test_step(iterator, dataset_name):
    """Evaluation StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      if FLAGS.ensemble_size > 1:
        images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
      if FLAGS.num_eval_samples > 1:
        images = tf.tile(images, [FLAGS.num_eval_samples, 1, 1, 1])
      logits = model(images, training=False)
      probs = tf.nn.softmax(logits)

      if FLAGS.num_eval_samples > 1:
        probs = tf.reshape(probs,
                           tf.concat([[FLAGS.num_eval_samples, -1],
                                      probs.shape[1:]], 0))
        probs = tf.reduce_mean(probs, 0)

      if FLAGS.ensemble_size > 1:
        per_probs = tf.split(probs,
                             num_or_size_splits=FLAGS.ensemble_size,
                             axis=0)
        if dataset_name == 'clean':
          per_probs_tensor = tf.reshape(
              probs, tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]], 0))
          diversity_results = um.average_pairwise_diversity(
              per_probs_tensor, FLAGS.ensemble_size)

          for k, v in diversity_results.items():
            test_diversity['test/' + k].update_state(v)

          for i in range(FLAGS.ensemble_size):
            member_probs = per_probs[i]
            member_nll = tf.keras.losses.sparse_categorical_crossentropy(
                labels, member_probs)
            metrics['test/nll_member_{}'.format(i)].update_state(member_nll)
            metrics['test/accuracy_member_{}'.format(i)].update_state(
                labels, member_probs)

        probs = tf.reduce_mean(per_probs, axis=0)

      negative_log_likelihood = tf.reduce_mean(
          tf.keras.losses.sparse_categorical_crossentropy(labels, probs))
      filtered_variables = []
      for var in model.trainable_variables:
        if 'kernel' in var.name or 'bias' in var.name:
          filtered_variables.append(tf.reshape(var, (-1,)))

      kl = sum(model.losses) / test_dataset_size
      l2_loss = kl + FLAGS.l2 * 2 * tf.nn.l2_loss(
          tf.concat(filtered_variables, axis=0))
      loss = negative_log_likelihood + l2_loss
      if dataset_name == 'clean':
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)
        metrics['test/loss'].update_state(loss)
      else:
        corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(dataset_name)].update_state(
            labels, probs)

    strategy.run(step_fn, args=(next(iterator),))

  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    for step in range(steps_per_epoch):
      train_step(train_iterator)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps,
                     epoch + 1,
                     FLAGS.train_epochs,
                     steps_per_sec,
                     eta_seconds / 60,
                     time_elapsed / 60))
      work_unit.set_notes(message)
      if step % 20 == 0:
        logging.info(message)

    datasets_to_evaluate = {'clean': test_datasets['clean']}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      datasets_to_evaluate = test_datasets
    for dataset_name, test_dataset in datasets_to_evaluate.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      for step in range(steps_per_eval):
        if step % 20 == 0:
          logging.info('Starting to run eval step %s of epoch: %s', step,
                       epoch)
        test_step(test_iterator, dataset_name)
      logging.info('Done with testing on %s', dataset_name)

    corrupt_results = {}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                        corruption_types,
                                                        max_intensity)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    # Per-member metrics exist only when FLAGS.ensemble_size > 1 (see above).
    if FLAGS.ensemble_size > 1:
      for i in range(FLAGS.ensemble_size):
        logging.info(
            'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
            metrics['test/nll_member_{}'.format(i)].result(),
            metrics['test/accuracy_member_{}'.format(i)].result() * 100)
    total_metrics = itertools.chain(metrics.items(),
                                    training_diversity.items(),
                                    test_diversity.items())
    total_results = {name: metric.result() for name, metric in total_metrics}
    total_results.update(corrupt_results)
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for name, result in total_results.items():
      name = name.replace('/', '_')
      if 'negative_log_likelihood' in name:
        # Plots sort WIDs from high-to-low so look at maximization objectives.
        name = name.replace('negative_log_likelihood', 'log_likelihood')
        result = -result
      objective = work_unit.get_measurement_series(name)
      objective.create_measurement(result, epoch + 1)

    for _, metric in total_metrics:
      metric.reset_states()
    summary_writer.flush()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)
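The per-step objective assembled in `train_step` above can be summarized as

  \mathcal{L} = \mathrm{NLL} + \lambda_{\ell_2}\lVert w_{\text{slow}}\rVert_2^2 + \min\bigl(1, t / T_{\mathrm{KL}}\bigr)\,\mathrm{KL} / N_{\text{train}}

where t is the optimizer step, T_KL is FLAGS.kl_annealing_steps, and KL is the sum of the model's variational layer losses (`model.losses`); the KL weight is annealed linearly from 0 to 1.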
Example 9
def main(argv):
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

  ds_info = tfds.builder(FLAGS.dataset).info
  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_epoch = ds_info.splits['train'].num_examples // batch_size
  steps_per_eval = ds_info.splits['test'].num_examples // test_batch_size
  num_classes = ds_info.features['label'].num_classes

  train_dataset = utils.load_dataset(
      split=tfds.Split.TRAIN,
      name=FLAGS.dataset,
      batch_size=batch_size,
      use_bfloat16=FLAGS.use_bfloat16)
  clean_test_dataset = utils.load_dataset(
      split=tfds.Split.TEST,
      name=FLAGS.dataset,
      batch_size=test_batch_size,
      use_bfloat16=FLAGS.use_bfloat16)
  train_dataset = strategy.experimental_distribute_dataset(train_dataset)
  test_datasets = {
      'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
  }
  if FLAGS.corruptions_interval > 0:
    if FLAGS.dataset == 'cifar10':
      load_c_dataset = utils.load_cifar10_c
    else:
      load_c_dataset = functools.partial(
          utils.load_cifar100_c, path=FLAGS.cifar100_c_path)
    corruption_types, max_intensity = utils.load_corrupted_test_info(
        FLAGS.dataset)
    for corruption in corruption_types:
      for intensity in range(1, max_intensity + 1):
        dataset = load_c_dataset(
            corruption_name=corruption,
            corruption_intensity=intensity,
            batch_size=test_batch_size,
            use_bfloat16=FLAGS.use_bfloat16)
        test_datasets['{0}_{1}'.format(corruption, intensity)] = (
            strategy.experimental_distribute_dataset(dataset))

  if FLAGS.use_bfloat16:
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
    tf.keras.mixed_precision.experimental.set_policy(policy)

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  with strategy.scope():
    logging.info('Building ResNet model')
    model = ub.models.wide_resnet_condconv(
        input_shape=ds_info.features['image'].shape,
        depth=28,
        width_multiplier=FLAGS.resnet_width_multiplier,
        num_classes=num_classes,
        num_experts=FLAGS.num_experts,
        per_core_batch_size=FLAGS.per_core_batch_size,
        use_cond_dense=FLAGS.use_cond_dense,
        reduce_dense_outputs=FLAGS.reduce_dense_outputs,
        cond_placement=FLAGS.cond_placement,
        routing_fn=FLAGS.routing_fn,
        normalize_routing=FLAGS.normalize_routing,
        normalize_dense_routing=FLAGS.normalize_dense_routing,
        top_k=FLAGS.top_k,
        routing_pooling=FLAGS.routing_pooling,
        l2=FLAGS.l2)
    # reuse_routing=FLAGS.reuse_routing,
    # shared_routing_type=FLAGS.shared_routing_type)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Linearly scale learning rate and the decay epochs by vanilla settings.
    base_lr = FLAGS.base_learning_rate * batch_size / 128
    lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                       for start_epoch_str in FLAGS.lr_decay_epochs]
    lr_schedule = utils.LearningRateSchedule(
        steps_per_epoch,
        base_lr,
        decay_ratio=FLAGS.lr_decay_ratio,
        decay_epochs=lr_decay_epochs,
        warmup_epochs=FLAGS.lr_warmup_epochs)
    optimizer = tf.keras.optimizers.SGD(
        lr_schedule, momentum=0.9, nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
    }
    if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
      metrics.update({
          'test/nll_poe':
              tf.keras.metrics.Mean(),
          'test/nll_moe':
              tf.keras.metrics.Mean(),
          'test/nll_unweighted_poe':
              tf.keras.metrics.Mean(),
          'test/nll_unweighted_moe':
              tf.keras.metrics.Mean(),
          'test/unweighted_gibbs_ce':
              tf.keras.metrics.Mean(),
          'test/ece_unweighted_moe':
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
          'test/accuracy_unweighted_moe':
              tf.keras.metrics.SparseCategoricalAccuracy(),
          'test/ece_poe':
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
          'test/accuracy_poe':
              tf.keras.metrics.SparseCategoricalAccuracy(),
          'test/ece_unweighted_poe':
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
          'test/accuracy_unweighted_poe':
              tf.keras.metrics.SparseCategoricalAccuracy(),
      })
      for idx in range(FLAGS.num_experts):
        metrics['test/dense_routing_weight_{}'.format(
            idx)] = tf.keras.metrics.Mean()
        metrics['test/dense_routing_weight_normalized_{}'.format(
            idx)] = tf.keras.metrics.Mean()

    if FLAGS.corruptions_interval > 0:
      corrupt_metrics = {}
      for intensity in range(1, max_intensity + 1):
        for corruption in corruption_types:
          dataset_name = '{0}_{1}'.format(corruption, intensity)
          corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
          corrupt_metrics['test/nll_weighted_moe_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
          corrupt_metrics['test/accuracy_weighted_moe_{}'.format(
              dataset_name)] = (
                  tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/ece_weighted_moe_{}'.format(dataset_name)] = (
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  def _process_3d_logits(logits, routing_weights, labels):
    routing_weights_3d = tf.expand_dims(routing_weights, axis=-1)
    weighted_logits = tf.math.reduce_mean(routing_weights_3d * logits, axis=1)
    unweighted_logits = tf.math.reduce_mean(logits, axis=1)

    probs = tf.nn.softmax(logits)
    unweighted_probs = tf.math.reduce_mean(probs, axis=1)
    weighted_probs = tf.math.reduce_sum(routing_weights_3d * probs, axis=1)

    labels_broadcasted = tf.tile(
        tf.reshape(labels, (-1, 1)), (1, FLAGS.num_experts))
    neg_log_likelihoods = tf.keras.losses.sparse_categorical_crossentropy(
        labels_broadcasted, logits, from_logits=True)
    unweighted_gibbs_ce = tf.math.reduce_mean(neg_log_likelihoods)
    weighted_gibbs_ce = tf.math.reduce_mean(
        tf.math.reduce_sum(routing_weights * neg_log_likelihoods, axis=1))
    return {
        'weighted_logits': weighted_logits,
        'unweighted_logits': unweighted_logits,
        'unweighted_probs': unweighted_probs,
        'weighted_probs': weighted_probs,
        'neg_log_likelihoods': neg_log_likelihoods,
        'unweighted_gibbs_ce': unweighted_gibbs_ce,
        'weighted_gibbs_ce': weighted_gibbs_ce
    }

  def _process_3d_logits_train(logits, routing_weights, labels):
    processing_results = _process_3d_logits(logits, routing_weights, labels)
    if FLAGS.loss == 'gibbs_ce':
      probs = processing_results['weighted_probs']
      negative_log_likelihood = processing_results['weighted_gibbs_ce']
    elif FLAGS.loss == 'unweighted_gibbs_ce':
      probs = processing_results['unweighted_probs']
      negative_log_likelihood = processing_results['unweighted_gibbs_ce']
    elif FLAGS.loss == 'moe':
      probs = processing_results['weighted_probs']
      negative_log_likelihood = tf.math.reduce_mean(
          tf.keras.losses.sparse_categorical_crossentropy(
              labels, probs, from_logits=False))
    elif FLAGS.loss == 'unweighted_moe':
      probs = processing_results['unweighted_probs']
      negative_log_likelihood = tf.math.reduce_mean(
          tf.keras.losses.sparse_categorical_crossentropy(
              labels, probs, from_logits=False))
    elif FLAGS.loss == 'poe':
      probs = tf.nn.softmax(processing_results['weighted_logits'])
      negative_log_likelihood = tf.math.reduce_mean(
          tf.keras.losses.sparse_categorical_crossentropy(
              labels, processing_results['weighted_logits'], from_logits=True))
    elif FLAGS.loss == 'unweighted_poe':
      probs = tf.nn.softmax(processing_results['unweighted_logits'])
      negative_log_likelihood = tf.math.reduce_mean(
          tf.keras.losses.sparse_categorical_crossentropy(
              labels, processing_results['unweighted_logits'],
              from_logits=True))
    return probs, negative_log_likelihood

  def _process_3d_logits_test(routing_weights, logits, labels):
    processing_results = _process_3d_logits(logits, routing_weights, labels)
    nll_poe = tf.math.reduce_mean(
        tf.keras.losses.sparse_categorical_crossentropy(
            labels, processing_results['weighted_logits'], from_logits=True))
    nll_unweighted_poe = tf.math.reduce_mean(
        tf.keras.losses.sparse_categorical_crossentropy(
            labels, processing_results['unweighted_logits'], from_logits=True))
    nll_moe = tf.math.reduce_mean(
        tf.keras.losses.sparse_categorical_crossentropy(
            labels, processing_results['weighted_probs'], from_logits=False))
    nll_unweighted_moe = tf.math.reduce_mean(
        tf.keras.losses.sparse_categorical_crossentropy(
            labels, processing_results['unweighted_probs'], from_logits=False))
    return {
        'nll_poe': nll_poe,
        'nll_moe': nll_moe,
        'nll_unweighted_poe': nll_unweighted_poe,
        'nll_unweighted_moe': nll_unweighted_moe,
        'unweighted_gibbs_ce': processing_results['unweighted_gibbs_ce'],
        'weighted_gibbs_ce': processing_results['weighted_gibbs_ce'],
        'weighted_probs': processing_results['weighted_probs'],
        'unweighted_probs': processing_results['unweighted_probs'],
        'weighted_logits': processing_results['weighted_logits'],
        'unweighted_logits': processing_results['unweighted_logits']
    }

  @tf.function
  def train_step(iterator):
    """Training StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        if not isinstance(logits, tuple):
          raise ValueError('Logits are not a tuple.')
        # logits is a `Tensor` of shape [batch_size, num_experts, num_classes]
        logits, all_routing_weights = logits
        # routing_weights is a `Tensor` of shape [batch_size, num_experts]
        routing_weights = all_routing_weights[-1]
        if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
          probs, negative_log_likelihood = _process_3d_logits_train(
              logits, routing_weights, labels)
        else:
          probs = tf.nn.softmax(logits)
          # Prior to reduce_mean the NLLs are of the shape [batch, num_experts].
          negative_log_likelihood = tf.reduce_mean(
              tf.keras.losses.sparse_categorical_crossentropy(
                  labels, logits, from_logits=True))

        l2_loss = sum(model.losses)
        loss = negative_log_likelihood + l2_loss
        # Scale the loss given the TPUStrategy will reduce sum all gradients.
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))

      metrics['train/ece'].update_state(labels, probs)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(labels, probs)

    strategy.run(step_fn, args=(next(iterator),))

  @tf.function
  def test_step(iterator, dataset_name):
    """Evaluation StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      logits = model(images, training=False)
      if FLAGS.use_bfloat16:
        logits = tf.cast(logits, tf.float32)
      if not isinstance(logits, tuple):
        raise ValueError('Logits are not a tuple.')
      # logits is a `Tensor` of shape [batch_size, num_experts, num_classes]
      # routing_weights is a `Tensor` of shape [batch_size, num_experts]
      logits, all_routing_weights = logits
      routing_weights = all_routing_weights[-1]
      if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
        results = _process_3d_logits_test(routing_weights, logits, labels)
      else:
        probs = tf.nn.softmax(logits)
        negative_log_likelihood = tf.reduce_mean(
            tf.keras.losses.sparse_categorical_crossentropy(labels, probs))

      if dataset_name == 'clean':
        if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
          metrics['test/nll_poe'].update_state(results['nll_poe'])
          metrics['test/nll_moe'].update_state(results['nll_moe'])
          metrics['test/nll_unweighted_poe'].update_state(
              results['nll_unweighted_poe'])
          metrics['test/nll_unweighted_moe'].update_state(
              results['nll_unweighted_moe'])
          metrics['test/unweighted_gibbs_ce'].update_state(
              results['unweighted_gibbs_ce'])
          metrics['test/negative_log_likelihood'].update_state(
              results['weighted_gibbs_ce'])
          metrics['test/ece'].update_state(labels, results['weighted_probs'])
          metrics['test/accuracy'].update_state(labels,
                                                results['weighted_probs'])
          metrics['test/ece_unweighted_moe'].update_state(
              labels, results['unweighted_probs'])
          metrics['test/accuracy_unweighted_moe'].update_state(
              labels, results['unweighted_probs'])
          metrics['test/ece_poe'].update_state(labels,
                                               results['weighted_logits'])
          metrics['test/accuracy_poe'].update_state(labels,
                                                    results['weighted_logits'])
          metrics['test/ece_unweighted_poe'].update_state(
              labels, results['unweighted_logits'])
          metrics['test/accuracy_unweighted_poe'].update_state(
              labels, results['unweighted_logits'])
          # TODO(ghassen): summarize all routing weights not only last layer's.
          average_routing_weights = tf.math.reduce_mean(routing_weights, axis=0)
          routing_weights_sum = tf.math.reduce_sum(average_routing_weights)
          for idx in range(FLAGS.num_experts):
            metrics['test/dense_routing_weight_{}'.format(idx)].update_state(
                average_routing_weights[idx])
            metrics['test/dense_routing_weight_normalized_{}'.format(
                idx)].update_state(average_routing_weights[idx] /
                                   routing_weights_sum)
          # TODO(ghassen): add more metrics for expert utilization,
          # load loss and importance/balance loss.
        else:
          metrics['test/negative_log_likelihood'].update_state(
              negative_log_likelihood)
          metrics['test/accuracy'].update_state(labels, probs)
          metrics['test/ece'].update_state(labels, probs)
      else:
        # TODO(ghassen): figure out how to aggregate probs for the OOD case.
        if not FLAGS.reduce_dense_outputs and FLAGS.use_cond_dense:
          corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
              results['unweighted_gibbs_ce'])
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
              labels, results['unweighted_probs'])
          corrupt_metrics['test/ece_{}'.format(dataset_name)].update_state(
              labels, results['unweighted_probs'])

          corrupt_metrics['test/nll_weighted_moe_{}'.format(
              dataset_name)].update_state(results['weighted_gibbs_ce'])
          corrupt_metrics['test/accuracy_weighted_moe_{}'.format(
              dataset_name)].update_state(labels, results['weighted_probs'])
          corrupt_metrics['test/ece_weighted_moe_{}'.format(
              dataset_name)].update_state(labels, results['weighted_probs'])
        else:
          corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
              negative_log_likelihood)
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
              labels, probs)
          corrupt_metrics['test/ece_{}'.format(dataset_name)].update_state(
              labels, probs)

    strategy.run(step_fn, args=(next(iterator),))

  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    for step in range(steps_per_epoch):
      train_step(train_iterator)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                     steps_per_sec, eta_seconds / 60, time_elapsed / 60))
      if step % 20 == 0:
        logging.info(message)

    datasets_to_evaluate = {'clean': test_datasets['clean']}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      datasets_to_evaluate = test_datasets
    for dataset_name, test_dataset in datasets_to_evaluate.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      for step in range(steps_per_eval):
        if step % 20 == 0:
          logging.info('Starting to run eval step %s of epoch: %s', step, epoch)
        test_start_time = time.time()
        test_step(test_iterator, dataset_name)
        ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
        metrics['test/ms_per_example'].update_state(ms_per_example)

      logging.info('Done with testing on %s', dataset_name)

    corrupt_results = {}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                        corruption_types,
                                                        max_intensity)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)
  final_checkpoint_name = checkpoint.save(
      os.path.join(FLAGS.output_dir, 'checkpoint'))
  logging.info('Saved last checkpoint to %s', final_checkpoint_name)
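
As a quick illustration of the expert-combination rules used in
_process_3d_logits above, here is a minimal, self-contained sketch; the toy
shapes and random inputs are assumptions for demonstration only, not values
from the script:

import tensorflow as tf

batch_size, num_experts, num_classes = 2, 3, 4
logits = tf.random.normal([batch_size, num_experts, num_classes])
# Router output: one weight per expert, summing to one per example.
routing_weights = tf.nn.softmax(tf.random.normal([batch_size, num_experts]))
routing_weights_3d = tf.expand_dims(routing_weights, axis=-1)

# Mixture of experts (MoE): weighted average of per-expert probabilities.
weighted_probs = tf.reduce_sum(
    routing_weights_3d * tf.nn.softmax(logits), axis=1)
# Product of experts (PoE): average the logits, then take a single softmax.
weighted_logits = tf.reduce_mean(routing_weights_3d * logits, axis=1)
poe_probs = tf.nn.softmax(weighted_logits)
print(weighted_probs.shape, poe_probs.shape)  # (2, 4) (2, 4)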
Example #10
0
def main(argv):
    del argv  # unused arg

    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
    steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.experimental.TPUStrategy(resolver)

    imagenet_train = utils.ImageNetInput(is_training=True,
                                         data_dir=FLAGS.data_dir,
                                         batch_size=FLAGS.per_core_batch_size,
                                         use_bfloat16=FLAGS.use_bfloat16)
    imagenet_eval = utils.ImageNetInput(is_training=False,
                                        data_dir=FLAGS.data_dir,
                                        batch_size=FLAGS.per_core_batch_size,
                                        use_bfloat16=FLAGS.use_bfloat16)
    test_datasets = {
        'clean':
        strategy.experimental_distribute_datasets_from_function(
            imagenet_eval.input_fn)
    }
    if FLAGS.corruptions_interval > 0:
        corruption_types, max_intensity = utils.load_corrupted_test_info()
        for name in corruption_types:
            for intensity in range(1, max_intensity + 1):
                dataset_name = '{0}_{1}'.format(name, intensity)
                corrupt_input_fn = utils.corrupt_test_input_fn(
                    batch_size=FLAGS.per_core_batch_size,
                    corruption_name=name,
                    corruption_intensity=intensity,
                    use_bfloat16=FLAGS.use_bfloat16)
                test_datasets[dataset_name] = (
                    strategy.experimental_distribute_datasets_from_function(
                        corrupt_input_fn))

    train_dataset = strategy.experimental_distribute_datasets_from_function(
        imagenet_train.input_fn)

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    with strategy.scope():
        logging.info('Building Keras ResNet-50 model')
        model = ub.models.resnet50_sngp(
            input_shape=(224, 224, 3),
            batch_size=None,
            num_classes=NUM_CLASSES,
            use_mc_dropout=FLAGS.use_mc_dropout,
            dropout_rate=FLAGS.dropout_rate,
            filterwise_dropout=FLAGS.filterwise_dropout,
            use_gp_layer=FLAGS.use_gp_layer,
            gp_hidden_dim=FLAGS.gp_hidden_dim,
            gp_scale=FLAGS.gp_scale,
            gp_bias=FLAGS.gp_bias,
            gp_input_normalization=FLAGS.gp_input_normalization,
            gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
            gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
            gp_output_imagenet_initializer=(
                FLAGS.gp_output_imagenet_initializer),
            use_spec_norm=FLAGS.use_spec_norm,
            spec_norm_iteration=FLAGS.spec_norm_iteration,
            spec_norm_bound=FLAGS.spec_norm_bound)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Scale learning rate and decay epochs by vanilla settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 256
        learning_rate = utils.LearningRateSchedule(steps_per_epoch, base_lr,
                                                   FLAGS.train_epochs,
                                                   _LR_SCHEDULE)
        optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                            momentum=0.9,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss': tf.keras.metrics.Mean(),
            'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood': tf.keras.metrics.Mean(),
            'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/stddev': tf.keras.metrics.Mean(),
        }
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
                    corrupt_metrics['test/stddev_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())

        logging.info('Finished building Keras ResNet-50 model')

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            with tf.GradientTape() as tape:
                logits = model(images, training=True)

                if isinstance(logits, tuple):
                    # If model returns a tuple of (logits, covmat), extract logits
                    logits, _ = logits
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)

                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, logits, from_logits=True))
                filtered_variables = []
                for var in model.trainable_variables:
                    # Apply l2 to the kernel weights and biases; this
                    # excludes BN parameters, but pay caution to the
                    # naming scheme.
                    if 'kernel' in var.name or 'bias' in var.name:
                        filtered_variables.append(tf.reshape(var, (-1, )))

                l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
                    tf.concat(filtered_variables, axis=0))
                # Scale the loss given the TPUStrategy will reduce sum all gradients.
                loss = negative_log_likelihood + l2_loss
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(logits)
            metrics['train/ece'].update_state(labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs

            logits_list = []
            stddev_list = []
            for _ in range(FLAGS.num_dropout_samples):
                logits = model(images, training=False)

                if isinstance(logits, tuple):
                    # If model returns a tuple of (logits, covmat), extract both
                    logits, covmat = logits
                else:
                    covmat = tf.eye(FLAGS.per_core_batch_size)

                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)

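                # Mean-field correction: temper the logits using the diagonal
                # of the GP posterior covariance before ensembling.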
                logits = ed.layers.utils.mean_field_logits(
                    logits,
                    covmat,
                    mean_field_factor=FLAGS.gp_mean_field_factor)
                stddev = tf.sqrt(tf.linalg.diag_part(covmat))

                stddev_list.append(stddev)
                logits_list.append(logits)

            # Logits dimension is (num_samples, batch_size, num_classes).
            logits_list = tf.stack(logits_list, axis=0)
            stddev_list = tf.stack(stddev_list, axis=0)

            stddev = tf.reduce_mean(stddev_list, axis=0)
            probs_list = tf.nn.softmax(logits_list)
            probs = tf.reduce_mean(probs_list, axis=0)

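            # Ensemble NLL via log-sum-exp over the dropout samples:
            # -log((1/S) * sum_s p_s(y|x)) = -logsumexp_s(log p_s) + log(S).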
            labels_broadcasted = tf.broadcast_to(
                labels, [FLAGS.num_dropout_samples, labels.shape[0]])
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits_list, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
                tf.math.log(float(FLAGS.num_dropout_samples)))

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
                metrics['test/stddev'].update_state(stddev)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/stddev_{}'.format(
                    dataset_name)].update_state(stddev)

        strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            train_step(train_iterator)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            for step in range(steps_per_eval):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_start_time = time.time()
                test_step(test_iterator, dataset_name)
                ms_per_example = (time.time() -
                                  test_start_time) * 1e6 / batch_size
                metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity,
                FLAGS.alexnet_errors_path)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    # Save final checkpoint.
    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)

    # Export final model as SavedModel.
    final_save_name = os.path.join(FLAGS.output_dir, 'model')
    model.save(final_save_name)
    logging.info('Saved model to %s', final_save_name)
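
The mean-field adjustment applied in test_step above is provided by
ed.layers.utils.mean_field_logits; a minimal sketch of the correction it is
commonly understood to implement (assuming the standard form
logits / sqrt(1 + mean_field_factor * var), with var the diagonal of the GP
posterior covariance; the exact behaviour is defined by the library) looks
like this:

import tensorflow as tf

def mean_field_logits_sketch(logits, covmat, mean_field_factor=1.0):
  # Temper the logits where the GP posterior variance is large, so the
  # softmax probabilities move toward uniform for uncertain inputs.
  variances = tf.linalg.diag_part(covmat)
  scale = tf.sqrt(1. + variances * mean_field_factor)
  return logits / tf.expand_dims(scale, axis=-1)

logits = tf.random.normal([8, 10])
covmat = 2. * tf.eye(8)  # stand-in posterior covariance
adjusted = mean_field_logits_sketch(logits, covmat)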
Example #11
0
 def __init__(self, dataset_info):
   metric = um.ExpectedCalibrationError()
   super().__init__(
       dataset_info, metric, "ece", take_argmax=False, one_hot=False)
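
Judging from the flag names alone (the base class is not shown here),
take_argmax=False and one_hot=False presumably mean predictions are passed to
the metric as full probability vectors and labels as integer class ids, which
is the input format um.ExpectedCalibrationError consumes.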
Example #12
0
def main(argv):
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

  aug_params = {
      'augmix': FLAGS.augmix,
      'aug_count': FLAGS.aug_count,
      'augmix_depth': FLAGS.augmix_depth,
      'augmix_prob_coeff': FLAGS.augmix_prob_coeff,
      'augmix_width': FLAGS.augmix_width,
      'label_smoothing': FLAGS.label_smoothing,
      'ensemble_size': FLAGS.ensemble_size,
      'mixup_alpha': FLAGS.mixup_alpha,
      'random_augment': FLAGS.random_augment,
      'adaptive_mixup': FLAGS.adaptive_mixup,
      'forget_mixup': FLAGS.forget_mixup,
      'num_cores': FLAGS.num_cores,
      'threshold': FLAGS.forget_threshold,
      'cutmix': FLAGS.cutmix,
  }
  batch_size = ((FLAGS.per_core_batch_size // FLAGS.ensemble_size) *
                FLAGS.num_cores)
  train_input_fn = data_utils.load_input_fn(
      split=tfds.Split.TRAIN,
      name=FLAGS.dataset,
      batch_size=batch_size,
      use_bfloat16=FLAGS.use_bfloat16,
      proportion=FLAGS.train_proportion,
      validation_set=FLAGS.validation,
      aug_params=aug_params)
  if FLAGS.validation:
    validation_input_fn = data_utils.load_input_fn(
        split=tfds.Split.VALIDATION,
        name=FLAGS.dataset,
        batch_size=FLAGS.per_core_batch_size,
        use_bfloat16=FLAGS.use_bfloat16,
        validation_set=True)
    val_dataset = strategy.experimental_distribute_datasets_from_function(
        validation_input_fn)
  clean_test_input_fn = data_utils.load_input_fn(
      split=tfds.Split.TEST,
      name=FLAGS.dataset,
      batch_size=FLAGS.per_core_batch_size // FLAGS.ensemble_size,
      use_bfloat16=FLAGS.use_bfloat16)
  train_dataset = strategy.experimental_distribute_dataset(
      train_input_fn())
  test_datasets = {
      'clean': strategy.experimental_distribute_datasets_from_function(
          clean_test_input_fn),
  }
  if FLAGS.corruptions_interval > 0:
    if FLAGS.dataset == 'cifar10':
      load_c_dataset = utils.load_cifar10_c
    else:
      load_c_dataset = functools.partial(utils.load_cifar100_c,
                                         path=FLAGS.cifar100_c_path)
    corruption_types, max_intensity = utils.load_corrupted_test_info(
        FLAGS.dataset)
    for corruption in corruption_types:
      for intensity in range(1, max_intensity + 1):
        dataset = load_c_dataset(
            corruption_name=corruption,
            corruption_intensity=intensity,
            batch_size=batch_size,
            use_bfloat16=FLAGS.use_bfloat16)
        test_datasets['{0}_{1}'.format(corruption, intensity)] = (
            strategy.experimental_distribute_dataset(dataset))

  ds_info = tfds.builder(FLAGS.dataset).info
  num_train_examples = ds_info.splits['train'].num_examples
  # train_proportion is a float, so steps_per_epoch must be cast to int.
  if FLAGS.validation:
    # TODO(ywenxu): Remove hard-coding validation images.
    steps_per_epoch = int((num_train_examples *
                           FLAGS.train_proportion - 2500) // batch_size)
    steps_per_val = 2500 // (FLAGS.per_core_batch_size * FLAGS.num_cores)
  else:
    steps_per_epoch = int(
        num_train_examples * FLAGS.train_proportion) // batch_size
  steps_per_eval = ds_info.splits['test'].num_examples // batch_size
  num_classes = ds_info.features['label'].num_classes

  if FLAGS.use_bfloat16:
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
    tf.keras.mixed_precision.experimental.set_policy(policy)

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  with strategy.scope():
    logging.info('Building Keras model')
    model = batchensemble_model.wide_resnet(
        input_shape=ds_info.features['image'].shape,
        depth=28,
        width_multiplier=10,
        num_classes=num_classes,
        ensemble_size=FLAGS.ensemble_size,
        random_sign_init=FLAGS.random_sign_init,
        l2=FLAGS.l2,
        use_ensemble_bn=FLAGS.use_ensemble_bn)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Linearly scale learning rate and the decay epochs by vanilla settings.
    base_lr = FLAGS.base_learning_rate * batch_size / 128
    lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                       for start_epoch_str in FLAGS.lr_decay_epochs]
    lr_schedule = utils.LearningRateSchedule(
        steps_per_epoch,
        base_lr,
        decay_ratio=FLAGS.lr_decay_ratio,
        decay_epochs=lr_decay_epochs,
        warmup_epochs=FLAGS.lr_warmup_epochs)
    optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                        momentum=0.9,
                                        nesterov=True)

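    # The diversity penalty coefficient starts at diversity_coeff and decays
    # by 3% (rate 0.97) every diversity_decay_epoch epochs, in staircase
    # steps.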
    diversity_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        FLAGS.diversity_coeff, FLAGS.diversity_decay_epoch * steps_per_epoch,
        decay_rate=0.97, staircase=True)

    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/similarity': tf.keras.metrics.Mean(),
        'train/l2': tf.keras.metrics.Mean(),
        'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/member_accuracy_mean': (
            tf.keras.metrics.SparseCategoricalAccuracy()),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/member_ece_mean': um.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins)
    }
    for i in range(FLAGS.ensemble_size):
      metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
      metrics['test/accuracy_member_{}'.format(i)] = (
          tf.keras.metrics.SparseCategoricalAccuracy())
      metrics['test/ece_member_{}'.format(i)] = (
          um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

    test_diversity = {}
    training_diversity = {}
    corrupt_diversity = {}
    if FLAGS.ensemble_size > 1:
      test_diversity = {
          'test/disagreement': tf.keras.metrics.Mean(),
          'test/average_kl': tf.keras.metrics.Mean(),
          'test/cosine_similarity': tf.keras.metrics.Mean(),
      }
      training_diversity = {
          'train/disagreement': tf.keras.metrics.Mean(),
          'train/average_kl': tf.keras.metrics.Mean(),
          'train/cosine_similarity': tf.keras.metrics.Mean(),
      }

    if FLAGS.corruptions_interval > 0:
      corrupt_metrics = {}
      for intensity in range(1, max_intensity + 1):
        for corruption in corruption_types:
          dataset_name = '{0}_{1}'.format(corruption, intensity)
          corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
          corrupt_metrics['test/member_acc_mean_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/member_ece_mean_{}'.format(dataset_name)] = (
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
          corrupt_diversity['corrupt_diversity/average_kl_{}'.format(
              dataset_name)] = tf.keras.metrics.Mean()
          corrupt_diversity['corrupt_diversity/cosine_similarity_{}'.format(
              dataset_name)] = tf.keras.metrics.Mean()
          corrupt_diversity['corrupt_diversity/disagreement_{}'.format(
              dataset_name)] = tf.keras.metrics.Mean()

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  @tf.function
  def train_step(iterator):
    """Training StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      if FLAGS.forget_mixup:
        images, labels, idx = inputs
      else:
        images, labels = inputs
      if FLAGS.adaptive_mixup or FLAGS.forget_mixup:
        images = tf.identity(images)
      elif FLAGS.augmix or FLAGS.random_augment:
        images_shape = tf.shape(images)
        images = tf.reshape(tf.transpose(
            images, [1, 0, 2, 3, 4]), [-1, images_shape[2],
                                       images_shape[3], images_shape[4]])
      else:
        images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
      # Augmix, adaptive mixup, and forget mixup preprocessing already give
      # tiled labels.
      if FLAGS.mixup_alpha > 0 or FLAGS.label_smoothing > 0 or FLAGS.cutmix:
        if FLAGS.augmix or FLAGS.adaptive_mixup or FLAGS.forget_mixup:
          labels = tf.identity(labels)
        else:
          labels = tf.tile(labels, [FLAGS.ensemble_size, 1])
      else:
        labels = tf.tile(labels, [FLAGS.ensemble_size])

      def _is_batch_norm(v):
        """Decide whether a variable belongs to `batch_norm`."""
        keywords = ['batchnorm', 'batch_norm', 'bn']
        return any([k in v.name.lower() for k in keywords])

      def _normalize(x):
        """Normalize an input with l2 norm."""
        l2 = tf.norm(x, ord=2, axis=-1)
        return x / tf.expand_dims(l2, axis=-1)

      # Sum the upper triangle of the normalized Gram matrix XX^T and divide
      # by the ensemble size. Each row is unit-norm, so the diagonal sums to
      # ensemble_size and (total - ensemble_size) / 2 is the upper triangle.
      def pairwise_cosine_distance(x):
        """Compute the pairwise distance in a matrix."""
        normalized_x = _normalize(x)
        return (tf.reduce_sum(
            tf.matmul(normalized_x, normalized_x, transpose_b=True)) -
                FLAGS.ensemble_size) / (2.0 * FLAGS.ensemble_size)

      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)
        if FLAGS.mixup_alpha > 0 or FLAGS.label_smoothing > 0 or FLAGS.cutmix:
          negative_log_likelihood = tf.reduce_mean(
              tf.keras.losses.categorical_crossentropy(labels,
                                                       logits,
                                                       from_logits=True))
        else:
          negative_log_likelihood = tf.reduce_mean(
              tf.keras.losses.sparse_categorical_crossentropy(labels,
                                                              logits,
                                                              from_logits=True))

        l2_loss = sum(model.losses)
        fast_weights = [var for var in model.trainable_variables if
                        not _is_batch_norm(var) and (
                            'alpha' in var.name or 'gamma' in var.name)]

        pairwise_distance_loss = tf.add_n(
            [pairwise_cosine_distance(var) for var in fast_weights])

        diversity_start_iter = steps_per_epoch * FLAGS.diversity_start_epoch
        diversity_iterations = optimizer.iterations - diversity_start_iter
        if diversity_iterations > 0:
          diversity_coeff = diversity_schedule(diversity_iterations)
          diversity_loss = diversity_coeff * pairwise_distance_loss
          loss = negative_log_likelihood + l2_loss + diversity_loss
        else:
          loss = negative_log_likelihood + l2_loss
        # Scale the loss given the TPUStrategy will reduce sum all gradients.
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)

      # Separate learning rate implementation.
      if FLAGS.fast_weight_lr_multiplier != 1.0:
        grads_and_vars = []
        for grad, var in zip(grads, model.trainable_variables):
          # Apply a different learning rate to the fast-weight approximate
          # posterior/prior parameters. This excludes BN and slow weights,
          # but pay caution to the naming scheme.
          if (not _is_batch_norm(var) and 'kernel' not in var.name):
            grads_and_vars.append((grad * FLAGS.fast_weight_lr_multiplier, var))
          else:
            grads_and_vars.append((grad, var))
        optimizer.apply_gradients(grads_and_vars)
      else:
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

      probs = tf.nn.softmax(logits)
      if FLAGS.ensemble_size > 1:
        per_probs = tf.reshape(
            probs, tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]], 0))
        diversity_results = um.average_pairwise_diversity(
            per_probs, FLAGS.ensemble_size)
        for k, v in diversity_results.items():
          training_diversity['train/' + k].update_state(v)

      if FLAGS.mixup_alpha > 0 or FLAGS.label_smoothing > 0 or FLAGS.cutmix:
        labels = tf.argmax(labels, axis=-1)
      metrics['train/ece'].update_state(labels, probs)
      metrics['train/similarity'].update_state(pairwise_distance_loss)
      metrics['train/l2'].update_state(l2_loss)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(labels, logits)
      if FLAGS.forget_mixup:
        train_predictions = tf.argmax(probs, -1)
        labels = tf.cast(labels, train_predictions.dtype)
        # For each ensemble member, we accumulate the accuracy counts.
        accuracy_counts = tf.cast(tf.reshape(
            (train_predictions == labels), [FLAGS.ensemble_size, -1]),
                                  tf.float32)
        return accuracy_counts, idx

    if FLAGS.forget_mixup:
      return strategy.run(step_fn, args=(next(iterator),))
    else:
      strategy.run(step_fn, args=(next(iterator),))

  @tf.function
  def test_step(iterator, dataset_name):
    """Evaluation StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
      logits = model(images, training=False)
      if FLAGS.use_bfloat16:
        logits = tf.cast(logits, tf.float32)
      probs = tf.nn.softmax(logits)
      per_probs = tf.split(probs,
                           num_or_size_splits=FLAGS.ensemble_size,
                           axis=0)
      for i in range(FLAGS.ensemble_size):
        member_probs = per_probs[i]
        if dataset_name == 'clean':
          member_loss = tf.keras.losses.sparse_categorical_crossentropy(
              labels, member_probs)
          metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
          metrics['test/accuracy_member_{}'.format(i)].update_state(
              labels, member_probs)
          metrics['test/member_accuracy_mean'].update_state(
              labels, member_probs)
          metrics['test/ece_member_{}'.format(i)].update_state(labels,
                                                               member_probs)
          metrics['test/member_ece_mean'].update_state(labels, member_probs)
        elif dataset_name != 'validation':
          corrupt_metrics['test/member_acc_mean_{}'.format(
              dataset_name)].update_state(labels, member_probs)
          corrupt_metrics['test/member_ece_mean_{}'.format(
              dataset_name)].update_state(labels, member_probs)

      if FLAGS.ensemble_size > 1:
        per_probs_tensor = tf.reshape(
            probs, tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]], 0))
        diversity_results = um.average_pairwise_diversity(
            per_probs_tensor, FLAGS.ensemble_size)
        if dataset_name == 'clean':
          for k, v in diversity_results.items():
            test_diversity['test/' + k].update_state(v)
        elif dataset_name != 'validation':
          for k, v in diversity_results.items():
            corrupt_diversity['corrupt_diversity/{}_{}'.format(
                k, dataset_name)].update_state(v)

      probs = tf.reduce_mean(per_probs, axis=0)
      negative_log_likelihood = tf.reduce_mean(
          tf.keras.losses.sparse_categorical_crossentropy(labels, probs))
      if dataset_name == 'clean':
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)
      elif dataset_name != 'validation':
        corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(dataset_name)].update_state(
            labels, probs)

      if dataset_name == 'validation':
        return per_probs_tensor, labels

    if dataset_name == 'validation':
      return strategy.run(step_fn, args=(next(iterator),))
    else:
      strategy.run(step_fn, args=(next(iterator),))

  train_iterator = iter(train_dataset)
  start_time = time.time()
  forget_counts_history = []
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    acc_counts_list = []
    idx_list = []
    for step in range(steps_per_epoch):
      if FLAGS.forget_mixup:
        temp_accuracy_counts, temp_idx = train_step(train_iterator)
        acc_counts_list.append(temp_accuracy_counts)
        idx_list.append(temp_idx)
      else:
        train_step(train_iterator)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps,
                     epoch + 1,
                     FLAGS.train_epochs,
                     steps_per_sec,
                     eta_seconds / 60,
                     time_elapsed / 60))
      if step % 20 == 0:
        logging.info(message)

    # Only one of forget_mixup and adaptive_mixup can be true.
    if FLAGS.forget_mixup:
      current_acc = [tf.concat(list(acc_counts_list[i].values), axis=1)
                     for i in range(len(acc_counts_list))]
      total_idx = [tf.concat(list(idx_list[i].values), axis=0)
                   for i in range(len(idx_list))]
      current_acc = tf.cast(tf.concat(current_acc, axis=1), tf.int32)
      total_idx = tf.concat(total_idx, axis=0)

      current_forget_path = os.path.join(FLAGS.output_dir,
                                         'forget_counts.npy')
      last_acc_path = os.path.join(FLAGS.output_dir, 'last_acc.npy')
      if epoch == 0:
        forget_counts = tf.zeros(
            [FLAGS.ensemble_size, num_train_examples], dtype=tf.int32)
        last_acc = tf.zeros(
            [FLAGS.ensemble_size, num_train_examples], dtype=tf.int32)
      else:
        if 'last_acc' not in locals():
          with tf.io.gfile.GFile(last_acc_path, 'rb') as f:
            last_acc = np.load(f)
          last_acc = tf.cast(tf.convert_to_tensor(last_acc), tf.int32)
        if 'forget_counts' not in locals():
          with tf.io.gfile.GFile(current_forget_path, 'rb') as f:
            forget_counts = np.load(f)
          forget_counts = tf.cast(tf.convert_to_tensor(forget_counts), tf.int32)

      selected_last_acc = tf.gather(last_acc, total_idx, axis=1)
      forget_this_epoch = tf.cast(current_acc < selected_last_acc, tf.int32)
      forget_this_epoch = tf.transpose(forget_this_epoch)
      target_shape = tf.constant([num_train_examples, FLAGS.ensemble_size])
      current_forget_counts = tf.scatter_nd(tf.reshape(total_idx, [-1, 1]),
                                            forget_this_epoch, target_shape)
      current_forget_counts = tf.transpose(current_forget_counts)
      acc_this_epoch = tf.transpose(current_acc)
      last_acc = tf.scatter_nd(tf.reshape(total_idx, [-1, 1]),
                               acc_this_epoch, target_shape)
      # This is a lower bound of the true accuracy.
      last_acc = tf.transpose(last_acc)

      # TODO(ywenxu): We count the dropped examples as forget. Fix this later.
      forget_counts += current_forget_counts
      forget_counts_history.append(forget_counts)
      logging.info('forgetting counts')
      logging.info(tf.stack(forget_counts_history, 0))
      with tf.io.gfile.GFile(os.path.join(
          FLAGS.output_dir, 'forget_counts_history.npy'), 'wb') as f:
        np.save(f, tf.stack(forget_counts_history, 0).numpy())
      with tf.io.gfile.GFile(current_forget_path, 'wb') as f:
        np.save(f, forget_counts.numpy())
      with tf.io.gfile.GFile(last_acc_path, 'wb') as f:
        np.save(f, last_acc.numpy())
      aug_params['forget_counts_dir'] = current_forget_path

      train_input_fn = data_utils.load_input_fn(
          split=tfds.Split.TRAIN,
          name=FLAGS.dataset,
          batch_size=FLAGS.num_cores * (
              FLAGS.per_core_batch_size // FLAGS.ensemble_size),
          use_bfloat16=FLAGS.use_bfloat16,
          validation_set=FLAGS.validation,
          aug_params=aug_params)
      train_dataset = strategy.experimental_distribute_dataset(
          train_input_fn())
      train_iterator = iter(train_dataset)

    if FLAGS.adaptive_mixup:
      val_iterator = iter(val_dataset)
      logging.info('Testing on validation dataset')
      predictions_list = []
      labels_list = []
      for step in range(steps_per_val):
        temp_predictions, temp_labels = test_step(val_iterator, 'validation')
        predictions_list.append(temp_predictions)
        labels_list.append(temp_labels)
      predictions = [tf.concat(list(predictions_list[i].values), axis=1)
                     for i in range(len(predictions_list))]
      labels = [tf.concat(list(labels_list[i].values), axis=0)
                for i in range(len(labels_list))]
      predictions = tf.concat(predictions, axis=1)
      labels = tf.cast(tf.concat(labels, axis=0), tf.int64)

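      # Per-class calibration gap: accuracy minus mean confidence over the
      # examples of a class; negative values flag overconfident classes.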
      def compute_acc_conf(preds, label, focus_class):
        class_preds = tf.boolean_mask(preds, label == focus_class, axis=1)
        class_pred_labels = tf.argmax(class_preds, axis=-1)
        confidence = tf.reduce_mean(tf.reduce_max(class_preds, axis=-1), -1)
        accuracy = tf.reduce_mean(tf.cast(
            class_pred_labels == focus_class, tf.float32), axis=-1)
        return accuracy - confidence

      calibration_per_class = [compute_acc_conf(
          predictions, labels, i) for i in range(num_classes)]
      calibration_per_class = tf.stack(calibration_per_class, axis=1)
      logging.info('calibration per class')
      logging.info(calibration_per_class)
      mixup_coeff = tf.where(calibration_per_class > 0, 1.0, FLAGS.mixup_alpha)
      mixup_coeff = tf.clip_by_value(mixup_coeff, 0, 1)
      logging.info('mixup coeff')
      logging.info(mixup_coeff)
      aug_params['mixup_coeff'] = mixup_coeff
      train_input_fn = data_utils.load_input_fn(
          split=tfds.Split.TRAIN,
          name=FLAGS.dataset,
          batch_size=FLAGS.per_core_batch_size // FLAGS.ensemble_size,
          use_bfloat16=FLAGS.use_bfloat16,
          validation_set=True,
          aug_params=aug_params)
      train_dataset = strategy.experimental_distribute_datasets_from_function(
          train_input_fn)
      train_iterator = iter(train_dataset)

    datasets_to_evaluate = {'clean': test_datasets['clean']}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      datasets_to_evaluate = test_datasets
    for dataset_name, test_dataset in datasets_to_evaluate.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      for step in range(steps_per_eval):
        if step % 20 == 0:
          logging.info('Starting to run eval step %s of epoch: %s', step,
                       epoch)
        test_step(test_iterator, dataset_name)
      logging.info('Done with testing on %s', dataset_name)

    corrupt_results = {}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      # This includes corrupt_diversity, whose disagreement is normalized by
      # the corresponding corrupt mean error rate.
      corrupt_results = utils.aggregate_corrupt_metrics(
          corrupt_metrics,
          corruption_types,
          max_intensity,
          corrupt_diversity=corrupt_diversity,
          output_dir=FLAGS.output_dir)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    for i in range(FLAGS.ensemble_size):
      logging.info('Member %d Test Loss: %.4f, Accuracy: %.2f%%',
                   i, metrics['test/nll_member_{}'.format(i)].result(),
                   metrics['test/accuracy_member_{}'.format(i)].result() * 100)

    total_metrics = metrics.copy()
    total_metrics.update(training_diversity)
    total_metrics.update(test_diversity)
    total_results = {name: metric.result()
                     for name, metric in total_metrics.items()}
    total_results.update(corrupt_results)
    # Normalize all disagreement metrics (training, testing) by test accuracy.
    # Disagreement on corrupt dataset is normalized by their own error rate.
    test_acc = total_metrics['test/accuracy'].result()
    for name, metric in total_metrics.items():
      if 'disagreement' in name:
        total_results[name] = metric.result() / test_acc

    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)
      if FLAGS.forget_mixup:
        tf.summary.histogram('forget_counts', forget_counts, step=epoch + 1)

    for metric in total_metrics.values():
      metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

    # Need to store the last but one checkpoint in adaptive mixup setup.
    if FLAGS.adaptive_mixup and epoch == (FLAGS.train_epochs - 2):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'last_but_one_checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  final_checkpoint_name = checkpoint.save(
      os.path.join(FLAGS.output_dir, 'checkpoint'))
  logging.info('Saved last checkpoint to %s', final_checkpoint_name)
  final_save_name = os.path.join(FLAGS.output_dir, 'model')
  model.save(final_save_name)
  logging.info('Saved model to %s', final_save_name)
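
For context, the test-time ensembling pattern in the example above (tile the
batch, run the model once, split the outputs per member, average the member
probabilities) can be sketched on toy shapes; the shapes and the stand-in for
the model call are assumptions for demonstration only:

import tensorflow as tf

ensemble_size, batch_size, num_classes = 4, 2, 10
images = tf.random.normal([batch_size, 32, 32, 3])
tiled = tf.tile(images, [ensemble_size, 1, 1, 1])  # [8, 32, 32, 3]
# Stand-in for model(tiled): one logit row per (member, example) pair.
logits = tf.random.normal([ensemble_size * batch_size, num_classes])
probs = tf.nn.softmax(logits)
per_member = tf.split(probs, num_or_size_splits=ensemble_size, axis=0)
ensemble_probs = tf.reduce_mean(per_member, axis=0)  # [2, 10]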
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    train_dataset_size = ds_info.splits['train'].num_examples
    steps_per_epoch = train_dataset_size // batch_size
    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    train_dataset = ub.datasets.get(
        FLAGS.dataset, split=tfds.Split.TRAIN).load(batch_size=batch_size)
    clean_test_dataset = ub.datasets.get(
        FLAGS.dataset, split=tfds.Split.TEST).load(batch_size=batch_size)
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
    }
    if FLAGS.corruptions_interval > 0:
        extra_kwargs = {}
        if FLAGS.dataset == 'cifar100':
            extra_kwargs['data_dir'] = FLAGS.cifar100_c_path
        corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
        for corruption_type in corruption_types:
            for severity in range(1, 6):
                dataset = ub.datasets.get(
                    f'{FLAGS.dataset}_corrupted',
                    corruption_type=corruption_type,
                    severity=severity,
                    split=tfds.Split.TEST,
                    **extra_kwargs).load(batch_size=batch_size)
                test_datasets[f'{corruption_type}_{severity}'] = (
                    strategy.experimental_distribute_dataset(dataset))

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building ResNet model')
        model = ub.models.wide_resnet_variational(
            input_shape=ds_info.features['image'].shape,
            depth=28,
            width_multiplier=10,
            num_classes=num_classes,
            prior_stddev=FLAGS.prior_stddev,
            dataset_size=train_dataset_size,
            stddev_init=FLAGS.stddev_init)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale the learning rate with the batch size, and rescale the
        # decay epochs from the vanilla 200-epoch schedule.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
        lr_schedule = utils.LearningRateSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=0.9,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss': tf.keras.metrics.Mean(),
            'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'train/kl': tf.keras.metrics.Mean(),
            'train/kl_scale': tf.keras.metrics.Mean(),
            'test/negative_log_likelihood': tf.keras.metrics.Mean(),
            'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        }
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, 6):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
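            # The optimizer's step counter is saved in the checkpoint, so the
            # epoch to resume from can be recovered from it.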
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, logits, from_logits=True))

                filtered_variables = []
                for var in model.trainable_variables:
                    # Apply L2 only to the batch norm parameters and bias
                    # terms. This excludes only the fast-weight approximate
                    # posterior/prior parameters, so pay attention to their
                    # naming scheme.
                    if 'batch_norm' in var.name or 'bias' in var.name:
                        filtered_variables.append(tf.reshape(var, (-1, )))

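                # tf.nn.l2_loss returns sum(t ** 2) / 2, so the factor of 2
                # recovers the plain squared L2 norm before scaling by FLAGS.l2.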
                l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
                    tf.concat(filtered_variables, axis=0))
                kl = sum(model.losses)
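                # Anneal the KL term linearly from ~0 to 1 over the first
                # kl_annealing_epochs epochs, measured in optimizer steps.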
                kl_scale = tf.cast(optimizer.iterations + 1, kl.dtype)
                kl_scale /= steps_per_epoch * FLAGS.kl_annealing_epochs
                kl_scale = tf.minimum(1., kl_scale)
                kl_loss = kl_scale * kl

                # Scale the loss, since TPUStrategy sum-reduces gradients
                # across replicas.
                loss = negative_log_likelihood + l2_loss + kl_loss
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(logits)
            metrics['train/ece'].update_state(labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/kl'].update_state(kl)
            metrics['train/kl_scale'].update_state(kl_scale)
            metrics['train/accuracy'].update_state(labels, logits)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images = inputs['features']
            labels = inputs['labels']
            # TODO(trandustin): Use more eval samples only on corrupted predictions;
            # it's expensive but a one-time compute if scheduled post-training.
            if FLAGS.num_eval_samples > 1 and dataset_name != 'clean':
                logits = tf.stack(
                    [model(images, training=False)
                     for _ in range(FLAGS.num_eval_samples)],
                    axis=0)
            else:
                logits = model(images, training=False)
            probs = tf.nn.softmax(logits)
            if FLAGS.num_eval_samples > 1 and dataset_name != 'clean':
                probs = tf.reduce_mean(probs, axis=0)
            negative_log_likelihood = tf.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(labels, probs))

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)

        strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            train_step(train_iterator)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            for step in range(steps_per_eval):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_start_time = time.time()
                test_step(test_iterator, dataset_name)
                ms_per_example = (time.time() -
                                  test_start_time) * 1e6 / batch_size
                metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)
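
The train step above anneals the weight on the KL term linearly over the first FLAGS.kl_annealing_epochs epochs. A minimal sketch of that schedule in isolation (kl_annealing_scale is a hypothetical helper; the script computes this inline from optimizer.iterations):

import tensorflow as tf

def kl_annealing_scale(step, steps_per_epoch, kl_annealing_epochs):
    # Ramps from ~0 at the first step to 1.0 once kl_annealing_epochs have
    # elapsed, then stays at 1.0.
    scale = tf.cast(step + 1, tf.float32) / (steps_per_epoch * kl_annealing_epochs)
    return tf.minimum(1.0, scale)

With steps_per_epoch=390 and kl_annealing_epochs=200, for example, the KL weight reaches 1.0 only after 78,000 steps.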
Example No. 14
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.experimental.TPUStrategy(resolver)

    per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size
    assert 0 < FLAGS.train_proportion <= 1, (
        'Proportion of train set has to meet 0 < prop <= 1.')

    drop_remainder_validation = True
    if not FLAGS.use_gpu:
        # This has to be True for TPU training; otherwise the batch size of
        # images in the validation set can't be determined at TPU compile time.
        assert drop_remainder_validation, 'drop_remainder must be True in TPU mode.'

    train_input_fn = utils.load_input_fn(split=tfds.Split.TRAIN,
                                         name=FLAGS.dataset,
                                         batch_size=per_core_batch_size,
                                         use_bfloat16=FLAGS.use_bfloat16,
                                         repeat=True,
                                         proportion=FLAGS.train_proportion)
    validation_proportion = 1 - FLAGS.train_proportion
    validation_input_fn = utils.load_input_fn(
        split=tfds.Split.VALIDATION,
        name=FLAGS.dataset,
        batch_size=per_core_batch_size,
        use_bfloat16=FLAGS.use_bfloat16,
        repeat=True,
        proportion=validation_proportion,
        drop_remainder=drop_remainder_validation)
    clean_test_input_fn = utils.load_input_fn(split=tfds.Split.TEST,
                                              name=FLAGS.dataset,
                                              batch_size=per_core_batch_size,
                                              use_bfloat16=FLAGS.use_bfloat16)
    train_dataset = strategy.experimental_distribute_datasets_from_function(
        train_input_fn)
    validation_dataset = strategy.experimental_distribute_datasets_from_function(
        validation_input_fn)
    test_datasets = {
        'clean':
        strategy.experimental_distribute_datasets_from_function(
            clean_test_input_fn),
    }
    if FLAGS.corruptions_interval > 0:
        if FLAGS.dataset == 'cifar10':
            load_c_input_fn = utils.load_cifar10_c_input_fn
        else:
            load_c_input_fn = functools.partial(utils.load_cifar100_c_input_fn,
                                                path=FLAGS.cifar100_c_path)
        corruption_types, max_intensity = utils.load_corrupted_test_info(
            FLAGS.dataset)
        for corruption in corruption_types:
            for intensity in range(1, max_intensity + 1):
                input_fn = load_c_input_fn(corruption_name=corruption,
                                           corruption_intensity=intensity,
                                           batch_size=per_core_batch_size,
                                           use_bfloat16=FLAGS.use_bfloat16)
                test_datasets['{0}_{1}'.format(corruption, intensity)] = (
                    strategy.experimental_distribute_datasets_from_function(
                        input_fn))

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = per_core_batch_size * FLAGS.num_cores
    train_sample_size = ds_info.splits[
        'train'].num_examples * FLAGS.train_proportion
    steps_per_epoch = int(train_sample_size / batch_size)
    train_sample_size = int(train_sample_size)

    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    logging.info('Building Keras model.')
    depth = 28
    width = 10

    dict_ranges = {'min': FLAGS.min_l2_range, 'max': FLAGS.max_l2_range}
    ranges = [dict_ranges for _ in range(6)]  # 6 independent l2 parameters
    model_config = {
        'key_to_index': {
            'input_conv_l2_kernel': 0,
            'group_l2_kernel': 1,
            'group_1_l2_kernel': 2,
            'group_2_l2_kernel': 3,
            'dense_l2_kernel': 4,
            'dense_l2_bias': 5,
        },
        'ranges': ranges,
        'test': None
    }
    lambdas_config = LambdaConfig(model_config['ranges'],
                                  model_config['key_to_index'])

    if FLAGS.e_body_hidden_units > 0:
        e_body_arch = '({},)'.format(FLAGS.e_body_hidden_units)
    else:
        e_body_arch = '()'
    e_shared_arch = '()'
    e_activation = 'tanh'
    filters_resnet = [16]
    for i in range(3):  # 3 groups of blocks
        filters_resnet.extend([16 * width * 2**i] * 9)  # 9 layers in each block
    # The e_head dim for a conv2d layer is just its number of filters (kernel
    # only); for the final dense layer it is twice the number of classes
    # (kernel + bias).
    e_head_dims = list(filters_resnet) + [2 * num_classes]

    with strategy.scope():
        e_models = e_factory(
            lambdas_config.input_shape,
            e_head_dims=e_head_dims,
            e_body_arch=eval(e_body_arch),  # pylint: disable=eval-used
            e_shared_arch=eval(e_shared_arch),  # pylint: disable=eval-used
            activation=e_activation,
            use_bias=FLAGS.e_model_use_bias,
            e_head_init=FLAGS.init_emodels_stddev)

        model = wide_resnet_hyperbatchensemble(
            input_shape=ds_info.features['image'].shape,
            depth=depth,
            width_multiplier=width,
            num_classes=num_classes,
            ensemble_size=FLAGS.ensemble_size,
            random_sign_init=FLAGS.random_sign_init,
            config=lambdas_config,
            e_models=e_models,
            l2_batchnorm_layer=FLAGS.l2_batchnorm,
            regularize_fast_weights=FLAGS.regularize_fast_weights,
            fast_weights_eq_contraint=FLAGS.fast_weights_eq_contraint,
            version=2)

        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # build hyper-batchensemble complete -------------------------

        # Initialize Lambda distributions for tuning
        lambdas_mean = tf.reduce_mean(
            log_uniform_mean([lambdas_config.log_min, lambdas_config.log_max]))
        lambdas0 = tf.random.normal((FLAGS.ensemble_size, lambdas_config.dim),
                                    lambdas_mean,
                                    0.1 * FLAGS.ens_init_delta_bounds)
        lower0 = lambdas0 - tf.constant(FLAGS.ens_init_delta_bounds)
        lower0 = tf.maximum(lower0, 1e-8)
        upper0 = lambdas0 + tf.constant(FLAGS.ens_init_delta_bounds)

        log_lower = tf.Variable(tf.math.log(lower0))
        log_upper = tf.Variable(tf.math.log(upper0))
        lambda_parameters = [log_lower, log_upper]  # these variables are tuned
        clip_lambda_parameters(lambda_parameters, lambdas_config)

        # Optimizer settings to train model weights
        # Linearly scale the learning rate with the batch size, per the vanilla
        # settings. Note: here we don't rescale the decay epochs by
        # train_epochs / 200 as for the other uncertainty baselines.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [int(l) for l in FLAGS.lr_decay_epochs]

        lr_schedule = utils.LearningRateSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=0.9,
                                            nesterov=True)

        # tuner used for optimizing lambda_parameters
        tuner = tf.keras.optimizers.Adam(FLAGS.lr_tuning)

        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss': tf.keras.metrics.Mean(),
            'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'train/disagreement': tf.keras.metrics.Mean(),
            'train/average_kl': tf.keras.metrics.Mean(),
            'train/cosine_similarity': tf.keras.metrics.Mean(),
            'test/negative_log_likelihood': tf.keras.metrics.Mean(),
            'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/gibbs_nll': tf.keras.metrics.Mean(),
            'test/gibbs_accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/disagreement': tf.keras.metrics.Mean(),
            'test/average_kl': tf.keras.metrics.Mean(),
            'test/cosine_similarity': tf.keras.metrics.Mean(),
            'validation/loss': tf.keras.metrics.Mean(),
            'validation/loss_entropy': tf.keras.metrics.Mean(),
            'validation/loss_ce': tf.keras.metrics.Mean()
        }
        corrupt_metrics = {}

        for i in range(FLAGS.ensemble_size):
            metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
            metrics['test/accuracy_member_{}'.format(i)] = (
                tf.keras.metrics.SparseCategoricalAccuracy())
        if FLAGS.corruptions_interval > 0:
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

        checkpoint = tf.train.Checkpoint(model=model,
                                         lambda_parameters=lambda_parameters,
                                         optimizer=optimizer)

        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint and FLAGS.restore_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])

            # generate lambdas
            lambdas = log_uniform_sample(per_core_batch_size,
                                         lambda_parameters)
            lambdas = tf.reshape(lambdas,
                                 (FLAGS.ensemble_size * per_core_batch_size,
                                  lambdas_config.dim))

            with tf.GradientTape() as tape:
                logits = model([images, lambdas], training=True)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)

                if FLAGS.use_gibbs_ce:
                    # Average of per-member CEs; labels are tiled only for the
                    # Gibbs CE loss.
                    labels = tf.tile(labels, [FLAGS.ensemble_size])
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels, logits, from_logits=True))
                else:
                    # Ensemble CE uses no tiling of the labels
                    negative_log_likelihood = ensemble_crossentropy(
                        labels, logits, FLAGS.ensemble_size)
                # Note: divide l2_loss by the train sample size (this differs
                # from the uncertainty_baselines implementation).
                l2_loss = sum(model.losses) / train_sample_size
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, since TPUStrategy sum-reduces gradients
                # across replicas.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)

            # Separate learning rate for fast weights.
            grads_and_vars = []
            for grad, var in zip(grads, model.trainable_variables):
                if (('alpha' in var.name or 'gamma' in var.name)
                        and 'batch_norm' not in var.name):
                    grads_and_vars.append(
                        (grad * FLAGS.fast_weight_lr_multiplier, var))
                else:
                    grads_and_vars.append((grad, var))
            optimizer.apply_gradients(grads_and_vars)

            probs = tf.nn.softmax(logits)
            per_probs = tf.split(probs,
                                 num_or_size_splits=FLAGS.ensemble_size,
                                 axis=0)
            per_probs_stacked = tf.stack(per_probs, axis=0)
            metrics['train/ece'].update_state(labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)
            diversity_results = um.average_pairwise_diversity(
                per_probs_stacked, FLAGS.ensemble_size)
            for k, v in diversity_results.items():
                metrics['train/' + k].update_state(v)


        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def tuning_step(iterator):
        """Tuning StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])

            with tf.GradientTape(watch_accessed_variables=False) as tape:
                tape.watch(lambda_parameters)

                # sample lambdas
                if FLAGS.sample_and_tune:
                    lambdas = log_uniform_sample(per_core_batch_size,
                                                 lambda_parameters)
                else:
                    lambdas = log_uniform_mean(lambda_parameters)
                    lambdas = tf.repeat(lambdas, per_core_batch_size, axis=0)
                lambdas = tf.reshape(lambdas,
                                     (FLAGS.ensemble_size *
                                      per_core_batch_size, lambdas_config.dim))
                # ensemble CE
                logits = model([images, lambdas], training=False)
                ce = ensemble_crossentropy(labels, logits, FLAGS.ensemble_size)
                # entropy penalty for lambda distribution
                entropy = FLAGS.tau * log_uniform_entropy(lambda_parameters)
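                # Subtracting the scaled entropy rewards wider lambda ranges,
                # which keeps the tuned bounds from collapsing to a point.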
                loss = ce - entropy
                scaled_loss = loss / strategy.num_replicas_in_sync

            gradients = tape.gradient(loss, lambda_parameters)
            tuner.apply_gradients(zip(gradients, lambda_parameters))

            metrics['validation/loss_ce'].update_state(
                ce / strategy.num_replicas_in_sync)
            metrics['validation/loss_entropy'].update_state(
                entropy / strategy.num_replicas_in_sync)
            metrics['validation/loss'].update_state(scaled_loss)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            # Note that we don't use tf.tile for labels here
            images, labels = inputs
            images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])

            # get lambdas
            lambdas = log_uniform_mean(lambda_parameters)
            rep_lambdas = tf.repeat(lambdas, per_core_batch_size, axis=0)

            # eval on testsets
            logits = model([images, rep_lambdas], training=False)
            if FLAGS.use_bfloat16:
                logits = tf.cast(logits, tf.float32)
            probs = tf.nn.softmax(logits)
            per_probs = tf.split(probs,
                                 num_or_size_splits=FLAGS.ensemble_size,
                                 axis=0)

            # Per-member performance and Gibbs performance (the average
            # per-member performance).
            if dataset_name == 'clean':
                for i in range(FLAGS.ensemble_size):
                    member_probs = per_probs[i]
                    member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                        labels, member_probs)
                    metrics['test/nll_member_{}'.format(i)].update_state(
                        member_loss)
                    metrics['test/accuracy_member_{}'.format(i)].update_state(
                        labels, member_probs)

                labels_tile = tf.tile(labels, [FLAGS.ensemble_size])
                metrics['test/gibbs_nll'].update_state(
                    tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels_tile, logits, from_logits=True)))
                metrics['test/gibbs_accuracy'].update_state(labels_tile, probs)

            # ensemble performance
            negative_log_likelihood = ensemble_crossentropy(
                labels, logits, FLAGS.ensemble_size)
            probs = tf.reduce_mean(per_probs, axis=0)
            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)

            if dataset_name == 'clean':
                per_probs_stacked = tf.stack(per_probs, axis=0)
                diversity_results = um.average_pairwise_diversity(
                    per_probs_stacked, FLAGS.ensemble_size)
                for k, v in diversity_results.items():
                    metrics['test/' + k].update_state(v)

        strategy.run(step_fn, args=(next(iterator), ))

    logging.info('--- Starting training using %d examples. ---',
                 train_sample_size)
    train_iterator = iter(train_dataset)
    validation_iterator = iter(validation_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            train_step(train_iterator)
            do_tuning = (epoch >= FLAGS.tuning_warmup_epochs)
            if do_tuning and ((step + 1) % FLAGS.tuning_every_x_step == 0):
                tuning_step(validation_iterator)
                # clip lambda parameters if outside of range
                clip_lambda_parameters(lambda_parameters, lambdas_config)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        # evaluate on test data
        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            for step in range(steps_per_eval):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_step(test_iterator, dataset_name)
            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity)
        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Validation Loss: %.4f, CE: %.4f, Entropy: %.4f',
                     metrics['validation/loss'].result(),
                     metrics['validation/loss_ce'].result(),
                     metrics['validation/loss_entropy'].result())
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        for i in range(FLAGS.ensemble_size):
            logging.info(
                'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
                metrics['test/nll_member_{}'.format(i)].result(),
                metrics['test/accuracy_member_{}'.format(i)].result() * 100)

        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update({
            name: metric.result()
            for name, metric in corrupt_metrics.items()
        })
        total_results.update(corrupt_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        # save checkpoint and lambdas config
        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            lambdas_cf = lambdas_config.get_config()
            filepath = os.path.join(FLAGS.output_dir, 'lambdas_config.p')
            with tf.io.gfile.GFile(filepath, 'wb') as fp:
                pickle.dump(lambdas_cf, fp, protocol=pickle.HIGHEST_PROTOCOL)
            logging.info('Saved checkpoint to %s', checkpoint_name)
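
log_uniform_sample, log_uniform_mean and log_uniform_entropy above come from the hyper-batchensemble utilities and are not shown in this example. A hedged sketch of what a log-uniform sampler over the tuned bounds could look like (the real helper may differ in shape and dtype conventions):

import tensorflow as tf

def log_uniform_sample_sketch(batch_size, lambda_parameters):
    log_lower, log_upper = lambda_parameters  # each of shape (ensemble_size, dim)
    ens_size, dim = log_lower.shape
    u = tf.random.uniform((ens_size, batch_size, dim), dtype=log_lower.dtype)
    # Interpolate uniformly in log space between the per-member bounds, then
    # map back with exp.
    log_sample = log_lower[:, None, :] + u * (log_upper - log_lower)[:, None, :]
    return tf.exp(log_sample)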
Example No. 15
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    aug_params = {
        'augmix': FLAGS.augmix,
        'aug_count': FLAGS.aug_count,
        'augmix_depth': FLAGS.augmix_depth,
        'augmix_prob_coeff': FLAGS.augmix_prob_coeff,
        'augmix_width': FLAGS.augmix_width,
        'ensemble_size': 1,
        'mixup_alpha': FLAGS.mixup_alpha,
        'adaptive_mixup': FLAGS.adaptive_mixup,
        'random_augment': FLAGS.random_augment,
        'forget_mixup': FLAGS.forget_mixup,
        'num_cores': FLAGS.num_cores,
        'threshold': FLAGS.forget_threshold,
    }
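    # train_step tiles each image num_dropout_samples_training times, so the
    # data batch is shrunk here to keep the per-step compute batch constant.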
    batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores //
                  FLAGS.num_dropout_samples_training)
    train_input_fn = data_utils.load_input_fn(
        split=tfds.Split.TRAIN,
        name=FLAGS.dataset,
        batch_size=batch_size,
        use_bfloat16=FLAGS.use_bfloat16,
        proportion=FLAGS.train_proportion,
        validation_set=FLAGS.validation,
        aug_params=aug_params)
    if FLAGS.validation:
        validation_input_fn = data_utils.load_input_fn(
            split=tfds.Split.VALIDATION,
            name=FLAGS.dataset,
            batch_size=FLAGS.per_core_batch_size,
            use_bfloat16=FLAGS.use_bfloat16,
            validation_set=True)
        val_dataset = strategy.experimental_distribute_datasets_from_function(
            validation_input_fn)
    clean_test_input_fn = data_utils.load_input_fn(
        split=tfds.Split.TEST,
        name=FLAGS.dataset,
        batch_size=FLAGS.per_core_batch_size,
        use_bfloat16=FLAGS.use_bfloat16)
    train_dataset = strategy.experimental_distribute_dataset(train_input_fn())
    test_datasets = {
        'clean':
        strategy.experimental_distribute_datasets_from_function(
            clean_test_input_fn),
    }
    if FLAGS.corruptions_interval > 0:
        if FLAGS.dataset == 'cifar10':
            load_c_dataset = utils.load_cifar10_c
        else:
            load_c_dataset = functools.partial(utils.load_cifar100_c,
                                               path=FLAGS.cifar100_c_path)
        corruption_types, max_intensity = utils.load_corrupted_test_info(
            FLAGS.dataset)
        for corruption in corruption_types:
            for intensity in range(1, max_intensity + 1):
                dataset = load_c_dataset(corruption_name=corruption,
                                         corruption_intensity=intensity,
                                         batch_size=FLAGS.per_core_batch_size *
                                         FLAGS.num_cores,
                                         use_bfloat16=FLAGS.use_bfloat16)
                test_datasets['{0}_{1}'.format(corruption, intensity)] = (
                    strategy.experimental_distribute_dataset(dataset))

    ds_info = tfds.builder(FLAGS.dataset).info
    num_train_examples = ds_info.splits['train'].num_examples
    # train_proportion is a float, so steps_per_epoch must be cast to int.
    if FLAGS.validation:
        # TODO(ywenxu): Remove hard-coding validation images.
        steps_per_epoch = int(
            (num_train_examples * FLAGS.train_proportion - 2500) // batch_size)
        steps_per_val = 2500 // (FLAGS.per_core_batch_size * FLAGS.num_cores)
    else:
        steps_per_epoch = int(
            num_train_examples * FLAGS.train_proportion) // batch_size
    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building ResNet model')
        model = ub.models.wide_resnet_dropout(
            input_shape=ds_info.features['image'].shape,
            depth=28,
            width_multiplier=10,
            num_classes=num_classes,
            l2=FLAGS.l2,
            dropout_rate=FLAGS.dropout_rate,
            residual_dropout=FLAGS.residual_dropout,
            filterwise_dropout=FLAGS.filterwise_dropout)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale the learning rate with the batch size, and rescale the
        # decay epochs from the vanilla 200-epoch schedule.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
        lr_schedule = schedules.WarmUpPiecewiseConstantSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=0.9,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss': tf.keras.metrics.Mean(),
            'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood': tf.keras.metrics.Mean(),
            'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        }
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            if FLAGS.forget_mixup:
                images, labels, idx = inputs
            else:
                images, labels = inputs
            if FLAGS.augmix and FLAGS.aug_count >= 1:
                # Index 0 at augmix preprocessing is the unperturbed image.
                images = images[:, 1, ...]
                # This is for the case of combining AugMix and Mixup.
                if FLAGS.mixup_alpha > 0:
                    labels = tf.split(labels, FLAGS.aug_count + 1, axis=0)[1]

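            # Tile the batch so that each image is evaluated under several
            # independent dropout masks in a single forward pass.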
            images = tf.tile(images,
                             [FLAGS.num_dropout_samples_training, 1, 1, 1])
            if FLAGS.mixup_alpha > 0:
                labels = tf.tile(labels,
                                 [FLAGS.num_dropout_samples_training, 1])
            else:
                labels = tf.tile(labels, [FLAGS.num_dropout_samples_training])

            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                if FLAGS.mixup_alpha > 0:
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.categorical_crossentropy(
                            labels, logits, from_logits=True))
                else:
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels, logits, from_logits=True))
                l2_loss = sum(model.losses)
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, since TPUStrategy sum-reduces gradients
                # across replicas.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(logits)
            if FLAGS.mixup_alpha > 0:
                labels = tf.argmax(labels, axis=-1)
            metrics['train/ece'].update_state(labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)
            if FLAGS.forget_mixup:
                train_predictions = tf.argmax(probs, -1)
                labels = tf.cast(labels, train_predictions.dtype)
                # For each ensemble member (1 here), we accumulate the accuracy counts.
                accuracy_counts = tf.cast(
                    tf.reshape((train_predictions == labels), [1, -1]),
                    tf.float32)
                return accuracy_counts, idx

        if FLAGS.forget_mixup:
            return strategy.run(step_fn, args=(next(iterator), ))
        else:
            strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs

            logits_list = []
            for _ in range(FLAGS.num_dropout_samples):
                logits = model(images, training=False)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                logits_list.append(logits)

            # Logits dimension is (num_samples, batch_size, num_classes).
            logits_list = tf.stack(logits_list, axis=0)
            probs_list = tf.nn.softmax(logits_list)
            probs = tf.reduce_mean(probs_list, axis=0)

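            # The predictive NLL below treats the dropout samples as a uniform
            # mixture: -log(mean_s p_s(y)), computed stably via logsumexp.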
            labels_broadcasted = tf.broadcast_to(
                labels, [FLAGS.num_dropout_samples, labels.shape[0]])
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits_list, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
                tf.math.log(float(FLAGS.num_dropout_samples)))

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            elif dataset_name != 'validation':
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)

            if dataset_name == 'validation':
                return tf.reshape(probs, [1, -1, num_classes]), labels

        if dataset_name == 'validation':
            return strategy.run(step_fn, args=(next(iterator), ))
        else:
            strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    forget_counts_history = []
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        acc_counts_list = []
        idx_list = []
        for step in range(steps_per_epoch):
            if FLAGS.forget_mixup:
                temp_accuracy_counts, temp_idx = train_step(train_iterator)
                acc_counts_list.append(temp_accuracy_counts)
                idx_list.append(temp_idx)
            else:
                train_step(train_iterator)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        # Only one of forget_mixup and adaptive_mixup can be true.
        if FLAGS.forget_mixup:
            current_acc = [
                tf.concat(list(acc_counts_list[i].values), axis=1)
                for i in range(len(acc_counts_list))
            ]
            total_idx = [
                tf.concat(list(idx_list[i].values), axis=0)
                for i in range(len(idx_list))
            ]
            current_acc = tf.cast(tf.concat(current_acc, axis=1), tf.int32)
            total_idx = tf.concat(total_idx, axis=0)

            current_forget_path = os.path.join(FLAGS.output_dir,
                                               'forget_counts.npy')
            last_acc_path = os.path.join(FLAGS.output_dir, 'last_acc.npy')
            if epoch == 0:
                forget_counts = tf.zeros([1, num_train_examples],
                                         dtype=tf.int32)
                last_acc = tf.zeros([1, num_train_examples], dtype=tf.int32)
            else:
                if 'last_acc' not in locals():
                    with tf.io.gfile.GFile(last_acc_path, 'rb') as f:
                        last_acc = np.load(f)
                    last_acc = tf.cast(tf.convert_to_tensor(last_acc),
                                       tf.int32)
                if 'forget_counts' not in locals():
                    with tf.io.gfile.GFile(current_forget_path, 'rb') as f:
                        forget_counts = np.load(f)
                    forget_counts = tf.cast(
                        tf.convert_to_tensor(forget_counts), tf.int32)

            selected_last_acc = tf.gather(last_acc, total_idx, axis=1)
            forget_this_epoch = tf.cast(current_acc < selected_last_acc,
                                        tf.int32)
            forget_this_epoch = tf.transpose(forget_this_epoch)
            target_shape = tf.constant([num_train_examples, 1])
            current_forget_counts = tf.scatter_nd(
                tf.reshape(total_idx, [-1, 1]), forget_this_epoch,
                target_shape)
            current_forget_counts = tf.transpose(current_forget_counts)
            acc_this_epoch = tf.transpose(current_acc)
            last_acc = tf.scatter_nd(tf.reshape(total_idx, [-1, 1]),
                                     acc_this_epoch, target_shape)
            # This is a lower bound on the true accuracy.
            last_acc = tf.transpose(last_acc)

            # TODO(ywenxu): We count the dropped examples as forgotten. Fix this later.
            forget_counts += current_forget_counts
            forget_counts_history.append(forget_counts)
            logging.info('forgetting counts')
            logging.info(tf.stack(forget_counts_history, 0))
            with tf.io.gfile.GFile(
                    os.path.join(FLAGS.output_dir,
                                 'forget_counts_history.npy'), 'wb') as f:
                np.save(f, tf.stack(forget_counts_history, 0).numpy())
            with tf.io.gfile.GFile(current_forget_path, 'wb') as f:
                np.save(f, forget_counts.numpy())
            with tf.io.gfile.GFile(last_acc_path, 'wb') as f:
                np.save(f, last_acc.numpy())
            aug_params['forget_counts_dir'] = current_forget_path

            train_input_fn = data_utils.load_input_fn(
                split=tfds.Split.TRAIN,
                name=FLAGS.dataset,
                batch_size=batch_size,
                use_bfloat16=FLAGS.use_bfloat16,
                validation_set=FLAGS.validation,
                aug_params=aug_params)
            train_dataset = strategy.experimental_distribute_dataset(
                train_input_fn())
            train_iterator = iter(train_dataset)

        elif FLAGS.adaptive_mixup:
            val_iterator = iter(val_dataset)
            logging.info('Testing on validation dataset')
            predictions_list = []
            labels_list = []
            for step in range(steps_per_val):
                temp_predictions, temp_labels = test_step(
                    val_iterator, 'validation')
                predictions_list.append(temp_predictions)
                labels_list.append(temp_labels)
            predictions = [
                tf.concat(list(predictions_list[i].values), axis=1)
                for i in range(len(predictions_list))
            ]
            labels = [
                tf.concat(list(labels_list[i].values), axis=0)
                for i in range(len(labels_list))
            ]
            predictions = tf.concat(predictions, axis=1)
            labels = tf.cast(tf.concat(labels, axis=0), tf.int64)

            def compute_acc_conf(preds, label, focus_class):
                class_preds = tf.boolean_mask(preds,
                                              label == focus_class,
                                              axis=1)
                class_pred_labels = tf.argmax(class_preds, axis=-1)
                confidence = tf.reduce_mean(
                    tf.reduce_max(class_preds, axis=-1), -1)
                accuracy = tf.reduce_mean(tf.cast(
                    class_pred_labels == focus_class, tf.float32),
                                          axis=-1)
                return accuracy - confidence

            calibration_per_class = [
                compute_acc_conf(predictions, labels, i)
                for i in range(num_classes)
            ]
            calibration_per_class = tf.stack(calibration_per_class, axis=1)
            logging.info('calibration per class')
            logging.info(calibration_per_class)
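            # Classes with a positive accuracy-minus-confidence gap get a
            # coefficient of 1.0; over-confident classes fall back to
            # FLAGS.mixup_alpha.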
            mixup_coeff = tf.where(calibration_per_class > 0, 1.0,
                                   FLAGS.mixup_alpha)
            mixup_coeff = tf.clip_by_value(mixup_coeff, 0, 1)
            logging.info('mixup coeff')
            logging.info(mixup_coeff)
            aug_params['mixup_coeff'] = mixup_coeff
            train_input_fn = data_utils.load_input_fn(
                split=tfds.Split.TRAIN,
                name=FLAGS.dataset,
                batch_size=batch_size,
                use_bfloat16=FLAGS.use_bfloat16,
                validation_set=True,
                aug_params=aug_params)
            train_dataset = strategy.experimental_distribute_dataset(
                train_input_fn())
            train_iterator = iter(train_dataset)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            for step in range(steps_per_eval):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_start_time = time.time()
                test_step(test_iterator, dataset_name)
                ms_per_example = (time.time() -
                                  test_start_time) * 1e6 / batch_size
                metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)
    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)
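A note on the adaptive-mixup branch above: it converts the per-class (accuracy minus confidence) gap measured on the validation set into a mixup coefficient, leaving well-calibrated or under-confident classes unmixed (coefficient 1.0) and applying mixup_alpha to over-confident ones. A minimal standalone sketch of that rule, assuming an ensemble axis of size 2 and hypothetical toy values:

import tensorflow as tf

mixup_alpha = 0.2  # hypothetical stand-in for FLAGS.mixup_alpha
# Per-class calibration gaps, shape [ensemble_size, num_classes]:
# accuracy minus mean confidence for each focus class.
calibration_per_class = tf.constant([[0.05, -0.10, 0.00],
                                     [-0.02, 0.03, -0.20]])
# Over-confident classes (gap <= 0) get mixup_alpha; the rest stay unmixed.
mixup_coeff = tf.where(calibration_per_class > 0, 1.0, mixup_alpha)
mixup_coeff = tf.clip_by_value(mixup_coeff, 0, 1)
print(mixup_coeff.numpy())
# [[1.  0.2 0.2]
#  [0.2 1.  0.2]]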
Example No. 16
0
def main(argv):
    del argv  # unused arg
    if not FLAGS.use_gpu:
        raise ValueError('Only GPU is currently supported.')
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    tf.random.set_seed(FLAGS.seed)
    tf.io.gfile.makedirs(FLAGS.output_dir)

    ind_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        split='test', data_dir=FLAGS.data_dir, data_mode='ind')
    ood_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        split='test', data_dir=FLAGS.data_dir, data_mode='ood')
    all_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        split='test', data_dir=FLAGS.data_dir, data_mode='all')

    dataset_builders = {
        'clean': ind_dataset_builder,
        'ood': ood_dataset_builder,
        'all': all_dataset_builder
    }

    ds_info = ind_dataset_builder.tfds_info
    feature_size = ds_info.metadata['feature_size']
    # num_classes is number of valid intents plus out-of-scope intent
    num_classes = ds_info.features['intent_label'].num_classes + 1
    # vocab_size is total number of valid tokens plus the out-of-vocabulary token.
    vocab_size = ind_dataset_builder.tokenizer.num_words + 1

    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores

    test_datasets = {}
    steps_per_eval = {}
    for dataset_name, dataset_builder in dataset_builders.items():
        test_datasets[dataset_name] = dataset_builder.load(
            batch_size=batch_size)
        steps_per_eval[
            dataset_name] = dataset_builder.num_examples // batch_size

    bert_config_dir, _ = sngp.resolve_bert_ckpt_and_config_dir(
        FLAGS.bert_dir, FLAGS.bert_config_dir, FLAGS.bert_ckpt_dir)
    bert_config = bert_utils.create_config(bert_config_dir)

    gp_layer_kwargs = dict(num_inducing=FLAGS.gp_hidden_dim,
                           gp_kernel_scale=FLAGS.gp_scale,
                           gp_output_bias=FLAGS.gp_bias,
                           normalize_input=FLAGS.gp_input_normalization,
                           gp_cov_momentum=FLAGS.gp_cov_discount_factor,
                           gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty)
    spec_norm_kwargs = dict(iteration=FLAGS.spec_norm_iteration,
                            norm_multiplier=FLAGS.spec_norm_bound)

    model, bert_encoder = ub.models.SngpBertBuilder(
        num_classes=num_classes,
        bert_config=bert_config,
        gp_layer_kwargs=gp_layer_kwargs,
        spec_norm_kwargs=spec_norm_kwargs,
        use_gp_layer=FLAGS.use_gp_layer,
        use_spec_norm_att=FLAGS.use_spec_norm_att,
        use_spec_norm_ffn=FLAGS.use_spec_norm_ffn,
        use_layer_norm_att=FLAGS.use_layer_norm_att,
        use_layer_norm_ffn=FLAGS.use_layer_norm_ffn,
        use_spec_norm_plr=FLAGS.use_spec_norm_plr)

    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())

    # Search for checkpoints from their index file; then remove the index suffix.
    ensemble_filenames = tf.io.gfile.glob(
        os.path.join(FLAGS.checkpoint_dir, '**/*.index'))
    ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
    ensemble_size = len(ensemble_filenames)
    logging.info('Ensemble size: %s', ensemble_size)
    logging.info('Ensemble number of weights: %s',
                 ensemble_size * model.count_params())
    logging.info('Ensemble filenames: %s', str(ensemble_filenames))
    checkpoint = tf.train.Checkpoint(model=model)

    # Write model predictions to files.
    num_datasets = len(test_datasets)
    for m, ensemble_filename in enumerate(ensemble_filenames):
        checkpoint.restore(ensemble_filename)
        for n, (name, test_dataset) in enumerate(test_datasets.items()):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            if not tf.io.gfile.exists(filename):
                logits_list = []
                test_iterator = iter(test_dataset)
                for _ in range(steps_per_eval[name]):
                    inputs = next(test_iterator)
                    features, _ = bert_utils.create_feature_and_label(
                        inputs, feature_size)

                    logits, covmat = model(features, training=False)
                    logits = ed.layers.utils.mean_field_logits(
                        logits,
                        covmat,
                        mean_field_factor=FLAGS.gp_mean_field_factor)

                    logits_list.append(logits)

                logits_list = tf.concat(logits_list, axis=0)
                with tf.io.gfile.GFile(filename, 'wb') as f:
                    np.save(f, logits_list.numpy())

            percent = (m * num_datasets +
                       (n + 1)) / (ensemble_size * num_datasets)
            message = (
                '{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                'Dataset {:d}/{:d}'.format(percent, m + 1, ensemble_size,
                                           n + 1, num_datasets))
            logging.info(message)

    metrics = {
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
    }

    for dataset_name in test_datasets:
        if dataset_name != 'clean':
            metrics.update({
                'test/nll_{}'.format(dataset_name):
                tf.keras.metrics.Mean(),
                'test/accuracy_{}'.format(dataset_name):
                tf.keras.metrics.SparseCategoricalAccuracy(),
                'test/ece_{}'.format(dataset_name):
                um.ExpectedCalibrationError(num_bins=FLAGS.num_bins)
            })

    # Finally, define OOD metrics for the combined IND and OOD dataset.
    metrics.update({
        'test/auroc_all': tf.keras.metrics.AUC(curve='ROC'),
        'test/auprc_all': tf.keras.metrics.AUC(curve='PR')
    })

    # Evaluate model predictions.
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
        logits_dataset = []
        for m in range(ensemble_size):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            with tf.io.gfile.GFile(filename, 'rb') as f:
                logits_dataset.append(np.load(f))

        logits_dataset = tf.convert_to_tensor(logits_dataset)
        test_iterator = iter(test_dataset)
        for step in range(steps_per_eval[name]):
            inputs = next(test_iterator)
            _, labels = bert_utils.create_feature_and_label(
                inputs, feature_size)
            logits = logits_dataset[:, (step * batch_size):((step + 1) *
                                                            batch_size)]
            labels = tf.cast(labels, tf.int32)
            negative_log_likelihood = um.ensemble_cross_entropy(labels, logits)
            per_probs = tf.nn.softmax(logits)
            probs = tf.reduce_mean(per_probs, axis=0)
            if name == 'clean':
                gibbs_ce = um.gibbs_cross_entropy(labels, logits)
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            else:
                metrics['test/nll_{}'.format(name)].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy_{}'.format(name)].update_state(
                    labels, probs)
                metrics['test/ece_{}'.format(name)].update_state(labels, probs)

            if name == 'all':
                # Label 150 is the out-of-scope intent (the dataset has 150
                # in-scope intents plus one out-of-scope class).
                ood_labels = tf.cast(labels == 150, labels.dtype)
                ood_probs = 1. - tf.reduce_max(probs, axis=-1)
                metrics['test/auroc_{}'.format(name)].update_state(
                    ood_labels, ood_probs)
                metrics['test/auprc_{}'.format(name)].update_state(
                    ood_labels, ood_probs)

        message = (
            '{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
                (n + 1) / num_datasets, n + 1, num_datasets))
        logging.info(message)

    total_results = {name: metric.result() for name, metric in metrics.items()}
    logging.info('Metrics: %s', total_results)
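The mean-field adjustment used above (ed.layers.utils.mean_field_logits) tempers the SNGP logits by the GP posterior variance before they are averaged and softmaxed. A minimal sketch of the usual formula, logits / sqrt(1 + lambda * variance), assuming the per-example variances sit on the diagonal of covmat (toy values; the library call remains the reference implementation):

import tensorflow as tf

def mean_field_logits_sketch(logits, covmat, mean_field_factor=1.0):
    # Per-example predictive variance from the covariance diagonal.
    variances = tf.linalg.diag_part(covmat)
    # Shrink logits where the GP is uncertain: logits / sqrt(1 + lambda * var).
    scale = tf.sqrt(1. + mean_field_factor * variances)
    return logits / scale[:, tf.newaxis]

logits = tf.constant([[2.0, -1.0], [0.5, 0.5]])
covmat = tf.constant([[4.0, 0.0], [0.0, 0.25]])
print(mean_field_logits_sketch(logits, covmat).numpy())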
Example No. 17
0
def load_pivot_df_dict_googlefig(
        hdf5file='metrics_googlefig/cifar_model_predictions.hdf5',
        clean_key='test',
        instance=0):
    '''
    Load a dict of pivoted metric DataFrames (one per model) from the Google
    paper's predictions HDF5 file.
    '''
    file = h5py.File(hdf5file, 'r')

    corruptions = [
        'corrupt-static-brightness-1', 'corrupt-static-brightness-2',
        'corrupt-static-brightness-3', 'corrupt-static-brightness-4',
        'corrupt-static-brightness-5', 'corrupt-static-contrast-1',
        'corrupt-static-contrast-2', 'corrupt-static-contrast-3',
        'corrupt-static-contrast-4', 'corrupt-static-contrast-5',
        'corrupt-static-defocus_blur-1', 'corrupt-static-defocus_blur-2',
        'corrupt-static-defocus_blur-3', 'corrupt-static-defocus_blur-4',
        'corrupt-static-defocus_blur-5', 'corrupt-static-elastic_transform-1',
        'corrupt-static-elastic_transform-2',
        'corrupt-static-elastic_transform-3',
        'corrupt-static-elastic_transform-4',
        'corrupt-static-elastic_transform-5', 'corrupt-static-fog-1',
        'corrupt-static-fog-2', 'corrupt-static-fog-3', 'corrupt-static-fog-4',
        'corrupt-static-fog-5', 'corrupt-static-frost-1',
        'corrupt-static-frost-2', 'corrupt-static-frost-3',
        'corrupt-static-frost-4', 'corrupt-static-frost-5',
        'corrupt-static-gaussian_blur-1', 'corrupt-static-gaussian_blur-2',
        'corrupt-static-gaussian_blur-3', 'corrupt-static-gaussian_blur-4',
        'corrupt-static-gaussian_blur-5', 'corrupt-static-gaussian_noise-1',
        'corrupt-static-gaussian_noise-2', 'corrupt-static-gaussian_noise-3',
        'corrupt-static-gaussian_noise-4', 'corrupt-static-gaussian_noise-5',
        'corrupt-static-glass_blur-1', 'corrupt-static-glass_blur-2',
        'corrupt-static-glass_blur-3', 'corrupt-static-glass_blur-4',
        'corrupt-static-glass_blur-5', 'corrupt-static-impulse_noise-1',
        'corrupt-static-impulse_noise-2', 'corrupt-static-impulse_noise-3',
        'corrupt-static-impulse_noise-4', 'corrupt-static-impulse_noise-5',
        'corrupt-static-pixelate-1', 'corrupt-static-pixelate-2',
        'corrupt-static-pixelate-3', 'corrupt-static-pixelate-4',
        'corrupt-static-pixelate-5', 'corrupt-static-saturate-1',
        'corrupt-static-saturate-2', 'corrupt-static-saturate-3',
        'corrupt-static-saturate-4', 'corrupt-static-saturate-5',
        'corrupt-static-shot_noise-1', 'corrupt-static-shot_noise-2',
        'corrupt-static-shot_noise-3', 'corrupt-static-shot_noise-4',
        'corrupt-static-shot_noise-5', 'corrupt-static-spatter-1',
        'corrupt-static-spatter-2', 'corrupt-static-spatter-3',
        'corrupt-static-spatter-4', 'corrupt-static-spatter-5',
        'corrupt-static-speckle_noise-1', 'corrupt-static-speckle_noise-2',
        'corrupt-static-speckle_noise-3', 'corrupt-static-speckle_noise-4',
        'corrupt-static-speckle_noise-5', 'corrupt-static-zoom_blur-1',
        'corrupt-static-zoom_blur-2', 'corrupt-static-zoom_blur-3',
        'corrupt-static-zoom_blur-4', 'corrupt-static-zoom_blur-5'
    ]
    clean = [clean_key]
    corruptions += clean
    gfig_dict = dict()

    metrics = [
        tf.keras.metrics.SparseCategoricalAccuracy(name='acc'),
        um.ExpectedCalibrationError(num_bins=15, name='ece'),
        nll(name='nll'),
        BrierScore(name='brier')
    ]

    for model_name, model_data in file.items():
        df = pd.DataFrame(columns=['acc', 'brier', 'ece', 'nll'])
        df.columns.name = 'metrics'
        df.index.name = 'dataset'

        for key in model_data.keys():
            if key in corruptions:
                y = model_data[key]['labels']
                p = model_data[key]['probs']
                if 'roll' in key:
                    key = 'roll'
                for metric in metrics:
                    metric.update_state(y[instance, :].astype(np.int32),
                                        p[instance, :].astype(np.float32))
                d0 = {
                    'acc': metrics[0].result().numpy(),
                    'ece': metrics[1].result().numpy(),
                    'nll': metrics[2].result().numpy(),
                    'brier': metrics[3].result().numpy()
                }
                record = pd.Series(d0, index=df.columns, name=key)
                df = pd.concat([df, record.to_frame().T])
                for metric in metrics:
                    metric.reset_states()

        gfig_dict[model_name] = df.copy()

    return gfig_dict
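Since DataFrame.append was removed in pandas 2.0, the row-append above goes through pd.concat. A minimal standalone illustration of the same pattern, with hypothetical metric values:

import pandas as pd

df = pd.DataFrame(columns=['acc', 'brier', 'ece', 'nll'])
record = pd.Series({'acc': 0.93, 'ece': 0.04, 'nll': 0.31, 'brier': 0.12},
                   index=df.columns, name='corrupt-static-fog-1')
# to_frame().T turns the named Series into a one-row frame whose index is
# the Series name, which pd.concat then appends as a row.
df = pd.concat([df, record.to_frame().T])
print(df)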
Example No. 18
0
def main(argv):
  del argv  # unused arg
  if not FLAGS.use_gpu:
    raise ValueError('Only GPU is currently supported.')
  if FLAGS.num_cores > 1:
    raise ValueError('Only a single accelerator is currently supported.')
  tf.random.set_seed(FLAGS.seed)
  tf.io.gfile.makedirs(FLAGS.output_dir)

  ds_info = tfds.builder(FLAGS.dataset).info
  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_eval = ds_info.splits['test'].num_examples // batch_size
  num_classes = ds_info.features['label'].num_classes

  dataset_input_fn = utils.load_input_fn(
      split=tfds.Split.TEST,
      name=FLAGS.dataset,
      batch_size=FLAGS.per_core_batch_size,
      use_bfloat16=FLAGS.use_bfloat16)
  test_datasets = {'clean': dataset_input_fn()}
  corruption_types, max_intensity = utils.load_corrupted_test_info(
      FLAGS.dataset)
  for name in corruption_types:
    for intensity in range(1, max_intensity + 1):
      dataset_name = '{0}_{1}'.format(name, intensity)
      if FLAGS.dataset == 'cifar10':
        load_c_dataset = utils.load_cifar10_c_input_fn
      else:
        load_c_dataset = functools.partial(
            utils.load_cifar100_c_input_fn, path=FLAGS.cifar100_c_path)
      corrupted_input_fn = load_c_dataset(
          corruption_name=name,
          corruption_intensity=intensity,
          batch_size=FLAGS.per_core_batch_size,
          use_bfloat16=FLAGS.use_bfloat16)
      test_datasets[dataset_name] = corrupted_input_fn()

  model = ub.models.wide_resnet(
      input_shape=ds_info.features['image'].shape,
      depth=28,
      width_multiplier=10,
      num_classes=num_classes,
      l2=0.,
      version=2)
  logging.info('Model input shape: %s', model.input_shape)
  logging.info('Model output shape: %s', model.output_shape)
  logging.info('Model number of weights: %s', model.count_params())

  # Search for checkpoints from their index file; then remove the index suffix.
  ensemble_filenames = tf.io.gfile.glob(os.path.join(FLAGS.checkpoint_dir,
                                                     '**/*.index'))
  ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
  ensemble_size = len(ensemble_filenames)
  logging.info('Ensemble size: %s', ensemble_size)
  logging.info('Ensemble number of weights: %s',
               ensemble_size * model.count_params())
  logging.info('Ensemble filenames: %s', str(ensemble_filenames))
  checkpoint = tf.train.Checkpoint(model=model)

  # Write model predictions to files.
  num_datasets = len(test_datasets)
  for m, ensemble_filename in enumerate(ensemble_filenames):
    checkpoint.restore(ensemble_filename)
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      if not tf.io.gfile.exists(filename):
        logits = []
        test_iterator = iter(test_dataset)
        for _ in range(steps_per_eval):
          features, _ = next(test_iterator)  # pytype: disable=attribute-error
          logits.append(model(features, training=False))

        logits = tf.concat(logits, axis=0)
        with tf.io.gfile.GFile(filename, 'wb') as f:
          np.save(f, logits.numpy())
      percent = (m * num_datasets + (n + 1)) / (ensemble_size * num_datasets)
      message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                 'Dataset {:d}/{:d}'.format(percent,
                                            m + 1,
                                            ensemble_size,
                                            n + 1,
                                            num_datasets))
      logging.info(message)

  metrics = {
      'test/negative_log_likelihood': tf.keras.metrics.Mean(),
      'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
      'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
      'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
  }
  corrupt_metrics = {}
  for name in test_datasets:
    corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
    corrupt_metrics['test/accuracy_{}'.format(name)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())
    corrupt_metrics['test/ece_{}'.format(name)] = (
        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
  for i in range(ensemble_size):
    metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
    metrics['test/accuracy_member_{}'.format(i)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())
  test_diversity = {
      'test/disagreement': tf.keras.metrics.Mean(),
      'test/average_kl': tf.keras.metrics.Mean(),
      'test/cosine_similarity': tf.keras.metrics.Mean(),
  }
  metrics.update(test_diversity)

  # Evaluate model predictions.
  for n, (name, test_dataset) in enumerate(test_datasets.items()):
    logits_dataset = []
    for m in range(ensemble_size):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      with tf.io.gfile.GFile(filename, 'rb') as f:
        logits_dataset.append(np.load(f))

    logits_dataset = tf.convert_to_tensor(logits_dataset)
    test_iterator = iter(test_dataset)
    for step in range(steps_per_eval):
      _, labels = next(test_iterator)  # pytype: disable=attribute-error
      logits = logits_dataset[:, (step*batch_size):((step+1)*batch_size)]
      labels = tf.cast(labels, tf.int32)
      negative_log_likelihood = um.ensemble_cross_entropy(labels, logits)
      per_probs = tf.nn.softmax(logits)
      probs = tf.reduce_mean(per_probs, axis=0)
      if name == 'clean':
        gibbs_ce = um.gibbs_cross_entropy(labels, logits)
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)

        for i in range(ensemble_size):
          member_probs = per_probs[i]
          member_loss = tf.keras.losses.sparse_categorical_crossentropy(
              labels, member_probs)
          metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
          metrics['test/accuracy_member_{}'.format(i)].update_state(
              labels, member_probs)
        diversity_results = um.average_pairwise_diversity(
            per_probs, ensemble_size)
        for k, v in diversity_results.items():
          test_diversity['test/' + k].update_state(v)
      else:
        corrupt_metrics['test/nll_{}'.format(name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(name)].update_state(
            labels, probs)

    message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
        (n + 1) / num_datasets, n + 1, num_datasets))
    logging.info(message)

  corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                    corruption_types,
                                                    max_intensity)
  total_results = {name: metric.result() for name, metric in metrics.items()}
  total_results.update(corrupt_results)
  logging.info('Metrics: %s', total_results)
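The two ensemble losses tracked above are easy to confuse: um.ensemble_cross_entropy scores the averaged predictive distribution, -log((1/M) * sum_m p_m(y)), computed stably via logsumexp, while um.gibbs_cross_entropy averages the members' individual cross-entropies. A self-contained sketch of both on toy logits (my own re-derivation for illustration, not the library code):

import tensorflow as tf

def ensemble_nll_sketch(labels, logits):
    """-log((1/M) * sum_m p_m(y)), via a numerically stable logsumexp."""
    ensemble_size = logits.shape[0]
    labels_tiled = tf.tile(labels[tf.newaxis, :], [ensemble_size, 1])
    log_liks = -tf.keras.losses.sparse_categorical_crossentropy(
        labels_tiled, logits, from_logits=True)  # [ensemble_size, batch]
    return tf.reduce_mean(-tf.reduce_logsumexp(log_liks, axis=0)
                          + tf.math.log(float(ensemble_size)))

def gibbs_ce_sketch(labels, logits):
    """Average of the individual members' cross-entropies."""
    ensemble_size = logits.shape[0]
    labels_tiled = tf.tile(labels[tf.newaxis, :], [ensemble_size, 1])
    return tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(
        labels_tiled, logits, from_logits=True))

labels = tf.constant([0, 1])
logits = tf.random.normal([3, 2, 4])  # 3 members, batch of 2, 4 classes
print(ensemble_nll_sketch(labels, logits).numpy(),
      gibbs_ce_sketch(labels, logits).numpy())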
Example No. 19
0
def main(argv):
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  train_batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores
                      // FLAGS.batch_repetitions)
  test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // train_batch_size
  steps_per_eval = IMAGENET_VALIDATION_IMAGES // test_batch_size

  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

  train_builder = ub.datasets.ImageNetDataset(
      split=tfds.Split.TRAIN,
      use_bfloat16=FLAGS.use_bfloat16)
  train_dataset = train_builder.load(
      batch_size=train_batch_size, strategy=strategy)
  test_builder = ub.datasets.ImageNetDataset(
      split=tfds.Split.TEST,
      use_bfloat16=FLAGS.use_bfloat16)
  test_dataset = test_builder.load(
      batch_size=test_batch_size, strategy=strategy)

  if FLAGS.use_bfloat16:
    tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')

  with strategy.scope():
    logging.info('Building Keras ResNet-50 model')
    model = imagenet_model.resnet50(
        input_shape=(FLAGS.ensemble_size, 224, 224, 3),
        num_classes=NUM_CLASSES,
        ensemble_size=FLAGS.ensemble_size,
        width_multiplier=FLAGS.width_multiplier)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Scale learning rate and decay epochs by vanilla settings.
    base_lr = FLAGS.base_learning_rate * train_batch_size / 256
    decay_epochs = [
        (FLAGS.train_epochs * int(FLAGS.lr_decay_epochs[0])) // 90,
        (FLAGS.train_epochs * int(FLAGS.lr_decay_epochs[1])) // 90,
        (FLAGS.train_epochs * int(FLAGS.lr_decay_epochs[2])) // 90,
    ]
    learning_rate = ub.schedules.WarmUpPiecewiseConstantSchedule(
        steps_per_epoch=steps_per_epoch,
        base_learning_rate=base_lr,
        decay_ratio=0.1,
        decay_epochs=decay_epochs,
        warmup_epochs=FLAGS.lr_warmup_epochs)
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                        momentum=0.9,
                                        nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
    }

    for i in range(FLAGS.ensemble_size):
      metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
      metrics['test/accuracy_member_{}'.format(i)] = (
          tf.keras.metrics.SparseCategoricalAccuracy())
    test_diversity = {
        'test/disagreement': tf.keras.metrics.Mean(),
        'test/average_kl': tf.keras.metrics.Mean(),
        'test/cosine_similarity': tf.keras.metrics.Mean(),
    }
    logging.info('Finished building Keras ResNet-50 model')

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  @tf.function
  def train_step(iterator):
    """Training StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      batch_size = tf.shape(images)[0]
      main_shuffle = tf.random.shuffle(tf.tile(
          tf.range(batch_size), [FLAGS.batch_repetitions]))
      to_shuffle = tf.cast(tf.cast(tf.shape(main_shuffle)[0], tf.float32)
                           * (1. - FLAGS.input_repetition_probability),
                           tf.int32)
      shuffle_indices = [
          tf.concat([tf.random.shuffle(main_shuffle[:to_shuffle]),
                     main_shuffle[to_shuffle:]], axis=0)
          for _ in range(FLAGS.ensemble_size)]
      images = tf.stack([tf.gather(images, indices, axis=0)
                         for indices in shuffle_indices], axis=1)
      labels = tf.stack([tf.gather(labels, indices, axis=0)
                         for indices in shuffle_indices], axis=1)

      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)

        negative_log_likelihood = tf.reduce_mean(tf.reduce_sum(
            tf.keras.losses.sparse_categorical_crossentropy(labels,
                                                            logits,
                                                            from_logits=True),
            axis=1))
        filtered_variables = []
        for var in model.trainable_variables:
          # Apply L2 to kernels and biases. This excludes BN scale/offset
          # parameters, but pay attention to the variable naming scheme.
          if 'kernel' in var.name or 'bias' in var.name:
            filtered_variables.append(tf.reshape(var, (-1,)))

        l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
            tf.concat(filtered_variables, axis=0))
        # Scale the loss, since the distribution strategy will reduce-sum the
        # gradients across replicas.
        loss = negative_log_likelihood + l2_loss
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))

      probs = tf.nn.softmax(tf.reshape(logits, [-1, NUM_CLASSES]))
      flat_labels = tf.reshape(labels, [-1])
      metrics['train/ece'].update_state(flat_labels, probs)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(flat_labels, probs)

    strategy.run(step_fn, args=(next(iterator),))

  @tf.function
  def test_step(iterator):
    """Evaluation StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      images = tf.tile(
          tf.expand_dims(images, 1), [1, FLAGS.ensemble_size, 1, 1, 1])
      logits = model(images, training=False)
      if FLAGS.use_bfloat16:
        logits = tf.cast(logits, tf.float32)
      probs = tf.nn.softmax(logits)

      per_probs = tf.transpose(probs, perm=[1, 0, 2])
      diversity_results = um.average_pairwise_diversity(
          per_probs, FLAGS.ensemble_size)
      for k, v in diversity_results.items():
        test_diversity['test/' + k].update_state(v)

      for i in range(FLAGS.ensemble_size):
        member_probs = probs[:, i]
        member_loss = tf.keras.losses.sparse_categorical_crossentropy(
            labels, member_probs)
        metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
        metrics['test/accuracy_member_{}'.format(i)].update_state(
            labels, member_probs)

      # Negative log marginal likelihood computed in a numerically-stable way.
      labels_tiled = tf.tile(
          tf.expand_dims(labels, 1), [1, FLAGS.ensemble_size])
      log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
          labels_tiled, logits, from_logits=True)
      negative_log_likelihood = tf.reduce_mean(
          -tf.reduce_logsumexp(log_likelihoods, axis=[1]) +
          tf.math.log(float(FLAGS.ensemble_size)))
      probs = tf.math.reduce_mean(probs, axis=1)  # marginalize

      metrics['test/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['test/accuracy'].update_state(labels, probs)
      metrics['test/ece'].update_state(labels, probs)

    strategy.run(step_fn, args=(next(iterator),))

  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    for step in range(steps_per_epoch):
      train_step(train_iterator)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps,
                     epoch + 1,
                     FLAGS.train_epochs,
                     steps_per_sec,
                     eta_seconds / 60,
                     time_elapsed / 60))
      if step % 20 == 0:
        logging.info(message)

    test_iterator = iter(test_dataset)
    for step in range(steps_per_eval):
      if step % 20 == 0:
        logging.info('Starting to run eval step %s of epoch: %s', step, epoch)
      test_start_time = time.time()
      test_step(test_iterator)
      ms_per_example = (time.time() - test_start_time) * 1e6 / test_batch_size
      metrics['test/ms_per_example'].update_state(ms_per_example)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    for i in range(FLAGS.ensemble_size):
      logging.info('Member %d Test Loss: %.4f, Accuracy: %.2f%%',
                   i, metrics['test/nll_member_{}'.format(i)].result(),
                   metrics['test/accuracy_member_{}'.format(i)].result() * 100)

    total_metrics = metrics.copy()
    total_metrics.update(test_diversity)
    total_results = {name: metric.result()
                     for name, metric in total_metrics.items()}
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for _, metric in total_metrics.items():
      metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(os.path.join(
          FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  final_save_name = os.path.join(FLAGS.output_dir, 'model')
  model.save(final_save_name)
  logging.info('Saved model to %s', final_save_name)
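The index shuffling at the top of train_step is the heart of the MIMO batch construction: batch indices are tiled batch_repetitions times, and each ensemble member independently reshuffles a (1 - input_repetition_probability) fraction of them, so the subnetworks see partially independent examples in a single forward pass. A standalone sketch of just the index construction, with toy sizes:

import tensorflow as tf

batch_size = 4
ensemble_size = 3
batch_repetitions = 2
input_repetition_probability = 0.5

main_shuffle = tf.random.shuffle(
    tf.tile(tf.range(batch_size), [batch_repetitions]))
to_shuffle = tf.cast(
    tf.cast(tf.shape(main_shuffle)[0], tf.float32)
    * (1. - input_repetition_probability), tf.int32)
# The first `to_shuffle` indices are reshuffled independently per member;
# the remainder is shared, so members partly see the same examples.
shuffle_indices = [
    tf.concat([tf.random.shuffle(main_shuffle[:to_shuffle]),
               main_shuffle[to_shuffle:]], axis=0)
    for _ in range(ensemble_size)]
print(tf.stack(shuffle_indices).numpy())  # shape [3, 8]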
Example No. 20
0
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores //
                  FLAGS.num_dropout_samples_training)
    test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    num_classes = ds_info.features['label'].num_classes

    aug_params = {
        'augmix': FLAGS.augmix,
        'aug_count': FLAGS.aug_count,
        'augmix_depth': FLAGS.augmix_depth,
        'augmix_prob_coeff': FLAGS.augmix_prob_coeff,
        'augmix_width': FLAGS.augmix_width,
        'ensemble_size': 1,
        'mixup_alpha': FLAGS.mixup_alpha,
    }
    validation_proportion = 1. - FLAGS.train_proportion
    use_validation_set = validation_proportion > 0.
    train_dataset = data_utils.load_dataset(
        split=tfds.Split.TRAIN,
        name=FLAGS.dataset,
        batch_size=batch_size,
        use_bfloat16=FLAGS.use_bfloat16,
        aug_params=aug_params,
        validation_set=use_validation_set,
        validation_proportion=validation_proportion)
    train_sample_size = ds_info.splits[
        'train'].num_examples * FLAGS.train_proportion
    val_sample_size = ds_info.splits['train'].num_examples - train_sample_size
    if use_validation_set:
        validation_dataset = data_utils.load_dataset(
            split=tfds.Split.VALIDATION,
            name=FLAGS.dataset,
            batch_size=batch_size,
            use_bfloat16=FLAGS.use_bfloat16,
            aug_params=aug_params,
            validation_set=use_validation_set,
            validation_proportion=validation_proportion)
        validation_dataset = strategy.experimental_distribute_dataset(
            validation_dataset)
        steps_per_val = int(val_sample_size / batch_size)
    clean_test_dataset = utils.load_dataset(split=tfds.Split.TEST,
                                            name=FLAGS.dataset,
                                            batch_size=test_batch_size,
                                            use_bfloat16=FLAGS.use_bfloat16)

    steps_per_epoch = int(train_sample_size / batch_size)
    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
    }
    if FLAGS.corruptions_interval > 0:
        if FLAGS.dataset == 'cifar10':
            load_c_dataset = utils.load_cifar10_c
        else:
            load_c_dataset = functools.partial(utils.load_cifar100_c,
                                               path=FLAGS.cifar100_c_path)
        corruption_types, max_intensity = utils.load_corrupted_test_info(
            FLAGS.dataset)
        for corruption in corruption_types:
            for intensity in range(1, max_intensity + 1):
                dataset = load_c_dataset(corruption_name=corruption,
                                         corruption_intensity=intensity,
                                         batch_size=test_batch_size,
                                         use_bfloat16=FLAGS.use_bfloat16)
                test_datasets['{0}_{1}'.format(corruption, intensity)] = (
                    strategy.experimental_distribute_dataset(dataset))

    if FLAGS.use_bfloat16:
        tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building ResNet model')
        if FLAGS.use_spec_norm:
            logging.info('Use Spectral Normalization with norm bound %.2f',
                         FLAGS.spec_norm_bound)
        if FLAGS.use_gp_layer:
            logging.info('Use GP layer with hidden units %d',
                         FLAGS.gp_hidden_dim)

        model = ub.models.wide_resnet_sngp(
            input_shape=ds_info.features['image'].shape,
            batch_size=batch_size,
            depth=28,
            width_multiplier=10,
            num_classes=num_classes,
            l2=FLAGS.l2,
            use_mc_dropout=FLAGS.use_mc_dropout,
            use_filterwise_dropout=FLAGS.use_filterwise_dropout,
            dropout_rate=FLAGS.dropout_rate,
            use_gp_layer=FLAGS.use_gp_layer,
            gp_input_dim=FLAGS.gp_input_dim,
            gp_hidden_dim=FLAGS.gp_hidden_dim,
            gp_scale=FLAGS.gp_scale,
            gp_bias=FLAGS.gp_bias,
            gp_input_normalization=FLAGS.gp_input_normalization,
            gp_random_feature_type=FLAGS.gp_random_feature_type,
            gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
            gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
            use_spec_norm=FLAGS.use_spec_norm,
            spec_norm_iteration=FLAGS.spec_norm_iteration,
            spec_norm_bound=FLAGS.spec_norm_bound)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale learning rate and the decay epochs by vanilla settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
        lr_schedule = utils.LearningRateSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=0.9,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss': tf.keras.metrics.Mean(),
            'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood': tf.keras.metrics.Mean(),
            'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/stddev': tf.keras.metrics.Mean(),
        }
        if use_validation_set:
            metrics.update({
                'val/negative_log_likelihood':
                tf.keras.metrics.Mean(),
                'val/accuracy':
                tf.keras.metrics.SparseCategoricalAccuracy(),
                'val/ece':
                um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
                'val/stddev':
                tf.keras.metrics.Mean(),
            })
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
                    corrupt_metrics['test/stddev_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator, step):
        """Training StepFn."""
        def step_fn(inputs, step):
            """Per-Replica StepFn."""
            images, labels = inputs

            if tf.equal(step, 0) and FLAGS.gp_cov_discount_factor < 0:
                # Reset the covariance estimator at the beginning of each
                # epoch.
                model.layers[-1].reset_covariance_matrix()

            if FLAGS.augmix and FLAGS.aug_count >= 1:
                # Index 0 at augmix preprocessing is the unperturbed image.
                images = images[:, 1, ...]
                # This is for the case of combining AugMix and Mixup.
                if FLAGS.mixup_alpha > 0:
                    labels = tf.split(labels, FLAGS.aug_count + 1, axis=0)[1]
            images = tf.tile(images,
                             [FLAGS.num_dropout_samples_training, 1, 1, 1])
            if FLAGS.mixup_alpha > 0:
                labels = tf.tile(labels,
                                 [FLAGS.num_dropout_samples_training, 1])
            else:
                labels = tf.tile(labels, [FLAGS.num_dropout_samples_training])

            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if isinstance(logits, tuple):
                    # If model returns a tuple of (logits, covmat), extract logits
                    logits, _ = logits
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                if FLAGS.mixup_alpha > 0:
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.categorical_crossentropy(
                            labels, logits, from_logits=True))
                else:
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels, logits, from_logits=True))

                l2_loss = sum(model.losses)
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, since the distribution strategy will
                # reduce-sum the gradients across replicas.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(logits)
            if FLAGS.mixup_alpha > 0:
                labels = tf.argmax(labels, axis=-1)
            metrics['train/ece'].update_state(labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)

        strategy.run(step_fn, args=(next(iterator), step))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs

            logits_list = []
            stddev_list = []
            for _ in range(FLAGS.num_dropout_samples):
                logits = model(images, training=False)
                if isinstance(logits, tuple):
                    # If model returns a tuple of (logits, covmat), extract both
                    logits, covmat = logits
                else:
                    covmat = tf.eye(FLAGS.per_core_batch_size)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                logits = ed.layers.utils.mean_field_logits(
                    logits,
                    covmat,
                    mean_field_factor=FLAGS.gp_mean_field_factor)
                stddev = tf.sqrt(tf.linalg.diag_part(covmat))

                stddev_list.append(stddev)
                logits_list.append(logits)

            # Logits dimension is (num_samples, batch_size, num_classes).
            logits_list = tf.stack(logits_list, axis=0)
            stddev_list = tf.stack(stddev_list, axis=0)

            stddev = tf.reduce_mean(stddev_list, axis=0)
            probs_list = tf.nn.softmax(logits_list)
            probs = tf.reduce_mean(probs_list, axis=0)

            labels_broadcasted = tf.broadcast_to(
                labels, [FLAGS.num_dropout_samples, labels.shape[0]])
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits_list, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
                tf.math.log(float(FLAGS.num_dropout_samples)))

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
                metrics['test/stddev'].update_state(stddev)
            elif dataset_name == 'val':
                metrics['val/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['val/accuracy'].update_state(labels, probs)
                metrics['val/ece'].update_state(labels, probs)
                metrics['val/stddev'].update_state(stddev)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/stddev_{}'.format(
                    dataset_name)].update_state(stddev)

        strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    step_variable = tf.Variable(0, dtype=tf.int32)
    train_iterator = iter(train_dataset)
    start_time = time.time()

    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            step_variable.assign(step)
            # Pass `step` as a tf.Variable so the tf.function train_step does
            # not retrace at every call.
            train_step(train_iterator, step_variable)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if use_validation_set:
            datasets_to_evaluate['val'] = validation_dataset
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            num_eval_steps = (steps_per_val
                              if dataset_name == 'val' else steps_per_eval)
            for step in range(num_eval_steps):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_start_time = time.time()
                test_step(test_iterator, dataset_name)
                ms_per_example = (time.time() -
                                  test_start_time) * 1e6 / test_batch_size
                metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        if use_validation_set:
            logging.info('Val NLL: %.4f, Accuracy: %.2f%%',
                         metrics['val/negative_log_likelihood'].result(),
                         metrics['val/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)

    final_save_name = os.path.join(FLAGS.output_dir, 'model')
    model.save(final_save_name)
    logging.info('Saved model to %s', final_save_name)
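One detail of the training loop above is worth isolating: calling a tf.function with a fresh Python int triggers a retrace for every distinct value, whereas assigning into a tf.Variable and passing that traces the function once. A minimal sketch:

import tensorflow as tf

@tf.function
def traced_step(step):
    # Traced once per input *signature*; a Variable's value can change freely.
    tf.print('running step', step)

step_variable = tf.Variable(0, dtype=tf.int32)
for step in range(3):
    step_variable.assign(step)
    traced_step(step_variable)  # single trace, three executions
    # traced_step(step)         # would retrace for each new Python int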
Example No. 21
0
def run(trial_dir: str, flag_string: Optional[str]):
    """Run the experiment.

  Args:
    trial_dir: String to the dir to write checkpoints to and read them from.
    flag_string: Optional string used to record what flags the job was run with.
  """
    tf.random.set_seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)

    if not FLAGS.eval_frequency:
        FLAGS.eval_frequency = FLAGS.log_frequency

    if FLAGS.eval_frequency % FLAGS.log_frequency != 0:
        raise ValueError(
            'log_frequency ({}) must evenly divide eval_frequency '
            '({}).'.format(FLAGS.log_frequency, FLAGS.eval_frequency))

    strategy = ub.strategy_utils.get_strategy(FLAGS.tpu,
                                              use_tpu=not FLAGS.use_cpu
                                              and not FLAGS.use_gpu)
    with strategy.scope():
        _maybe_setup_trial_dir(strategy, trial_dir, flag_string)

        # TODO(znado): pass all dataset and model kwargs.
        train_dataset_builder = ub.datasets.get(
            dataset_name=FLAGS.dataset_name,
            split='train',
            validation_percent=FLAGS.validation_percent,
            shuffle_buffer_size=FLAGS.shuffle_buffer_size)
        if FLAGS.validation_percent > 0:
            validation_dataset_builder = ub.datasets.get(
                dataset_name=FLAGS.dataset_name,
                split='validation',
                validation_percent=FLAGS.validation_percent,
                shuffle_buffer_size=FLAGS.shuffle_buffer_size)
        else:
            validation_dataset_builder = None
        test_dataset_builder = ub.datasets.get(
            dataset_name=FLAGS.dataset_name,
            split='test',
            validation_percent=FLAGS.validation_percent,
            shuffle_buffer_size=FLAGS.shuffle_buffer_size)

        if FLAGS.use_spec_norm:
            logging.info('Use spectral normalization.')
            spec_norm_hparams = {
                'spec_norm_bound': FLAGS.spec_norm_bound,
                'spec_norm_iteration': FLAGS.spec_norm_iteration
            }
        else:
            spec_norm_hparams = None

        if FLAGS.use_gp_layer:
            logging.info('Use GP for output layer.')
            gp_layer_hparams = {
                'gp_input_dim': FLAGS.gp_input_dim,
                'gp_hidden_dim': FLAGS.gp_hidden_dim,
                'gp_scale': FLAGS.gp_scale,
                'gp_bias': FLAGS.gp_bias,
                'gp_input_normalization': FLAGS.gp_input_normalization,
                'gp_cov_discount_factor': FLAGS.gp_cov_discount_factor,
                'gp_cov_ridge_penalty': FLAGS.gp_cov_ridge_penalty
            }
        else:
            gp_layer_hparams = None

        model = ub_smu_models.get(
            FLAGS.model_name,
            num_classes=FLAGS.num_classes,
            batch_size=FLAGS.batch_size,
            len_seqs=FLAGS.len_seqs,
            num_motifs=FLAGS.num_motifs,
            len_motifs=FLAGS.len_motifs,
            num_denses=FLAGS.num_denses,
            depth=FLAGS.wide_resnet_depth,
            width=FLAGS.wide_resnet_depth,
            l2_weight=FLAGS.l2_regularization,
            depth_multiplier=FLAGS.wide_resnet_width_multiplier,
            dropout_rate=FLAGS.dropout_rate,
            before_conv_dropout=FLAGS.before_conv_dropout,
            use_mc_dropout=FLAGS.use_mc_dropout,
            spec_norm_hparams=spec_norm_hparams,
            gp_layer_hparams=gp_layer_hparams)

        metrics = {
            'accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'brier_score': BrierScore(name='brier_score'),
            'ece': um.ExpectedCalibrationError(num_bins=10, name='ece'),
            'loss': tf.keras.metrics.SparseCategoricalCrossentropy(),
        }

        # Record all non-default hparams in tensorboard.
        hparams = _get_hparams()

        ood_dataset_builder = None
        ood_metrics = None
        if FLAGS.run_ood:
            if 'cifar' in FLAGS.dataset_name and FLAGS.ood_dataset_name == 'svhn':
                svhn_normalize_by_cifar = True
            else:
                svhn_normalize_by_cifar = False

            ood_dataset_builder_cls = ub.datasets.DATASETS[
                FLAGS.ood_dataset_name]
            ood_dataset_builder_cls = ub.datasets.make_ood_dataset(
                ood_dataset_builder_cls)
            ood_dataset_builder = ood_dataset_builder_cls(
                in_distribution_dataset=test_dataset_builder,
                name=FLAGS.ood_dataset_name,
                split='test',
                validation_percent=FLAGS.validation_percent,
                normalize_by_cifar=svhn_normalize_by_cifar,
                data_mode='ood')
            _check_batch_replica_divisible(FLAGS.eval_batch_size, strategy)

            ood_metrics = {
                'auroc':
                tf.keras.metrics.AUC(curve='ROC',
                                     summation_method='interpolation'),
                'auprc':
                tf.keras.metrics.AUC(curve='PR',
                                     summation_method='interpolation')
            }

            aux_metrics = [
                ('spec_at_sen', tf.keras.metrics.SpecificityAtSensitivity,
                 FLAGS.sensitivity_thresholds),
                ('sen_at_spec', tf.keras.metrics.SensitivityAtSpecificity,
                 FLAGS.specificity_thresholds),
                ('prec_at_rec', tf.keras.metrics.PrecisionAtRecall,
                 FLAGS.recall_thresholds),
                ('rec_at_prec', tf.keras.metrics.RecallAtPrecision,
                 FLAGS.precision_thresholds)
            ]

            for metric_name, metric_fn, threshold_vals in aux_metrics:
                vals = [float(x) for x in threshold_vals]
                thresholds = np.linspace(vals[0], vals[1], int(vals[2]))
                for thresh in thresholds:
                    name = f'{metric_name}_{thresh:.2f}'
                    ood_metrics[name] = metric_fn(thresh)

        if FLAGS.mode == 'eval':
            _check_batch_replica_divisible(FLAGS.eval_batch_size, strategy)
            eval_lib.run_eval_loop(
                validation_dataset_builder=validation_dataset_builder,
                test_dataset_builder=test_dataset_builder,
                batch_size=FLAGS.eval_batch_size,
                model=model,
                trial_dir=trial_dir,
                train_steps=FLAGS.train_steps,
                strategy=strategy,
                metrics=metrics,
                checkpoint_step=FLAGS.checkpoint_step,
                hparams=hparams,
                ood_dataset_builder=ood_dataset_builder,
                ood_metrics=ood_metrics)
            return

        if FLAGS.mode == 'train_and_eval':
            _check_batch_replica_divisible(FLAGS.eval_batch_size, strategy)

        steps_per_epoch = train_dataset_builder.num_examples // FLAGS.batch_size
        optimizer_kwargs = {
            k[len('optimizer_hparams_'):]: FLAGS[k].value
            for k in FLAGS if k.startswith('optimizer_hparams_')
        }
        optimizer_kwargs.update({
            k[len('schedule_hparams_'):]: FLAGS[k].value
            for k in FLAGS if k.startswith('schedule_hparams_')
        })

        optimizer = ub.optimizers.get(
            optimizer_name=FLAGS.optimizer,
            learning_rate_schedule=FLAGS.learning_rate_schedule,
            learning_rate=FLAGS.learning_rate,
            weight_decay=FLAGS.weight_decay,
            steps_per_epoch=steps_per_epoch,
            model=model,
            **optimizer_kwargs)

        train_lib.run_train_loop(
            train_dataset_builder=train_dataset_builder,
            validation_dataset_builder=validation_dataset_builder,
            test_dataset_builder=test_dataset_builder,
            batch_size=FLAGS.batch_size,
            eval_batch_size=FLAGS.eval_batch_size,
            model=model,
            optimizer=optimizer,
            eval_frequency=FLAGS.eval_frequency,
            log_frequency=FLAGS.log_frequency,
            trial_dir=trial_dir,
            train_steps=FLAGS.train_steps,
            mode=FLAGS.mode,
            strategy=strategy,
            metrics=metrics,
            hparams=hparams,
            ood_dataset_builder=ood_dataset_builder,
            ood_metrics=ood_metrics,
            focal_loss_gamma=FLAGS.focal_loss_gamma)
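The optimizer and schedule kwargs above are collected by stripping a common prefix from flag names. A minimal, self-contained sketch of that pattern, using a plain dict as a stand-in for absl's FLAGS (the flag names here are illustrative):

# Stand-in for absl FLAGS: any flag prefixed with 'optimizer_hparams_' or
# 'schedule_hparams_' is collapsed into a single kwargs dict.
flags = {
    'optimizer_hparams_momentum': 0.9,
    'optimizer_hparams_nesterov': True,
    'schedule_hparams_warmup_epochs': 5,
    'learning_rate': 0.1,  # unprefixed flags are left out
}

optimizer_kwargs = {
    k[len('optimizer_hparams_'):]: v
    for k, v in flags.items() if k.startswith('optimizer_hparams_')
}
optimizer_kwargs.update({
    k[len('schedule_hparams_'):]: v
    for k, v in flags.items() if k.startswith('schedule_hparams_')
})
assert optimizer_kwargs == {
    'momentum': 0.9, 'nesterov': True, 'warmup_epochs': 5}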
Example #22
def main(argv):
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
  steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

  enable_mixup = (FLAGS.mixup_alpha > 0.0)
  mixup_params = {
      'mixup_alpha': FLAGS.mixup_alpha,
      'adaptive_mixup': False,
      'same_mix_weight_per_batch': FLAGS.same_mix_weight_per_batch,
      'use_random_shuffling': FLAGS.use_random_shuffling,
      'use_truncated_beta': FLAGS.use_truncated_beta
  }

  train_builder = utils.ImageNetInput(
      data_dir=FLAGS.data_dir,
      use_bfloat16=FLAGS.use_bfloat16,
      one_hot=True,
      mixup_params=mixup_params)
  test_builder = utils.ImageNetInput(
      data_dir=FLAGS.data_dir, use_bfloat16=FLAGS.use_bfloat16)
  train_dataset = train_builder.as_dataset(
      split=tfds.Split.TRAIN, batch_size=batch_size)
  test_dataset = test_builder.as_dataset(
      split=tfds.Split.TEST, batch_size=batch_size)
  train_dataset = strategy.experimental_distribute_dataset(train_dataset)
  test_dataset = strategy.experimental_distribute_dataset(test_dataset)

  if enable_mixup:

    mean_theta = mean_truncated_beta_distribution(FLAGS.mixup_alpha)

    # Training set used to compute the means of the images and of the
    # (one-hot) labels.
    imagenet_train_no_mixup = utils.ImageNetInput(
        data_dir=FLAGS.data_dir, use_bfloat16=FLAGS.use_bfloat16, one_hot=True)
    imagenet_train_no_mixup = imagenet_train_no_mixup.as_dataset(
        split=tfds.Split.TRAIN, batch_size=batch_size)
    tr_data_no_mixup = strategy.experimental_distribute_dataset(
        imagenet_train_no_mixup)

  if FLAGS.use_bfloat16:
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
    tf.keras.mixed_precision.experimental.set_policy(policy)

  with strategy.scope():

    if enable_mixup:
      # Variables used to track the means of the images and the (one-hot) labels
      count = tf.Variable(tf.zeros((1,), dtype=tf.float32))
      mean_images = tf.Variable(tf.zeros(IMAGE_SHAPE, dtype=tf.float32))
      mean_labels = tf.Variable(tf.zeros((NUM_CLASSES,), dtype=tf.float32))

    logging.info('Building Keras ResNet-50 model')
    model = ub.models.resnet50_deterministic(input_shape=IMAGE_SHAPE,
                                             num_classes=NUM_CLASSES)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Scale learning rate and decay epochs by vanilla settings.
    base_lr = FLAGS.base_learning_rate * batch_size / 256
    learning_rate = utils.LearningRateSchedule(steps_per_epoch,
                                               base_lr,
                                               FLAGS.train_epochs,
                                               _LR_SCHEDULE)
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                        momentum=0.9,
                                        nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
    }
    logging.info('Finished building Keras ResNet-50 model')

    if enable_mixup:
      # With mixup enabled, we log the predictions with the rescaling from [2]
      metrics['test/negative_log_likelihood+rescaling'] = (tf.keras.metrics
                                                           .Mean())
      metrics['test/accuracy+rescaling'] = (tf.keras.metrics
                                            .SparseCategoricalAccuracy())
      metrics['test/ece+rescaling'] = um.ExpectedCalibrationError(
          num_bins=FLAGS.num_bins)

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  @tf.function
  def moving_average_step(iterator):
    """Training StepFn to compute the means of the images and labels."""

    def step_fn_labels(labels):
      return tf.reduce_mean(labels, axis=0)

    def step_fn_images(images):
      return tf.reduce_mean(tf.cast(images, tf.float32), axis=0)

    new_count = count + 1.
    count.assign(new_count)

    images, labels = next(iterator)

    per_replica_means = strategy.run(step_fn_labels, args=(labels,))
    cross_replica_means = strategy.reduce('mean', per_replica_means, axis=0)
    mean_labels.assign(cross_replica_means / count +
                       (count - 1.) / count * mean_labels)

    per_replica_means = strategy.run(step_fn_images, args=(images,))
    cross_replica_means = strategy.reduce('mean', per_replica_means, axis=0)
    mean_images.assign(cross_replica_means / count +
                       (count - 1.) / count * mean_images)

  @tf.function
  def train_step(iterator):
    """Training StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs

      with tf.GradientTape() as tape:

        logits = model(images, training=True)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)

        negative_log_likelihood = tf.reduce_mean(
            tf.keras.losses.categorical_crossentropy(
                labels, logits, from_logits=True))

        filtered_variables = []
        for var in model.trainable_variables:
          # Apply l2 on the weights and bias terms. This excludes BN
          # parameters, but be careful with their naming scheme.
          if 'kernel' in var.name or 'bias' in var.name:
            filtered_variables.append(tf.reshape(var, (-1,)))

        l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
            tf.concat(filtered_variables, axis=0))
        # Scale the loss, since TPUStrategy will reduce-sum all gradients.
        loss = negative_log_likelihood + l2_loss
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))

      probs = tf.nn.softmax(logits)

      # Convert the one-hot labels back to integer class indices.
      labels = tf.argmax(labels, axis=-1)

      metrics['train/ece'].update_state(labels, probs)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(labels, logits)

    strategy.run(step_fn, args=(next(iterator),))

  @tf.function
  def update_test_metrics(labels, logits, metric_suffix=''):
    negative_log_likelihood = tf.reduce_mean(
        tf.keras.losses.sparse_categorical_crossentropy(
            labels, logits, from_logits=True))
    probs = tf.nn.softmax(logits)
    metrics['test/negative_log_likelihood' + metric_suffix].update_state(
        negative_log_likelihood)
    metrics['test/accuracy' + metric_suffix].update_state(labels, probs)
    metrics['test/ece' + metric_suffix].update_state(labels, probs)

  @tf.function
  def test_step(iterator):
    """Evaluation StepFn."""
    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs

      logits = model(images, training=False)
      if FLAGS.use_bfloat16:
        logits = tf.cast(logits, tf.float32)

      update_test_metrics(labels, logits)

      # Rescaling logic in Eq.(15) from [2]
      if enable_mixup:
        images *= mean_theta
        images += (1.-mean_theta) * tf.cast(mean_images, images.dtype)

        scaled_logits = model(images, training=False)
        if FLAGS.use_bfloat16:
          scaled_logits = tf.cast(scaled_logits, tf.float32)

        scaled_logits *= 1./mean_theta
        scaled_logits += (1.-1./mean_theta) * tf.cast(mean_labels, logits.dtype)

        update_test_metrics(labels, scaled_logits, '+rescaling')

    strategy.run(step_fn, args=(next(iterator),))

  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

  if enable_mixup:
    logging.info('Starting to compute the means of labels and images')
    tr_iterator_no_mixup = iter(tr_data_no_mixup)
    for step in range(steps_per_epoch):
      moving_average_step(tr_iterator_no_mixup)
    # Save stats required by the mixup rescaling [2] for subsequent predictions
    mixup_rescaling_stats = {
        'mean_labels': mean_labels.numpy(),
        'mean_images': mean_images.numpy(),
        'mean_theta': mean_theta
    }
    stats_path = os.path.join(FLAGS.output_dir, 'mixup_rescaling_stats.npz')
    with tf.io.gfile.GFile(stats_path, 'wb') as f:
      np.save(f, list(mixup_rescaling_stats.items()))
    logging.info('Finished computing the means of labels and images')

  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)
    for step in range(steps_per_epoch):
      train_step(train_iterator)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps,
                     epoch + 1,
                     FLAGS.train_epochs,
                     steps_per_sec,
                     eta_seconds / 60,
                     time_elapsed / 60))
      if step % 20 == 0:
        logging.info(message)

    test_iterator = iter(test_dataset)
    for step in range(steps_per_eval):
      if step % 20 == 0:
        logging.info('Starting to run eval step %s of epoch: %s', step, epoch)
      test_start_time = time.time()
      test_step(test_iterator)
      ms_per_example = (time.time() - test_start_time) * 1e6 / batch_size
      metrics['test/ms_per_example'].update_state(ms_per_example)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    if enable_mixup:
      logging.info(
          'Test NLL (+ rescaling): %.4f, Accuracy (+ rescaling): %.2f%%',
          metrics['test/negative_log_likelihood+rescaling'].result(),
          metrics['test/accuracy+rescaling'].result() * 100)

    total_results = {name: metric.result() for name, metric in metrics.items()}
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(os.path.join(
          FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  final_save_name = os.path.join(FLAGS.output_dir, 'model')
  model.save(final_save_name)
  logging.info('Saved model to %s', final_save_name)
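moving_average_step above folds one per-batch mean per step into running means of the images and one-hot labels via the update mean_k = x_k / k + (k - 1) / k * mean_{k-1}, which equals the plain average of the per-step means whenever every step carries equal weight (same batch size throughout, as here). A quick numpy check of that identity (shapes are arbitrary):

import numpy as np

per_step_means = np.random.rand(10, 3)  # one mean vector per step
running = np.zeros(3)
for k, x in enumerate(per_step_means, start=1):
    # Same update rule as moving_average_step: new/k + (k-1)/k * old.
    running = x / k + (k - 1) / k * running
np.testing.assert_allclose(running, per_step_means.mean(axis=0))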
Example #23
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    per_core_batch_size = FLAGS.per_core_batch_size // FLAGS.ensemble_size
    batch_size = per_core_batch_size * FLAGS.num_cores
    steps_per_epoch = APPROX_IMAGENET_TRAIN_IMAGES // batch_size
    steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    mixup_params = {
        'ensemble_size': FLAGS.ensemble_size,
        'mixup_alpha': FLAGS.mixup_alpha,
        'adaptive_mixup': FLAGS.adaptive_mixup,
        'num_classes': NUM_CLASSES,
    }
    train_builder = utils.ImageNetInput(data_dir=FLAGS.data_dir,
                                        one_hot=(FLAGS.mixup_alpha > 0),
                                        use_bfloat16=FLAGS.use_bfloat16,
                                        mixup_params=mixup_params,
                                        ensemble_size=FLAGS.ensemble_size)
    test_builder = utils.ImageNetInput(data_dir=FLAGS.data_dir,
                                       use_bfloat16=FLAGS.use_bfloat16)
    train_dataset = train_builder.as_dataset(split=tfds.Split.TRAIN,
                                             batch_size=batch_size)
    clean_test_dataset = test_builder.as_dataset(split=tfds.Split.TEST,
                                                 batch_size=batch_size)
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset)
    }
    if FLAGS.adaptive_mixup:
        imagenet_confidence_dataset = test_builder.as_dataset(
            split=tfds.Split.VALIDATION,
            batch_size=FLAGS.per_core_batch_size * FLAGS.num_cores)
        imagenet_confidence_dataset = (
            strategy.experimental_distribute_dataset(
                imagenet_confidence_dataset))
    if FLAGS.corruptions_interval > 0:
        corruption_types, max_intensity = utils.load_corrupted_test_info()
        for name in corruption_types:
            for intensity in range(1, max_intensity + 1):
                dataset_name = '{0}_{1}'.format(name, intensity)
                dataset = utils.load_corrupted_test_dataset(
                    batch_size=batch_size,
                    corruption_name=name,
                    corruption_intensity=intensity,
                    use_bfloat16=FLAGS.use_bfloat16)
                test_datasets[dataset_name] = (
                    strategy.experimental_distribute_dataset(dataset))

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building Keras ResNet-50 model')
        model = ub.models.resnet_batchensemble(
            input_shape=(224, 224, 3),
            num_classes=NUM_CLASSES,
            ensemble_size=FLAGS.ensemble_size,
            random_sign_init=FLAGS.random_sign_init,
            use_ensemble_bn=FLAGS.use_ensemble_bn,
            depth=FLAGS.depth)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Scale learning rate and decay epochs by vanilla settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 256
        learning_rate = utils.LearningRateSchedule(steps_per_epoch, base_lr,
                                                   FLAGS.train_epochs,
                                                   _LR_SCHEDULE)
        optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                            momentum=0.9,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'train/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss':
            tf.keras.metrics.Mean(),
            'train/ece':
            um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'test/accuracy':
            tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece':
            um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/member_accuracy_mean':
            (tf.keras.metrics.SparseCategoricalAccuracy()),
            'test/member_ece_mean':
            um.ExpectedCalibrationError(num_bins=FLAGS.num_bins)
        }

        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
                    corrupt_metrics['test/member_acc_mean_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/member_ece_mean_{}'.format(
                        dataset_name)] = (um.ExpectedCalibrationError(
                            num_bins=FLAGS.num_bins))

        for i in range(FLAGS.ensemble_size):
            metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
            metrics['test/accuracy_member_{}'.format(i)] = (
                tf.keras.metrics.SparseCategoricalAccuracy())
        test_diversity = {
            'test/disagreement': tf.keras.metrics.Mean(),
            'test/average_kl': tf.keras.metrics.Mean(),
            'test/cosine_similarity': tf.keras.metrics.Mean(),
        }
        training_diversity = {
            'train/disagreement': tf.keras.metrics.Mean(),
            'train/average_kl': tf.keras.metrics.Mean(),
            'train/cosine_similarity': tf.keras.metrics.Mean(),
        }

        logging.info('Finished building Keras ResNet-50 model')

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            if FLAGS.adaptive_mixup:
                images = tf.identity(images)
            else:
                images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])

            if FLAGS.adaptive_mixup:
                labels = tf.identity(labels)
            elif FLAGS.mixup_alpha > 0:
                labels = tf.tile(labels, [FLAGS.ensemble_size, 1])
            else:
                labels = tf.tile(labels, [FLAGS.ensemble_size])

            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)

                probs = tf.nn.softmax(logits)
                per_probs = tf.reshape(
                    probs,
                    tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]], 0))
                diversity_results = um.average_pairwise_diversity(
                    per_probs, FLAGS.ensemble_size)

                if FLAGS.mixup_alpha > 0:
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.categorical_crossentropy(
                            labels, logits, from_logits=True))
                else:
                    negative_log_likelihood = tf.reduce_mean(
                        tf.keras.losses.sparse_categorical_crossentropy(
                            labels, logits, from_logits=True))
                filtered_variables = []
                for var in model.trainable_variables:
                    # Apply l2 on the slow weights and bias terms. This
                    # excludes BN parameters and the fast-weight approximate
                    # posterior/prior parameters, but be careful with their
                    # naming scheme.
                    if 'kernel' in var.name or 'bias' in var.name:
                        filtered_variables.append(tf.reshape(var, (-1, )))

                l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
                    tf.concat(filtered_variables, axis=0))
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, since TPUStrategy will reduce-sum all gradients.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)

            # Separate learning rate implementation.
            if FLAGS.fast_weight_lr_multiplier != 1.0:
                grads_and_vars = []
                for grad, var in zip(grads, model.trainable_variables):
                    # Apply a different learning rate to the fast weights. This
                    # excludes BN and slow weights, but be careful with the
                    # naming scheme.
                    if ('batch_norm' not in var.name
                            and 'kernel' not in var.name):
                        grads_and_vars.append(
                            (grad * FLAGS.fast_weight_lr_multiplier, var))
                    else:
                        grads_and_vars.append((grad, var))
                optimizer.apply_gradients(grads_and_vars)
            else:
                optimizer.apply_gradients(zip(grads,
                                              model.trainable_variables))

            if FLAGS.mixup_alpha > 0:
                labels = tf.argmax(labels, axis=-1)
            metrics['train/ece'].update_state(labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)
            for k, v in diversity_results.items():
                training_diversity['train/' + k].update_state(v)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
            logits = model(images, training=False)
            if FLAGS.use_bfloat16:
                logits = tf.cast(logits, tf.float32)
            probs = tf.nn.softmax(logits)

            if dataset_name == 'clean':
                per_probs_tensor = tf.reshape(
                    probs,
                    tf.concat([[FLAGS.ensemble_size, -1], probs.shape[1:]], 0))
                diversity_results = um.average_pairwise_diversity(
                    per_probs_tensor, FLAGS.ensemble_size)
                for k, v in diversity_results.items():
                    test_diversity['test/' + k].update_state(v)

            per_probs = tf.split(probs,
                                 num_or_size_splits=FLAGS.ensemble_size,
                                 axis=0)
            probs = tf.reduce_mean(per_probs, axis=0)

            negative_log_likelihood = tf.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(labels, probs))

            for i in range(FLAGS.ensemble_size):
                member_probs = per_probs[i]
                if dataset_name == 'clean':
                    member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                        labels, member_probs)
                    metrics['test/nll_member_{}'.format(i)].update_state(
                        member_loss)
                    metrics['test/accuracy_member_{}'.format(i)].update_state(
                        labels, member_probs)
                    metrics['test/member_accuracy_mean'].update_state(
                        labels, member_probs)
                    metrics['test/member_ece_mean'].update_state(
                        labels, member_probs)
                elif dataset_name != 'confidence_validation':
                    corrupt_metrics['test/member_acc_mean_{}'.format(
                        dataset_name)].update_state(labels, member_probs)
                    corrupt_metrics['test/member_ece_mean_{}'.format(
                        dataset_name)].update_state(labels, member_probs)

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            elif dataset_name != 'confidence_validation':
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)

            if dataset_name == 'confidence_validation':
                return tf.stack(per_probs, 0), labels

        if dataset_name == 'confidence_validation':
            return strategy.run(step_fn, args=(next(iterator), ))
        else:
            strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            train_step(train_iterator)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        if FLAGS.adaptive_mixup:
            confidence_set_iterator = iter(imagenet_confidence_dataset)
            predictions_list = []
            labels_list = []
            for step in range(FLAGS.confidence_eval_iterations):
                temp_predictions, temp_labels = test_step(
                    confidence_set_iterator, 'confidence_validation')
                predictions_list.append(temp_predictions)
                labels_list.append(temp_labels)
            predictions = [
                tf.concat(list(predictions_list[i].values), axis=1)
                for i in range(len(predictions_list))
            ]
            labels = [
                tf.concat(list(labels_list[i].values), axis=0)
                for i in range(len(labels_list))
            ]
            predictions = tf.concat(predictions, axis=1)
            labels = tf.cast(tf.concat(labels, axis=0), tf.int64)

            def compute_acc_conf(preds, label, focus_class):
                class_preds = tf.boolean_mask(preds,
                                              label == focus_class,
                                              axis=1)
                class_pred_labels = tf.argmax(class_preds, axis=-1)
                confidence = tf.reduce_mean(
                    tf.reduce_max(class_preds, axis=-1), -1)
                accuracy = tf.reduce_mean(tf.cast(
                    class_pred_labels == focus_class, tf.float32),
                                          axis=-1)
                return accuracy - confidence

            calibration_per_class = [
                compute_acc_conf(predictions, labels, i)
                for i in range(NUM_CLASSES)
            ]
            calibration_per_class = tf.stack(calibration_per_class, axis=1)
            logging.info('calibration per class')
            logging.info(calibration_per_class)
            mixup_coeff = tf.where(calibration_per_class > 0, 1.0,
                                   FLAGS.mixup_alpha)
            mixup_coeff = tf.clip_by_value(mixup_coeff, 0, 1)
            logging.info('mixup coeff')
            logging.info(mixup_coeff)
            mixup_params['mixup_coeff'] = mixup_coeff
            builder = utils.ImageNetInput(data_dir=FLAGS.data_dir,
                                          one_hot=(FLAGS.mixup_alpha > 0),
                                          use_bfloat16=FLAGS.use_bfloat16,
                                          mixup_params=mixup_params)
            train_dataset = builder.as_dataset(split=tfds.Split.TRAIN,
                                               batch_size=batch_size)
            train_dataset = strategy.experimental_distribute_dataset(
                train_dataset)
            train_iterator = iter(train_dataset)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            for step in range(steps_per_eval):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_start_time = time.time()
                test_step(test_iterator, dataset_name)
                ms_per_example = (time.time() -
                                  test_start_time) * 1e6 / batch_size
                metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity,
                FLAGS.alexnet_errors_path)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        for i in range(FLAGS.ensemble_size):
            logging.info(
                'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
                metrics['test/nll_member_{}'.format(i)].result(),
                metrics['test/accuracy_member_{}'.format(i)].result() * 100)

        total_metrics = metrics.copy()
        total_metrics.update(training_diversity)
        total_metrics.update(test_diversity)
        total_results = {
            name: metric.result()
            for name, metric in total_metrics.items()
        }
        total_results.update(corrupt_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for _, metric in total_metrics.items():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    final_save_name = os.path.join(FLAGS.output_dir, 'model')
    model.save(final_save_name)
    logging.info('Saved model to %s', final_save_name)
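At eval time, the BatchEnsemble example tiles each batch ensemble_size times, splits the softmax outputs back into per-member blocks, and averages them into the ensemble prediction. A small sketch of that tile/split/average step, with a random-logits function standing in for the model (all names and sizes here are illustrative):

import tensorflow as tf

ensemble_size, batch, num_classes = 4, 8, 10
images = tf.random.uniform([batch, 32, 32, 3])

def fake_model(x):
    # Stand-in for the BatchEnsemble model: one row of logits per tiled example.
    return tf.random.normal([tf.shape(x)[0], num_classes])

tiled = tf.tile(images, [ensemble_size, 1, 1, 1])    # [E*B, H, W, C]
probs = tf.nn.softmax(fake_model(tiled))             # [E*B, K]
per_member = tf.split(probs, ensemble_size, axis=0)  # E tensors of [B, K]
mean_probs = tf.reduce_mean(per_member, axis=0)      # ensemble prediction
assert mean_probs.shape == (batch, num_classes)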
Example #24
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    train_input_fn = utils.load_input_fn(
        split=tfds.Split.TRAIN,
        name=FLAGS.dataset,
        batch_size=FLAGS.per_core_batch_size // FLAGS.ensemble_size,
        use_bfloat16=FLAGS.use_bfloat16)
    clean_test_input_fn = utils.load_input_fn(
        split=tfds.Split.TEST,
        name=FLAGS.dataset,
        batch_size=FLAGS.per_core_batch_size // FLAGS.ensemble_size,
        use_bfloat16=FLAGS.use_bfloat16)
    train_dataset = strategy.experimental_distribute_datasets_from_function(
        train_input_fn)
    test_datasets = {
        'clean':
        strategy.experimental_distribute_datasets_from_function(
            clean_test_input_fn),
    }
    if FLAGS.corruptions_interval > 0:
        if FLAGS.dataset == 'cifar10':
            load_c_input_fn = utils.load_cifar10_c_input_fn
        else:
            load_c_input_fn = functools.partial(utils.load_cifar100_c_input_fn,
                                                path=FLAGS.cifar100_c_path)
        corruption_types, max_intensity = utils.load_corrupted_test_info(
            FLAGS.dataset)
        for corruption in corruption_types:
            for intensity in range(1, max_intensity + 1):
                input_fn = load_c_input_fn(
                    corruption_name=corruption,
                    corruption_intensity=intensity,
                    batch_size=FLAGS.per_core_batch_size //
                    FLAGS.ensemble_size,
                    use_bfloat16=FLAGS.use_bfloat16)
                test_datasets['{0}_{1}'.format(corruption, intensity)] = (
                    strategy.experimental_distribute_datasets_from_function(
                        input_fn))

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = ((FLAGS.per_core_batch_size // FLAGS.ensemble_size) *
                  FLAGS.num_cores)
    train_dataset_size = ds_info.splits['train'].num_examples
    steps_per_epoch = train_dataset_size // batch_size
    test_dataset_size = ds_info.splits['test'].num_examples
    steps_per_eval = test_dataset_size // batch_size
    num_classes = ds_info.features['label'].num_classes

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building Keras model')
        model = ub.models.wide_resnet_rank1(
            input_shape=ds_info.features['image'].shape,
            depth=28,
            width_multiplier=10,
            num_classes=num_classes,
            alpha_initializer=FLAGS.alpha_initializer,
            gamma_initializer=FLAGS.gamma_initializer,
            alpha_regularizer=FLAGS.alpha_regularizer,
            gamma_regularizer=FLAGS.gamma_regularizer,
            use_additive_perturbation=FLAGS.use_additive_perturbation,
            ensemble_size=FLAGS.ensemble_size,
            random_sign_init=FLAGS.random_sign_init,
            dropout_rate=FLAGS.dropout_rate,
            prior_mean=FLAGS.prior_mean,
            prior_stddev=FLAGS.prior_stddev)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale learning rate and the decay epochs by vanilla settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
        lr_schedule = utils.LearningRateSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=0.9,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/kl': tf.keras.metrics.Mean(),
            'train/kl_scale': tf.keras.metrics.Mean(),
            'train/elbo': tf.keras.metrics.Mean(),
            'train/loss': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood': tf.keras.metrics.Mean(),
            'test/kl': tf.keras.metrics.Mean(),
            'test/elbo': tf.keras.metrics.Mean(),
            'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        }
        if FLAGS.ensemble_size > 1:
            for i in range(FLAGS.ensemble_size):
                metrics['test/nll_member_{}'.format(
                    i)] = tf.keras.metrics.Mean()
                metrics['test/accuracy_member_{}'.format(i)] = (
                    tf.keras.metrics.SparseCategoricalAccuracy())
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/kl_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/elbo_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    def compute_l2_loss(model):
        filtered_variables = []
        for var in model.trainable_variables:
            # Apply l2 on the kernels, BN parameters, and bias terms. This
            # excludes only the fast-weight approximate posterior/prior
            # parameters, but be careful with their naming scheme.
            if ('kernel' in var.name or 'batch_norm' in var.name
                    or 'bias' in var.name):
                filtered_variables.append(tf.reshape(var, (-1, )))
        l2_loss = FLAGS.l2 * 2 * tf.nn.l2_loss(
            tf.concat(filtered_variables, axis=0))
        return l2_loss

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            if FLAGS.ensemble_size > 1:
                images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
                labels = tf.tile(labels, [FLAGS.ensemble_size])

            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, logits, from_logits=True))
                l2_loss = compute_l2_loss(model)
                kl = sum(model.losses) / train_dataset_size
                kl_scale = tf.cast(optimizer.iterations + 1, kl.dtype)
                kl_scale /= steps_per_epoch * FLAGS.kl_annealing_epochs
                kl_scale = tf.minimum(1., kl_scale)
                kl_loss = kl_scale * kl

                # Scale the loss, since TPUStrategy will reduce-sum all gradients.
                loss = negative_log_likelihood + l2_loss + kl_loss
                scaled_loss = loss / strategy.num_replicas_in_sync
                elbo = -(negative_log_likelihood + l2_loss + kl)

            grads = tape.gradient(scaled_loss, model.trainable_variables)

            # Separate learning rate implementation.
            if FLAGS.fast_weight_lr_multiplier != 1.0:
                grads_and_vars = []
                for grad, var in zip(grads, model.trainable_variables):
                    # Apply a different learning rate to the fast-weight
                    # approximate posterior/prior parameters. This excludes BN
                    # and slow weights, but be careful with the naming scheme.
                    if ('kernel' not in var.name
                            and 'batch_norm' not in var.name
                            and 'bias' not in var.name):
                        grads_and_vars.append(
                            (grad * FLAGS.fast_weight_lr_multiplier, var))
                    else:
                        grads_and_vars.append((grad, var))
                optimizer.apply_gradients(grads_and_vars)
            else:
                optimizer.apply_gradients(zip(grads,
                                              model.trainable_variables))

            probs = tf.nn.softmax(logits)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/kl'].update_state(kl)
            metrics['train/kl_scale'].update_state(kl_scale)
            metrics['train/elbo'].update_state(elbo)
            metrics['train/loss'].update_state(loss)
            metrics['train/accuracy'].update_state(labels, probs)
            metrics['train/ece'].update_state(labels, probs)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            if FLAGS.ensemble_size > 1:
                images = tf.tile(images, [FLAGS.ensemble_size, 1, 1, 1])
            logits = tf.reshape([
                model(images, training=False)
                for _ in range(FLAGS.num_eval_samples)
            ], [FLAGS.num_eval_samples, FLAGS.ensemble_size, -1, num_classes])
            if FLAGS.use_bfloat16:
                logits = tf.cast(logits, tf.float32)
            probs = tf.nn.softmax(logits)

            if FLAGS.ensemble_size > 1:
                per_probs = tf.reduce_mean(probs,
                                           axis=0)  # marginalize samples
                for i in range(FLAGS.ensemble_size):
                    member_probs = per_probs[i]
                    member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                        labels, member_probs)
                    metrics['test/nll_member_{}'.format(i)].update_state(
                        member_loss)
                    metrics['test/accuracy_member_{}'.format(i)].update_state(
                        labels, member_probs)

            # Negative log marginal likelihood computed in a numerically-stable way.
            labels_broadcasted = tf.broadcast_to(
                labels,
                [FLAGS.num_eval_samples, FLAGS.ensemble_size, labels.shape[0]])
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0, 1]) +
                tf.math.log(float(FLAGS.num_eval_samples *
                                  FLAGS.ensemble_size)))
            probs = tf.math.reduce_mean(probs, axis=[0, 1])  # marginalize

            l2_loss = compute_l2_loss(model)
            kl = sum(model.losses) / test_dataset_size
            elbo = -(negative_log_likelihood + l2_loss + kl)

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/kl'].update_state(kl)
                metrics['test/elbo'].update_state(elbo)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/kl_{}'.format(
                    dataset_name)].update_state(kl)
                corrupt_metrics['test/elbo_{}'.format(
                    dataset_name)].update_state(elbo)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)

        strategy.run(step_fn, args=(next(iterator), ))

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)

        for step in range(steps_per_epoch):
            train_step(train_iterator)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            for step in range(steps_per_eval):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_step(test_iterator, dataset_name)
            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        if FLAGS.ensemble_size > 1:
            for i in range(FLAGS.ensemble_size):
                logging.info(
                    'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
                    metrics['test/nll_member_{}'.format(i)].result(),
                    metrics['test/accuracy_member_{}'.format(i)].result() *
                    100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)
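Across these training loops, calibration is tracked with the same streaming-metric pattern: construct um.ExpectedCalibrationError once, call update_state(labels, probs) per batch, read result() at epoch end, then reset_states(). A minimal self-contained sketch of that pattern, assuming the uncertainty_metrics package is importable as um and using an arbitrary bin count:

import numpy as np
import uncertainty_metrics as um

# Toy 3-class batch: rows are softmax probabilities; labels are sparse ints.
probs = np.array([[0.8, 0.1, 0.1],
                  [0.2, 0.5, 0.3],
                  [0.1, 0.2, 0.7]])
labels = np.array([0, 2, 2])

ece = um.ExpectedCalibrationError(num_bins=15)
ece.update_state(labels, probs)  # one streaming update per batch
print(float(ece.result()))       # mean |confidence - accuracy| over bins
ece.reset_states()               # cleared between epochs, as in the loops above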
Ejemplo n.º 25
0
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    train_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        batch_size=FLAGS.per_core_batch_size,
        eval_batch_size=FLAGS.per_core_batch_size,
        data_dir=FLAGS.data_dir,
        data_mode='ind')
    ind_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        batch_size=batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        data_dir=FLAGS.data_dir,
        data_mode='ind')
    ood_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        batch_size=batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        data_dir=FLAGS.data_dir,
        data_mode='ood')
    all_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        batch_size=batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        data_dir=FLAGS.data_dir,
        data_mode='all')

    dataset_builders = {
        'clean': ind_dataset_builder,
        'ood': ood_dataset_builder,
        'all': all_dataset_builder
    }

    train_dataset = train_dataset_builder.build(
        split=ub.datasets.base.Split.TRAIN)

    ds_info = train_dataset_builder.info
    feature_size = ds_info['feature_size']
    # num_classes is number of valid intents plus out-of-scope intent
    num_classes = ds_info['num_classes'] + 1

    steps_per_epoch = ds_info['num_train_examples'] // batch_size

    test_datasets = {}
    steps_per_eval = {}
    for dataset_name, dataset_builder in dataset_builders.items():
        test_datasets[dataset_name] = dataset_builder.build(
            split=ub.datasets.base.Split.TEST)
        steps_per_eval[dataset_name] = (
            dataset_builder.info['num_test_examples'] // FLAGS.eval_batch_size)

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building BERT model')
        logging.info('use_gp_layer=%s', FLAGS.use_gp_layer)
        logging.info('use_spec_norm_att=%s', FLAGS.use_spec_norm_att)
        logging.info('use_spec_norm_ffn=%s', FLAGS.use_spec_norm_ffn)
        logging.info('use_layer_norm_att=%s', FLAGS.use_layer_norm_att)
        logging.info('use_layer_norm_ffn=%s', FLAGS.use_layer_norm_ffn)

        bert_config_dir, bert_ckpt_dir = resolve_bert_ckpt_and_config_dir(
            FLAGS.bert_dir, FLAGS.bert_config_dir, FLAGS.bert_ckpt_dir)
        bert_config = bert_utils.create_config(bert_config_dir)

        gp_layer_kwargs = dict(num_inducing=FLAGS.gp_hidden_dim,
                               gp_kernel_scale=FLAGS.gp_scale,
                               gp_output_bias=FLAGS.gp_bias,
                               normalize_input=FLAGS.gp_input_normalization,
                               gp_cov_momentum=FLAGS.gp_cov_discount_factor,
                               gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty)
        spec_norm_kwargs = dict(iteration=FLAGS.spec_norm_iteration,
                                norm_multiplier=FLAGS.spec_norm_bound)

        model, bert_encoder = ub.models.SngpBertBuilder(
            num_classes=num_classes,
            bert_config=bert_config,
            gp_layer_kwargs=gp_layer_kwargs,
            spec_norm_kwargs=spec_norm_kwargs,
            use_gp_layer=FLAGS.use_gp_layer,
            use_spec_norm_att=FLAGS.use_spec_norm_att,
            use_spec_norm_ffn=FLAGS.use_spec_norm_ffn,
            use_layer_norm_att=FLAGS.use_layer_norm_att,
            use_layer_norm_ffn=FLAGS.use_layer_norm_ffn,
            use_spec_norm_plr=FLAGS.use_spec_norm_plr)
        optimizer = bert_utils.create_optimizer(
            FLAGS.base_learning_rate,
            steps_per_epoch=steps_per_epoch,
            epochs=FLAGS.train_epochs,
            warmup_proportion=FLAGS.warmup_proportion)

        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())

        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss': tf.keras.metrics.Mean(),
            'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        }

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch
        else:
            # load BERT from initial checkpoint
            bert_encoder, _, _ = bert_utils.load_bert_weight_from_ckpt(
                bert_model=bert_encoder,
                bert_ckpt_dir=bert_ckpt_dir,
                repl_patterns=ub.models.bert_sngp.CHECKPOINT_REPL_PATTERNS)
            logging.info('Loaded BERT checkpoint %s', bert_ckpt_dir)

    # Finally, define test metrics outside the accelerator scope for CPU eval.
    metrics.update({
        'test/negative_log_likelihood':
        tf.keras.metrics.Mean(),
        'test/accuracy':
        tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece':
        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/stddev':
        tf.keras.metrics.Mean(),
    })
    for dataset_name, test_dataset in test_datasets.items():
        if dataset_name != 'clean':
            metrics.update({
                'test/nll_{}'.format(dataset_name):
                tf.keras.metrics.Mean(),
                'test/accuracy_{}'.format(dataset_name):
                tf.keras.metrics.SparseCategoricalAccuracy(),
                'test/ece_{}'.format(dataset_name):
                um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
                'test/stddev_{}'.format(dataset_name):
                tf.keras.metrics.Mean(),
            })
    metrics.update({
        'test/auroc_all': tf.keras.metrics.AUC(curve='ROC'),
        'test/auprc_all': tf.keras.metrics.AUC(curve='PR')
    })

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            features, labels = bert_utils.create_feature_and_label(
                inputs, feature_size)

            with tf.GradientTape() as tape:
                # Set learning phase to enable dropout etc during training.
                logits = model(features, training=True)

                if isinstance(logits, tuple):
                    # If model returns a tuple of (logits, covmat), extract logits
                    logits, _ = logits
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)

                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, logits, from_logits=True))
                l2_loss = sum(model.losses)
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, given that the TPUStrategy will reduce-sum all gradients.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(logits)
            metrics['train/ece'].update_state(labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            features, labels = bert_utils.create_feature_and_label(
                inputs, feature_size)

            # Compute ensemble prediction over Monte Carlo forward-pass samples.
            logits_list = []
            stddev_list = []
            for _ in range(FLAGS.num_mc_samples):
                logits = model(features, training=False)

                if isinstance(logits, tuple):
                    # If model returns a tuple of (logits, covmat), extract both.
                    logits, covmat = logits
                else:
                    covmat = tf.eye(FLAGS.eval_batch_size)

                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                    covmat = tf.cast(covmat, tf.float32)

                logits = ed.layers.utils.mean_field_logits(
                    logits,
                    covmat,
                    mean_field_factor=FLAGS.gp_mean_field_factor)
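                # The covariance diagonal is each example's GP predictive
                # variance; its square root is what test/stddev averages.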
                stddev = tf.sqrt(tf.linalg.diag_part(covmat))

                logits_list.append(logits)
                stddev_list.append(stddev)

            # Logits dimension is (num_samples, batch_size, num_classes).
            logits_list = tf.stack(logits_list, axis=0)
            stddev_list = tf.stack(stddev_list, axis=0)

            stddev = tf.reduce_mean(stddev_list, axis=0)
            probs_list = tf.nn.softmax(logits_list)
            probs = tf.reduce_mean(probs_list, axis=0)

            labels_broadcasted = tf.broadcast_to(
                labels, [FLAGS.num_mc_samples, labels.shape[0]])
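            # Monte Carlo marginal NLL: -log((1/S) sum_s p_s(y | x)),
            # computed stably via logsumexp over the S samples.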
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits_list, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
                tf.math.log(float(FLAGS.num_mc_samples)))

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
                metrics['test/stddev'].update_state(stddev)
            else:
                metrics['test/nll_{}'.format(dataset_name)].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy_{}'.format(dataset_name)].update_state(
                    labels, probs)
                metrics['test/ece_{}'.format(dataset_name)].update_state(
                    labels, probs)
                metrics['test/stddev_{}'.format(dataset_name)].update_state(
                    stddev)

            if dataset_name == 'all':
                ood_labels = tf.cast(labels == 150, labels.dtype)
                ood_probs = 1. - tf.reduce_max(probs, axis=-1)
                metrics['test/auroc_{}'.format(dataset_name)].update_state(
                    ood_labels, ood_probs)
                metrics['test/auprc_{}'.format(dataset_name)].update_state(
                    ood_labels, ood_probs)

        step_fn(next(iterator))

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            train_step(train_iterator)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        if epoch % FLAGS.evaluation_interval == 0:
            for dataset_name, test_dataset in test_datasets.items():
                test_iterator = iter(test_dataset)
                logging.info('Testing on dataset %s', dataset_name)
                for step in range(steps_per_eval[dataset_name]):
                    if step % 20 == 0:
                        logging.info(
                            'Starting to run eval step %s of epoch: %s', step,
                            epoch)
                    test_step(test_iterator, dataset_name)
                logging.info('Done with testing on %s', dataset_name)

            logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                         metrics['train/loss'].result(),
                         metrics['train/accuracy'].result() * 100)
            logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                         metrics['test/negative_log_likelihood'].result(),
                         metrics['test/accuracy'].result() * 100)
            total_results = {
                name: metric.result()
                for name, metric in metrics.items()
            }
            with summary_writer.as_default():
                for name, result in total_results.items():
                    tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)
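The SNGP evaluation step above tempers each Monte Carlo draw with ed.layers.utils.mean_field_logits before the softmax. A hedged sketch of the standard mean-field approximation this performs, dividing logits by sqrt(1 + lambda * variance) with lambda the mean-field factor (pi/8 is the textbook default used here for illustration; the exact edward2 implementation may differ in details):

import math

import tensorflow as tf


def mean_field_logits_sketch(logits, covmat, mean_field_factor=math.pi / 8.):
    """Scale logits by the GP predictive variance (mean-field approximation)."""
    variances = tf.linalg.diag_part(covmat)              # (batch,)
    scale = tf.sqrt(1. + mean_field_factor * variances)  # (batch,)
    return logits / tf.expand_dims(scale, axis=-1)       # broadcast over classes

# Usage mirroring the loop above:
# logits = mean_field_logits_sketch(logits, covmat, FLAGS.gp_mean_field_factor)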
Ejemplo n.º 26
0
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    train_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        split='train', data_dir=FLAGS.data_dir, data_mode='ind')
    ind_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        split='test', data_dir=FLAGS.data_dir, data_mode='ind')
    ood_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        split='test', data_dir=FLAGS.data_dir, data_mode='ood')
    all_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        split='test', data_dir=FLAGS.data_dir, data_mode='all')

    dataset_builders = {
        'clean': ind_dataset_builder,
        'ood': ood_dataset_builder,
        'all': all_dataset_builder
    }

    train_dataset = train_dataset_builder.load(batch_size=batch_size)

    ds_info = train_dataset_builder.tfds_info
    feature_size = ds_info.metadata['feature_size']
    # num_classes is number of valid intents plus out-of-scope intent
    num_classes = ds_info.features['intent_label'].num_classes + 1
    # vocab_size is total number of valid tokens plus the out-of-vocabulary token.
    vocab_size = ind_dataset_builder.tokenizer.num_words + 1

    steps_per_epoch = train_dataset_builder.num_examples // batch_size

    test_datasets = {}
    steps_per_eval = {}
    for dataset_name, dataset_builder in dataset_builders.items():
        test_datasets[dataset_name] = dataset_builder.load(
            batch_size=FLAGS.eval_batch_size)
        steps_per_eval[dataset_name] = (dataset_builder.num_examples //
                                        FLAGS.eval_batch_size)

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    premade_embedding_array = None
    if FLAGS.word_embedding_dir:
        with tf.io.gfile.GFile(FLAGS.word_embedding_dir,
                               'rb') as embedding_file:
            premade_embedding_array = np.load(embedding_file)

    with strategy.scope():
        logging.info('Building %s model', FLAGS.model_family)
        if FLAGS.model_family.lower() == 'textcnn':
            model = cnn_model.textcnn(
                filter_sizes=[int(x) for x in FLAGS.filter_sizes],
                num_filters=FLAGS.num_filters,
                num_classes=num_classes,
                feature_size=feature_size,
                vocab_size=vocab_size,
                embed_size=FLAGS.embedding_size,
                dropout_rate=FLAGS.dropout_rate,
                l2=FLAGS.l2,
                premade_embedding_arr=premade_embedding_array)
            optimizer = tf.keras.optimizers.Adam(FLAGS.base_learning_rate)
        elif FLAGS.model_family.lower() == 'bert':
            bert_config_dir, bert_ckpt_dir = resolve_bert_ckpt_and_config_dir(
                FLAGS.bert_dir, FLAGS.bert_config_dir, FLAGS.bert_ckpt_dir)
            bert_config = bert_utils.create_config(bert_config_dir)
            model, bert_encoder = ub.models.BertBuilder(
                num_classes=num_classes,
                max_seq_length=feature_size,
                bert_config=bert_config)
            optimizer = bert_utils.create_optimizer(
                FLAGS.base_learning_rate,
                steps_per_epoch=steps_per_epoch,
                epochs=FLAGS.train_epochs,
                warmup_proportion=FLAGS.warmup_proportion)
        else:
            raise ValueError(
                'model_family ({}) can only be TextCNN or BERT.'.format(
                    FLAGS.model_family))

        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())

        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss': tf.keras.metrics.Mean(),
            'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood': tf.keras.metrics.Mean(),
            'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        }

        for dataset_name, test_dataset in test_datasets.items():
            if dataset_name != 'clean':
                metrics.update({
                    'test/nll_{}'.format(dataset_name):
                    tf.keras.metrics.Mean(),
                    'test/accuracy_{}'.format(dataset_name):
                    tf.keras.metrics.SparseCategoricalAccuracy(),
                    'test/ece_{}'.format(dataset_name):
                    um.ExpectedCalibrationError(num_bins=FLAGS.num_bins)
                })

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch
        elif FLAGS.model_family.lower() == 'bert':
            # load BERT from initial checkpoint
            bert_checkpoint = tf.train.Checkpoint(model=bert_encoder)
            bert_checkpoint.restore(
                bert_ckpt_dir).assert_existing_objects_matched()
            logging.info('Loaded BERT checkpoint %s', bert_ckpt_dir)

    # Finally, define OOD metrics outside the accelerator scope for CPU eval.
    metrics.update({
        'test/auroc_all': tf.keras.metrics.AUC(curve='ROC'),
        'test/auprc_all': tf.keras.metrics.AUC(curve='PR')
    })

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            features, labels = create_feature_and_label(
                inputs, feature_size, model_family=FLAGS.model_family)

            with tf.GradientTape() as tape:
                # Set learning phase to enable dropout etc during training.
                logits = model(features, training=True)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, logits, from_logits=True))
                l2_loss = sum(model.losses)
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, given that the TPUStrategy will reduce-sum all gradients.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(logits)
            metrics['train/ece'].update_state(labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            features, labels = create_feature_and_label(
                inputs, feature_size, model_family=FLAGS.model_family)

            # Set learning phase to disable dropout etc during eval.
            logits = model(features, training=False)
            if FLAGS.use_bfloat16:
                logits = tf.cast(logits, tf.float32)
            probs = tf.nn.softmax(logits)
            negative_log_likelihood = tf.reduce_mean(
                tf.keras.losses.sparse_categorical_crossentropy(labels, probs))

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            else:
                metrics['test/nll_{}'.format(dataset_name)].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy_{}'.format(dataset_name)].update_state(
                    labels, probs)
                metrics['test/ece_{}'.format(dataset_name)].update_state(
                    labels, probs)

            if dataset_name == 'all':
                ood_labels = tf.cast(labels == 150, labels.dtype)
                ood_probs = 1. - tf.reduce_max(probs, axis=-1)
                metrics['test/auroc_{}'.format(dataset_name)].update_state(
                    ood_labels, ood_probs)
                metrics['test/auprc_{}'.format(dataset_name)].update_state(
                    ood_labels, ood_probs)

        step_fn(next(iterator))

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            train_step(train_iterator)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        if epoch % FLAGS.evaluation_interval == 0:
            for dataset_name, test_dataset in test_datasets.items():
                test_iterator = iter(test_dataset)
                logging.info('Testing on dataset %s', dataset_name)
                for step in range(steps_per_eval[dataset_name]):
                    if step % 20 == 0:
                        logging.info(
                            'Starting to run eval step %s of epoch: %s', step,
                            epoch)
                    test_step(test_iterator, dataset_name)
                logging.info('Done with testing on %s', dataset_name)

            logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                         metrics['train/loss'].result(),
                         metrics['train/accuracy'].result() * 100)
            logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                         metrics['test/negative_log_likelihood'].result(),
                         metrics['test/accuracy'].result() * 100)
            total_results = {
                name: metric.result()
                for name, metric in metrics.items()
            }
            with summary_writer.as_default():
                for name, result in total_results.items():
                    tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)
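On the 'all' split above, the classifier doubles as an OOD detector: intent label 150 marks the out-of-scope class, and one minus the max softmax probability is the OOD score fed to the streaming AUROC/AUPRC metrics. A standalone sketch of that scoring with toy tensors (not CLINC data):

import tensorflow as tf

labels = tf.constant([3, 150, 42])               # 150 = out-of-scope intent
max_probs = tf.constant([0.9, 0.4, 0.7])         # max softmax prob per example
ood_labels = tf.cast(labels == 150, tf.float32)  # 1 = out-of-scope
ood_scores = 1. - max_probs                      # low confidence => likely OOD

auroc = tf.keras.metrics.AUC(curve='ROC')
auroc.update_state(ood_labels, ood_scores)
print(float(auroc.result()))  # 1.0 here: the OOD example gets the top score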
Ejemplo n.º 27
0
def main(argv):
  del argv  # unused arg
  tf.io.gfile.makedirs(FLAGS.output_dir)
  logging.info('Saving checkpoints at %s', FLAGS.output_dir)
  tf.random.set_seed(FLAGS.seed)

  if FLAGS.use_gpu:
    logging.info('Use GPU')
    strategy = tf.distribute.MirroredStrategy()
  else:
    logging.info('Use TPU at %s',
                 FLAGS.tpu if FLAGS.tpu is not None else 'local')
    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.experimental.TPUStrategy(resolver)

  train_input_fn = utils.load_input_fn(
      split=tfds.Split.TRAIN,
      name=FLAGS.dataset,
      batch_size=FLAGS.per_core_batch_size // FLAGS.batch_repetitions,
      use_bfloat16=FLAGS.use_bfloat16)
  clean_test_input_fn = utils.load_input_fn(
      split=tfds.Split.TEST,
      name=FLAGS.dataset,
      batch_size=FLAGS.per_core_batch_size,
      use_bfloat16=FLAGS.use_bfloat16)
  train_dataset = strategy.experimental_distribute_datasets_from_function(
      train_input_fn)
  test_datasets = {
      'clean':
          strategy.experimental_distribute_datasets_from_function(
              clean_test_input_fn),
  }
  if FLAGS.corruptions_interval > 0:
    if FLAGS.dataset == 'cifar10':
      load_c_input_fn = utils.load_cifar10_c_input_fn
    else:
      load_c_input_fn = functools.partial(
          utils.load_cifar100_c_input_fn, path=FLAGS.cifar100_c_path)
    corruption_types, max_intensity = utils.load_corrupted_test_info(
        FLAGS.dataset)
    for corruption in corruption_types:
      for intensity in range(1, max_intensity + 1):
        input_fn = load_c_input_fn(
            corruption_name=corruption,
            corruption_intensity=intensity,
            batch_size=FLAGS.per_core_batch_size,
            use_bfloat16=FLAGS.use_bfloat16)
        test_datasets['{0}_{1}'.format(corruption, intensity)] = (
            strategy.experimental_distribute_datasets_from_function(input_fn))

  ds_info = tfds.builder(FLAGS.dataset).info
  train_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores // FLAGS.batch_repetitions
  test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  train_dataset_size = ds_info.splits['train'].num_examples
  steps_per_epoch = train_dataset_size // train_batch_size
  steps_per_eval = ds_info.splits['test'].num_examples // test_batch_size
  num_classes = ds_info.features['label'].num_classes

  if FLAGS.use_bfloat16:
    policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
    tf.keras.mixed_precision.experimental.set_policy(policy)

  summary_writer = tf.summary.create_file_writer(
      os.path.join(FLAGS.output_dir, 'summaries'))

  with strategy.scope():
    logging.info('Building Keras model')
    model = cifar_model_reg_path.wide_resnet(
        input_shape=[FLAGS.ensemble_size] +
        list(ds_info.features['image'].shape),
        depth=28,
        width_multiplier=FLAGS.width_multiplier,
        num_classes=num_classes,
        ensemble_size=FLAGS.ensemble_size,
        l2=FLAGS.l2, l1=FLAGS.l1)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())
    # Linearly scale the learning rate and the decay epochs relative to the vanilla settings.
    base_lr = FLAGS.base_learning_rate * train_batch_size / 128
    lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                       for start_epoch_str in FLAGS.lr_decay_epochs]
    lr_schedule = utils.LearningRateSchedule(steps_per_epoch, base_lr,
                                             FLAGS.lr_decay_ratio,
                                             lr_decay_epochs,
                                             FLAGS.lr_warmup_epochs)
    optimizer = tf.keras.optimizers.SGD(
        lr_schedule, momentum=0.9, nesterov=True)
    metrics = {
        'train/negative_log_likelihood': tf.keras.metrics.Mean(),
        'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'train/loss': tf.keras.metrics.Mean(),
        'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
    }

    for log10_threshold in NNZ_LOG10_THRESHOLDS:
      metrics['train/nnz{}'.format(log10_threshold)] = tf.keras.metrics.Mean()

    if FLAGS.corruptions_interval > 0:
      corrupt_metrics = {}
      for intensity in range(1, max_intensity + 1):
        for corruption in corruption_types:
          dataset_name = '{0}_{1}'.format(corruption, intensity)
          corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
              tf.keras.metrics.Mean())
          corrupt_metrics['test/accuracy_{}'.format(dataset_name)] = (
              tf.keras.metrics.SparseCategoricalAccuracy())
          corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
              um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

    for i in range(FLAGS.ensemble_size):
      metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
      metrics['test/accuracy_member_{}'.format(i)] = (
          tf.keras.metrics.SparseCategoricalAccuracy())
    test_diversity = {
        'test/disagreement': tf.keras.metrics.Mean(),
        'test/average_kl': tf.keras.metrics.Mean(),
        'test/cosine_similarity': tf.keras.metrics.Mean(),
    }

    checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
    latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
    initial_epoch = 0
    if latest_checkpoint:
      # checkpoint.restore must be within a strategy.scope() so that optimizer
      # slot variables are mirrored.
      checkpoint.restore(latest_checkpoint)
      logging.info('Loaded checkpoint %s', latest_checkpoint)
      initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

  @tf.function
  def train_step(iterator):
    """Training StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      batch_size = tf.shape(images)[0]

      main_shuffle = tf.random.shuffle(tf.tile(
          tf.range(batch_size), [FLAGS.batch_repetitions]))
      to_shuffle = tf.cast(tf.cast(tf.shape(main_shuffle)[0], tf.float32)
                           * (1. - FLAGS.input_repetition_probability),
                           tf.int32)
      shuffle_indices = [
          tf.concat([tf.random.shuffle(main_shuffle[:to_shuffle]),
                     main_shuffle[to_shuffle:]], axis=0)
          for _ in range(FLAGS.ensemble_size)]
      images = tf.stack([tf.gather(images, indices, axis=0)
                         for indices in shuffle_indices], axis=1)
      labels = tf.stack([tf.gather(labels, indices, axis=0)
                         for indices in shuffle_indices], axis=1)

      with tf.GradientTape() as tape:
        logits = model(images, training=True)
        if FLAGS.use_bfloat16:
          logits = tf.cast(logits, tf.float32)

        negative_log_likelihood = tf.reduce_mean(tf.reduce_sum(
            tf.keras.losses.sparse_categorical_crossentropy(
                labels, logits, from_logits=True), axis=1))

        regularization = sum(model.losses)

        # Scale the loss, given that the TPUStrategy will reduce-sum all gradients.
        loss = negative_log_likelihood + regularization
        scaled_loss = loss / strategy.num_replicas_in_sync

      grads = tape.gradient(scaled_loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))

      probs = tf.nn.softmax(tf.reshape(logits, [-1, num_classes]))
      flat_labels = tf.reshape(labels, [-1])
      metrics['train/ece'].update_state(flat_labels, probs)
      metrics['train/loss'].update_state(loss)
      metrics['train/negative_log_likelihood'].update_state(
          negative_log_likelihood)
      metrics['train/accuracy'].update_state(flat_labels, probs)

      for log10_threshold in NNZ_LOG10_THRESHOLDS:
        nnz = compute_nnz(model, threshold=10.**log10_threshold)
        metrics['train/nnz{}'.format(log10_threshold)].update_state(nnz)

    strategy.run(step_fn, args=(next(iterator),))

  @tf.function
  def test_step(iterator, dataset_name):
    """Evaluation StepFn."""

    def step_fn(inputs):
      """Per-Replica StepFn."""
      images, labels = inputs
      images = tf.tile(
          tf.expand_dims(images, 1), [1, FLAGS.ensemble_size, 1, 1, 1])
      logits = model(images, training=False)
      if FLAGS.use_bfloat16:
        logits = tf.cast(logits, tf.float32)
      probs = tf.nn.softmax(logits)

      if dataset_name == 'clean':
        per_probs = tf.transpose(probs, perm=[1, 0, 2])
        diversity_results = um.average_pairwise_diversity(
            per_probs, FLAGS.ensemble_size)
        for k, v in diversity_results.items():
          test_diversity['test/' + k].update_state(v)

      for i in range(FLAGS.ensemble_size):
        member_probs = probs[:, i]
        member_loss = tf.keras.losses.sparse_categorical_crossentropy(
            labels, member_probs)
        metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
        metrics['test/accuracy_member_{}'.format(i)].update_state(
            labels, member_probs)

      # Negative log marginal likelihood computed in a numerically-stable way.
      labels_tiled = tf.tile(
          tf.expand_dims(labels, 1), [1, FLAGS.ensemble_size])
      log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
          labels_tiled, logits, from_logits=True)
      negative_log_likelihood = tf.reduce_mean(
          -tf.reduce_logsumexp(log_likelihoods, axis=[1]) +
          tf.math.log(float(FLAGS.ensemble_size)))
      probs = tf.math.reduce_mean(probs, axis=1)  # marginalize

      if dataset_name == 'clean':
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)
      else:
        corrupt_metrics['test/nll_{}'.format(dataset_name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(dataset_name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(dataset_name)].update_state(
            labels, probs)

    strategy.run(step_fn, args=(next(iterator),))

  metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

  train_iterator = iter(train_dataset)
  start_time = time.time()
  for epoch in range(initial_epoch, FLAGS.train_epochs):
    logging.info('Starting to run epoch: %s', epoch)

    for step in range(steps_per_epoch):
      train_step(train_iterator)

      current_step = epoch * steps_per_epoch + (step + 1)
      max_steps = steps_per_epoch * FLAGS.train_epochs
      time_elapsed = time.time() - start_time
      steps_per_sec = float(current_step) / time_elapsed
      eta_seconds = (max_steps - current_step) / steps_per_sec
      message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                 'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                     current_step / max_steps, epoch + 1, FLAGS.train_epochs,
                     steps_per_sec, eta_seconds / 60, time_elapsed / 60))
      if step % 20 == 0:
        logging.info(message)

    datasets_to_evaluate = {'clean': test_datasets['clean']}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      datasets_to_evaluate = test_datasets
    for dataset_name, test_dataset in datasets_to_evaluate.items():
      test_iterator = iter(test_dataset)
      logging.info('Testing on dataset %s', dataset_name)
      for step in range(steps_per_eval):
        if step % 20 == 0:
          logging.info('Starting to run eval step %s of epoch: %s', step, epoch)
        test_start_time = time.time()
        test_step(test_iterator, dataset_name)
        ms_per_example = (time.time() - test_start_time) * 1e3 / test_batch_size
        metrics['test/ms_per_example'].update_state(ms_per_example)
      logging.info('Done with testing on %s', dataset_name)

    corrupt_results = {}
    if (FLAGS.corruptions_interval > 0 and
        (epoch + 1) % FLAGS.corruptions_interval == 0):
      corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                        corruption_types,
                                                        max_intensity)

    logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                 metrics['train/loss'].result(),
                 metrics['train/accuracy'].result() * 100)
    logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                 metrics['test/negative_log_likelihood'].result(),
                 metrics['test/accuracy'].result() * 100)
    for i in range(FLAGS.ensemble_size):
      logging.info(
          'Member %d Test Loss: %.4f, Accuracy: %.2f%%', i,
          metrics['test/nll_member_{}'.format(i)].result(),
          metrics['test/accuracy_member_{}'.format(i)].result() * 100)

    metrics.update(test_diversity)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    with summary_writer.as_default():
      for name, result in total_results.items():
        tf.summary.scalar(name, result, step=epoch + 1)

    for metric in metrics.values():
      metric.reset_states()

    if (FLAGS.checkpoint_interval > 0 and
        (epoch + 1) % FLAGS.checkpoint_interval == 0):
      checkpoint_name = checkpoint.save(
          os.path.join(FLAGS.output_dir, 'checkpoint'))
      logging.info('Saved checkpoint to %s', checkpoint_name)

  final_checkpoint_name = checkpoint.save(
      os.path.join(FLAGS.output_dir, 'checkpoint'))
  logging.info('Saved last checkpoint to %s', final_checkpoint_name)
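Note how the ensemble NLL above is computed: instead of averaging member probabilities and taking a log, the per-member log-likelihoods go through a logsumexp, the numerically stable form of -log((1/K) * sum_k p_k(y)). A compact sketch of just that reduction with random toy logits (K stands in for FLAGS.ensemble_size):

import tensorflow as tf

K = 3
logits = tf.random.normal([8, K, 10])  # (batch, members, classes)
labels = tf.random.uniform([8], maxval=10, dtype=tf.int32)
labels_tiled = tf.tile(tf.expand_dims(labels, 1), [1, K])

# Per-member log-likelihoods log p_k(y | x), shape (batch, members).
log_liks = -tf.keras.losses.sparse_categorical_crossentropy(
    labels_tiled, logits, from_logits=True)
# -log((1/K) sum_k p_k) = -(logsumexp_k log p_k - log K), stable in log space.
nll = tf.reduce_mean(
    -tf.reduce_logsumexp(log_liks, axis=1) + tf.math.log(float(K)))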
Ejemplo n.º 28
0
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    # Initialize distribution strategy on flag-specified accelerator
    strategy = utils.init_distribution_strategy(FLAGS.force_use_cpu,
                                                FLAGS.use_gpu, FLAGS.tpu)

    train_batch_size = FLAGS.train_batch_size * FLAGS.num_cores
    eval_batch_size = FLAGS.eval_batch_size * FLAGS.num_cores

    # As per the Kaggle challenge, we have split sizes:
    # train: 35,126
    # validation: 10,906 (currently unused)
    # test: 42,670
    ds_info = tfds.builder('diabetic_retinopathy_detection').info
    steps_per_epoch = ds_info.splits['train'].num_examples // train_batch_size
    steps_per_eval = ds_info.splits['test'].num_examples // eval_batch_size

    dataset_train_builder = ub.datasets.get('diabetic_retinopathy_detection',
                                            split='train',
                                            data_dir=FLAGS.data_dir)
    dataset_train = dataset_train_builder.load(batch_size=train_batch_size)
    dataset_train = strategy.experimental_distribute_dataset(dataset_train)
    dataset_test_builder = ub.datasets.get('diabetic_retinopathy_detection',
                                           split='test',
                                           data_dir=FLAGS.data_dir)
    dataset_test = dataset_test_builder.load(batch_size=eval_batch_size)
    dataset_test = strategy.experimental_distribute_dataset(dataset_test)

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building Keras ResNet-50 deterministic model.')
        model = ub.models.resnet50_deterministic(
            input_shape=utils.load_input_shape(dataset_train),
            num_classes=1)  # binary classification task
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())

        # Linearly scale the learning rate and the decay epochs relative to the vanilla settings.
        base_lr = (FLAGS.base_learning_rate *
                   train_batch_size) / DEFAULT_TRAIN_BATCH_SIZE
        lr_decay_epochs = [
            (int(start_epoch_str) * FLAGS.train_epochs) // DEFAULT_NUM_EPOCHS
            for start_epoch_str in FLAGS.lr_decay_epochs
        ]

        lr_schedule = utils.LearningRateSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=0.9,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.BinaryAccuracy(),
            'train/loss': tf.keras.metrics.Mean(),  # NLL + L2
            'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood': tf.keras.metrics.Mean(),
            'test/accuracy': tf.keras.metrics.BinaryAccuracy(),
            'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins)
        }
        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope()
            # so that optimizer slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    # Finally, define AUC metrics outside the accelerator scope for CPU eval.
    metrics.update({
        'train/auc': tf.keras.metrics.AUC(),
        'test/auc': tf.keras.metrics.AUC()
    })

    @tf.function
    def train_step(iterator):
        """Training step function."""
        def step_fn(inputs):
            """Per-replica step function."""
            images = inputs['features']
            labels = inputs['labels']

            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)

                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.binary_crossentropy(
                        y_true=tf.expand_dims(labels, axis=-1),
                        y_pred=logits,
                        from_logits=True))
                l2_loss = sum(model.losses)
                loss = negative_log_likelihood + (FLAGS.l2 * l2_loss)

                # Scale the loss, given that the TPUStrategy will reduce-sum all gradients.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            probs = tf.squeeze(tf.nn.sigmoid(logits))
            metrics['train/ece'].update_state(labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, probs)
            metrics['train/auc'].update_state(labels, probs)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator):
        """Evaluation step function."""
        def step_fn(inputs):
            """Per-replica step function."""
            images = inputs['features']
            labels = inputs['labels']
            logits = model(images, training=False)
            if FLAGS.use_bfloat16:
                logits = tf.cast(logits, tf.float32)

            negative_log_likelihood = tf.reduce_mean(
                tf.keras.losses.binary_crossentropy(
                    y_true=tf.expand_dims(labels, axis=-1),
                    y_pred=logits,
                    from_logits=True))
            probs = tf.squeeze(tf.nn.sigmoid(logits))
            metrics['test/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['test/accuracy'].update_state(labels, probs)
            metrics['test/auc'].update_state(labels, probs)
            metrics['test/ece'].update_state(labels, probs)

        strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})
    start_time = time.time()

    for epoch in range(initial_epoch, FLAGS.train_epochs):
        train_iterator = iter(dataset_train)
        test_iterator = iter(dataset_test)
        logging.info('Starting to run epoch: %s', epoch + 1)
        for step in range(steps_per_epoch):
            train_step(train_iterator)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        for step in range(steps_per_eval):
            if step % 20 == 0:
                logging.info('Starting to run eval step %s of epoch: %s', step,
                             epoch + 1)

            test_start_time = time.time()
            test_step(test_iterator)
            ms_per_example = (time.time() -
                              test_start_time) * 1e3 / eval_batch_size
            metrics['test/ms_per_example'].update_state(ms_per_example)

        logging.info(
            'Train Loss (NLL+L2): %.4f, Accuracy: %.2f%%, AUC: %.2f%%, ECE: %.2f%%',
            metrics['train/loss'].result(),
            metrics['train/accuracy'].result() * 100,
            metrics['train/auc'].result() * 100,
            metrics['train/ece'].result() * 100)
        logging.info(
            'Test NLL: %.4f, Accuracy: %.2f%%, AUC: %.2f%%, ECE: %.2f%%',
            metrics['test/negative_log_likelihood'].result(),
            metrics['test/accuracy'].result() * 100,
            metrics['test/auc'].result() * 100,
            metrics['test/ece'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)

            # TODO(nband): debug checkpointing
            # Also save Keras model, due to checkpoint.save issue
            keras_model_name = os.path.join(FLAGS.output_dir,
                                            f'keras_model_{epoch + 1}')
            model.save(keras_model_name)
            logging.info('Saved keras model to %s', keras_model_name)

    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)

    keras_model_name = os.path.join(FLAGS.output_dir,
                                    f'keras_model_{FLAGS.train_epochs}')
    model.save(keras_model_name)
    logging.info('Saved keras model to %s', keras_model_name)
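The ResNet-50 script above (like the Wide ResNet ones) rescales hyperparameters linearly from a reference configuration: the base learning rate grows with the batch-size ratio, and the decay-epoch milestones shrink or stretch with the epoch-budget ratio. A sketch of that arithmetic with illustrative numbers (the reference constants below are placeholders, not the scripts' actual DEFAULT_* values):

DEFAULT_TRAIN_BATCH_SIZE = 128  # reference batch size (placeholder)
DEFAULT_NUM_EPOCHS = 90         # reference epoch budget (placeholder)

train_batch_size = 512
train_epochs = 45
base_learning_rate = 0.1
lr_decay_epochs_flags = ['30', '60']  # milestones at the reference schedule

# Linear scaling rule: learning rate grows with the batch size.
base_lr = base_learning_rate * train_batch_size / DEFAULT_TRAIN_BATCH_SIZE  # 0.4
# Milestones are rescaled to the actual epoch budget: [15, 30].
lr_decay_epochs = [int(e) * train_epochs // DEFAULT_NUM_EPOCHS
                   for e in lr_decay_epochs_flags]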
Ejemplo n.º 29
0
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Saving checkpoints at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = (FLAGS.per_core_batch_size * FLAGS.num_cores //
                  FLAGS.num_dropout_samples_training)
    test_batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    steps_per_epoch = ds_info.splits['train'].num_examples // batch_size
    steps_per_eval = ds_info.splits['test'].num_examples // test_batch_size
    num_classes = ds_info.features['label'].num_classes

    train_dataset = utils.load_dataset(split=tfds.Split.TRAIN,
                                       name=FLAGS.dataset,
                                       batch_size=batch_size,
                                       use_bfloat16=FLAGS.use_bfloat16)
    clean_test_dataset = utils.load_dataset(split=tfds.Split.TEST,
                                            name=FLAGS.dataset,
                                            batch_size=test_batch_size,
                                            use_bfloat16=FLAGS.use_bfloat16)
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    test_datasets = {
        'clean': strategy.experimental_distribute_dataset(clean_test_dataset),
    }
    if FLAGS.corruptions_interval > 0:
        if FLAGS.dataset == 'cifar10':
            load_c_dataset = utils.load_cifar10_c
        else:
            load_c_dataset = functools.partial(utils.load_cifar100_c,
                                               path=FLAGS.cifar100_c_path)
        corruption_types, max_intensity = utils.load_corrupted_test_info(
            FLAGS.dataset)
        for corruption in corruption_types:
            for intensity in range(1, max_intensity + 1):
                dataset = load_c_dataset(corruption_name=corruption,
                                         corruption_intensity=intensity,
                                         batch_size=test_batch_size,
                                         use_bfloat16=FLAGS.use_bfloat16)
                test_datasets['{0}_{1}'.format(corruption, intensity)] = (
                    strategy.experimental_distribute_dataset(dataset))

    if FLAGS.use_bfloat16:
        policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
        tf.keras.mixed_precision.experimental.set_policy(policy)

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building ResNet model')
        model = ub.models.wide_resnet_dropout(
            input_shape=ds_info.features['image'].shape,
            depth=28,
            width_multiplier=10,
            num_classes=num_classes,
            l2=FLAGS.l2,
            dropout_rate=FLAGS.dropout_rate,
            residual_dropout=FLAGS.residual_dropout,
            filterwise_dropout=FLAGS.filterwise_dropout)
        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())
        # Linearly scale the learning rate and the decay epochs relative to the vanilla settings.
        base_lr = FLAGS.base_learning_rate * batch_size / 128
        lr_decay_epochs = [(int(start_epoch_str) * FLAGS.train_epochs) // 200
                           for start_epoch_str in FLAGS.lr_decay_epochs]
        lr_schedule = utils.LearningRateSchedule(
            steps_per_epoch,
            base_lr,
            decay_ratio=FLAGS.lr_decay_ratio,
            decay_epochs=lr_decay_epochs,
            warmup_epochs=FLAGS.lr_warmup_epochs)
        optimizer = tf.keras.optimizers.SGD(lr_schedule,
                                            momentum=0.9,
                                            nesterov=True)
        metrics = {
            'train/negative_log_likelihood': tf.keras.metrics.Mean(),
            'train/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'train/loss': tf.keras.metrics.Mean(),
            'train/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/negative_log_likelihood': tf.keras.metrics.Mean(),
            'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
            'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
        }
        if FLAGS.corruptions_interval > 0:
            corrupt_metrics = {}
            for intensity in range(1, max_intensity + 1):
                for corruption in corruption_types:
                    dataset_name = '{0}_{1}'.format(corruption, intensity)
                    corrupt_metrics['test/nll_{}'.format(dataset_name)] = (
                        tf.keras.metrics.Mean())
                    corrupt_metrics['test/accuracy_{}'.format(
                        dataset_name)] = (
                            tf.keras.metrics.SparseCategoricalAccuracy())
                    corrupt_metrics['test/ece_{}'.format(dataset_name)] = (
                        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = optimizer.iterations.numpy() // steps_per_epoch

    @tf.function
    def train_step(iterator):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs
            images = tf.tile(images,
                             [FLAGS.num_dropout_samples_training, 1, 1, 1])
            labels = tf.tile(labels, [FLAGS.num_dropout_samples_training])
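            # Tiling gives `num_dropout_samples_training` copies of each image;
            # dropout noise is sampled independently per row of the batch, so a
            # single forward pass yields that many Monte Carlo dropout samples.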
            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                negative_log_likelihood = tf.reduce_mean(
                    tf.keras.losses.sparse_categorical_crossentropy(
                        labels, logits, from_logits=True))
                l2_loss = sum(model.losses)
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, since the strategy reduce-sums gradients
                # across replicas.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.softmax(logits)
            metrics['train/ece'].update_state(labels, probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, logits)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            images, labels = inputs

            logits_list = []
            for _ in range(FLAGS.num_dropout_samples):
                logits = model(images, training=False)
                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)
                logits_list.append(logits)

            # Logits dimension is (num_samples, batch_size, num_classes).
            logits_list = tf.stack(logits_list, axis=0)
            probs_list = tf.nn.softmax(logits_list)
            probs = tf.reduce_mean(probs_list, axis=0)

            labels_broadcasted = tf.broadcast_to(
                labels, [FLAGS.num_dropout_samples, labels.shape[0]])
            log_likelihoods = -tf.keras.losses.sparse_categorical_crossentropy(
                labels_broadcasted, logits_list, from_logits=True)
            negative_log_likelihood = tf.reduce_mean(
                -tf.reduce_logsumexp(log_likelihoods, axis=[0]) +
                tf.math.log(float(FLAGS.num_dropout_samples)))
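            # `probs` above is the Monte Carlo predictive mean
            # (1/S) * sum_s p(y | x, theta_s). Its NLL is
            # -log((1/S) * sum_s exp(log p_s)) = -(logsumexp_s(log p_s) - log S),
            # which is exactly the expression computed here.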

            if dataset_name == 'clean':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            else:
                corrupt_metrics['test/nll_{}'.format(
                    dataset_name)].update_state(negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(
                    dataset_name)].update_state(labels, probs)
                corrupt_metrics['test/ece_{}'.format(
                    dataset_name)].update_state(labels, probs)

        strategy.run(step_fn, args=(next(iterator), ))

    metrics.update({'test/ms_per_example': tf.keras.metrics.Mean()})

    train_iterator = iter(train_dataset)
    start_time = time.time()
    for epoch in range(initial_epoch, FLAGS.train_epochs):
        logging.info('Starting to run epoch: %s', epoch)
        for step in range(steps_per_epoch):
            train_step(train_iterator)

            current_step = epoch * steps_per_epoch + (step + 1)
            max_steps = steps_per_epoch * FLAGS.train_epochs
            time_elapsed = time.time() - start_time
            steps_per_sec = float(current_step) / time_elapsed
            eta_seconds = (max_steps - current_step) / steps_per_sec
            message = ('{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                       'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                           current_step / max_steps, epoch + 1,
                           FLAGS.train_epochs, steps_per_sec, eta_seconds / 60,
                           time_elapsed / 60))
            if step % 20 == 0:
                logging.info(message)

        datasets_to_evaluate = {'clean': test_datasets['clean']}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            datasets_to_evaluate = test_datasets
        for dataset_name, test_dataset in datasets_to_evaluate.items():
            test_iterator = iter(test_dataset)
            logging.info('Testing on dataset %s', dataset_name)
            for step in range(steps_per_eval):
                if step % 20 == 0:
                    logging.info('Starting to run eval step %s of epoch: %s',
                                 step, epoch)
                test_start_time = time.time()
                test_step(test_iterator, dataset_name)
                # Milliseconds per example: elapsed seconds * 1e3 over the
                # test batch.
                ms_per_example = (time.time() -
                                  test_start_time) * 1e3 / test_batch_size
                metrics['test/ms_per_example'].update_state(ms_per_example)

            logging.info('Done with testing on %s', dataset_name)

        corrupt_results = {}
        if (FLAGS.corruptions_interval > 0
                and (epoch + 1) % FLAGS.corruptions_interval == 0):
            corrupt_results = utils.aggregate_corrupt_metrics(
                corrupt_metrics, corruption_types, max_intensity)

        logging.info('Train Loss: %.4f, Accuracy: %.2f%%',
                     metrics['train/loss'].result(),
                     metrics['train/accuracy'].result() * 100)
        logging.info('Test NLL: %.4f, Accuracy: %.2f%%',
                     metrics['test/negative_log_likelihood'].result(),
                     metrics['test/accuracy'].result() * 100)
        total_results = {
            name: metric.result()
            for name, metric in metrics.items()
        }
        total_results.update(corrupt_results)
        with summary_writer.as_default():
            for name, result in total_results.items():
                tf.summary.scalar(name, result, step=epoch + 1)

        for metric in metrics.values():
            metric.reset_states()

        if (FLAGS.checkpoint_interval > 0
                and (epoch + 1) % FLAGS.checkpoint_interval == 0):
            checkpoint_name = checkpoint.save(
                os.path.join(FLAGS.output_dir, 'checkpoint'))
            logging.info('Saved checkpoint to %s', checkpoint_name)
    final_checkpoint_name = checkpoint.save(
        os.path.join(FLAGS.output_dir, 'checkpoint'))
    logging.info('Saved last checkpoint to %s', final_checkpoint_name)
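
A minimal standalone sketch of the evaluation pattern above: draw several
stochastic forward passes, average the softmax probabilities, and feed the
averaged distribution to `um.ExpectedCalibrationError`. The function name, the
`training=True` trick for keeping dropout active, and the sample count are
illustrative assumptions, not part of the original script (which uses
purpose-built MC-dropout layers instead).

import tensorflow as tf
import uncertainty_metrics as um  # assumed to be the `um` used throughout

def mc_dropout_ece(model, images, labels, num_samples=8, num_bins=15):
    """Sketch: ECE of the Monte Carlo dropout predictive distribution."""
    # training=True keeps standard Keras dropout layers stochastic (assumption).
    probs_samples = [tf.nn.softmax(model(images, training=True))
                     for _ in range(num_samples)]
    # Average per-sample class probabilities into the predictive mean.
    probs = tf.reduce_mean(tf.stack(probs_samples, axis=0), axis=0)
    metric = um.ExpectedCalibrationError(num_bins=num_bins)
    metric.update_state(labels, probs)
    return metric.result()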
Example No. 30
0
def main(argv):
    del argv  # unused arg
    tf.io.gfile.makedirs(FLAGS.output_dir)
    logging.info('Model checkpoint will be saved at %s', FLAGS.output_dir)
    tf.random.set_seed(FLAGS.seed)

    if FLAGS.use_gpu:
        logging.info('Use GPU')
        strategy = tf.distribute.MirroredStrategy()
    else:
        logging.info('Use TPU at %s',
                     FLAGS.tpu if FLAGS.tpu is not None else 'local')
        resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=FLAGS.tpu)
        tf.config.experimental_connect_to_cluster(resolver)
        tf.tpu.experimental.initialize_tpu_system(resolver)
        strategy = tf.distribute.TPUStrategy(resolver)

    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    test_batch_size = batch_size
    data_buffer_size = batch_size * 10

    train_dataset_builder = ds.WikipediaToxicityDataset(
        split='train',
        data_dir=FLAGS.in_dataset_dir,
        shuffle_buffer_size=data_buffer_size)
    ind_dataset_builder = ds.WikipediaToxicityDataset(
        split='test',
        data_dir=FLAGS.in_dataset_dir,
        shuffle_buffer_size=data_buffer_size)
    ood_dataset_builder = ds.CivilCommentsDataset(
        split='test',
        data_dir=FLAGS.ood_dataset_dir,
        shuffle_buffer_size=data_buffer_size)
    ood_identity_dataset_builder = ds.CivilCommentsIdentitiesDataset(
        split='test',
        data_dir=FLAGS.identity_dataset_dir,
        shuffle_buffer_size=data_buffer_size)

    train_dataset_builders = {
        'wikipedia_toxicity_subtypes': train_dataset_builder
    }
    test_dataset_builders = {
        'ind': ind_dataset_builder,
        'ood': ood_dataset_builder,
        'ood_identity': ood_identity_dataset_builder,
    }

    class_weight = utils.create_class_weight(train_dataset_builders,
                                             test_dataset_builders)
    logging.info('class_weight: %s', str(class_weight))

    ds_info = train_dataset_builder.tfds_info
    # Positive and negative classes.
    num_classes = ds_info.metadata['num_classes']

    train_datasets = {}
    dataset_steps_per_epoch = {}
    total_steps_per_epoch = 0
    for dataset_name, dataset_builder in train_dataset_builders.items():
        train_datasets[dataset_name] = dataset_builder.load(
            batch_size=batch_size)
        dataset_steps_per_epoch[dataset_name] = (
            dataset_builder.num_examples // batch_size)
        total_steps_per_epoch += dataset_steps_per_epoch[dataset_name]

    test_datasets = {}
    steps_per_eval = {}
    for dataset_name, dataset_builder in test_dataset_builders.items():
        test_datasets[dataset_name] = dataset_builder.load(
            batch_size=test_batch_size)
        steps_per_eval[dataset_name] = (dataset_builder.num_examples //
                                        test_batch_size)

    if FLAGS.use_bfloat16:
        # `set_global_policy` replaces the deprecated experimental API.
        tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')

    summary_writer = tf.summary.create_file_writer(
        os.path.join(FLAGS.output_dir, 'summaries'))

    with strategy.scope():
        logging.info('Building %s model', FLAGS.model_family)

        bert_config_dir, bert_ckpt_dir = utils.resolve_bert_ckpt_and_config_dir(
            FLAGS.bert_model_type, FLAGS.bert_dir, FLAGS.bert_config_dir,
            FLAGS.bert_ckpt_dir)
        bert_config = utils.create_config(bert_config_dir)
        model, bert_encoder = ub.models.DropoutBertBuilder(
            num_classes=num_classes,
            bert_config=bert_config,
            use_mc_dropout_mha=FLAGS.use_mc_dropout_mha,
            use_mc_dropout_att=FLAGS.use_mc_dropout_att,
            use_mc_dropout_ffn=FLAGS.use_mc_dropout_ffn,
            use_mc_dropout_output=FLAGS.use_mc_dropout_output,
            channel_wise_dropout_mha=FLAGS.channel_wise_dropout_mha,
            channel_wise_dropout_att=FLAGS.channel_wise_dropout_att,
            channel_wise_dropout_ffn=FLAGS.channel_wise_dropout_ffn)

        optimizer = utils.create_optimizer(
            FLAGS.base_learning_rate,
            steps_per_epoch=total_steps_per_epoch,
            epochs=FLAGS.train_epochs,
            warmup_proportion=FLAGS.warmup_proportion)

        logging.info('Model input shape: %s', model.input_shape)
        logging.info('Model output shape: %s', model.output_shape)
        logging.info('Model number of weights: %s', model.count_params())

        metrics = {
            'train/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'train/accuracy':
            tf.keras.metrics.Accuracy(),
            'train/accuracy_weighted':
            tf.keras.metrics.Accuracy(),
            'train/auroc':
            tf.keras.metrics.AUC(),
            'train/loss':
            tf.keras.metrics.Mean(),
            'train/ece':
            um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'train/precision':
            tf.keras.metrics.Precision(),
            'train/recall':
            tf.keras.metrics.Recall(),
            'train/f1':
            tfa_metrics.F1Score(num_classes=num_classes,
                                average='micro',
                                threshold=FLAGS.ece_label_threshold),
        }

        checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
        if FLAGS.prediction_mode:
            latest_checkpoint = tf.train.latest_checkpoint(
                FLAGS.eval_checkpoint_dir)
        else:
            latest_checkpoint = tf.train.latest_checkpoint(FLAGS.output_dir)
        initial_epoch = 0
        if latest_checkpoint:
            # checkpoint.restore must be within a strategy.scope() so that optimizer
            # slot variables are mirrored.
            checkpoint.restore(latest_checkpoint)
            logging.info('Loaded checkpoint %s', latest_checkpoint)
            initial_epoch = (optimizer.iterations.numpy() //
                             total_steps_per_epoch)
        elif FLAGS.model_family.lower() == 'bert':
            # Load BERT weights from the initial pretraining checkpoint.
            bert_checkpoint = tf.train.Checkpoint(model=bert_encoder)
            bert_checkpoint.restore(
                bert_ckpt_dir).assert_existing_objects_matched()
            logging.info('Loaded BERT checkpoint %s', bert_ckpt_dir)

        metrics.update({
            'test/negative_log_likelihood':
            tf.keras.metrics.Mean(),
            'test/auroc':
            tf.keras.metrics.AUC(curve='ROC'),
            'test/aupr':
            tf.keras.metrics.AUC(curve='PR'),
            'test/brier':
            tf.keras.metrics.MeanSquaredError(),
            'test/brier_weighted':
            tf.keras.metrics.MeanSquaredError(),
            'test/ece':
            um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
            'test/acc':
            tf.keras.metrics.Accuracy(),
            'test/acc_weighted':
            tf.keras.metrics.Accuracy(),
            'test/eval_time':
            tf.keras.metrics.Mean(),
            'test/precision':
            tf.keras.metrics.Precision(),
            'test/recall':
            tf.keras.metrics.Recall(),
            'test/f1':
            tfa_metrics.F1Score(num_classes=num_classes,
                                average='micro',
                                threshold=FLAGS.ece_label_threshold),
        })
        for fraction in FLAGS.fractions:
            metrics.update({
                'test_collab_acc/collab_acc_{}'.format(fraction):
                um.OracleCollaborativeAccuracy(fraction=float(fraction),
                                               num_bins=FLAGS.num_bins)
            })
        for dataset_name, test_dataset in test_datasets.items():
            if dataset_name != 'ind':
                metrics.update({
                    'test/nll_{}'.format(dataset_name):
                    tf.keras.metrics.Mean(),
                    'test/auroc_{}'.format(dataset_name):
                    tf.keras.metrics.AUC(curve='ROC'),
                    'test/aupr_{}'.format(dataset_name):
                    tf.keras.metrics.AUC(curve='PR'),
                    'test/brier_{}'.format(dataset_name):
                    tf.keras.metrics.MeanSquaredError(),
                    'test/brier_weighted_{}'.format(dataset_name):
                    tf.keras.metrics.MeanSquaredError(),
                    'test/ece_{}'.format(dataset_name):
                    um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
                    'test/acc_{}'.format(dataset_name):
                    tf.keras.metrics.Accuracy(),
                    'test/acc_weighted_{}'.format(dataset_name):
                    tf.keras.metrics.Accuracy(),
                    'test/eval_time_{}'.format(dataset_name):
                    tf.keras.metrics.Mean(),
                    'test/precision_{}'.format(dataset_name):
                    tf.keras.metrics.Precision(),
                    'test/recall_{}'.format(dataset_name):
                    tf.keras.metrics.Recall(),
                    'test/f1_{}'.format(dataset_name):
                    tfa_metrics.F1Score(num_classes=num_classes,
                                        average='micro',
                                        threshold=FLAGS.ece_label_threshold),
                })
                for fraction in FLAGS.fractions:
                    metrics.update({
                        'test_collab_acc/collab_acc_{}_{}'.format(
                            fraction, dataset_name):
                        um.OracleCollaborativeAccuracy(
                            fraction=float(fraction), num_bins=FLAGS.num_bins)
                    })

    @tf.function
    def generate_sample_weight(labels, class_weight, label_threshold=0.7):
        """Generate sample weight for weighted accuracy calculation."""
        if label_threshold != 0.7:
            logging.warning(
                'The class weights were computed for `label_threshold` = 0.7 '
                '(the value recommended by the Jigsaw Conversation AI team); '
                'the weighted accuracy/Brier metrics are meaningless for any '
                'other `label_threshold`.')
        labels_int = tf.cast(labels > label_threshold, tf.int32)
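        # tf.gather maps label 0 -> class_weight[0] and label 1 ->
        # class_weight[1]; `class_weight` is assumed indexed by class id.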
        sample_weight = tf.gather(class_weight, labels_int)
        return sample_weight

    @tf.function
    def train_step(iterator, dataset_name):
        """Training StepFn."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            features, labels, _ = utils.create_feature_and_label(inputs)

            with tf.GradientTape() as tape:
                logits = model(features, training=True)

                if FLAGS.use_bfloat16:
                    logits = tf.cast(logits, tf.float32)

                loss_logits = tf.squeeze(logits, axis=1)
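                # Note: these logging.info calls execute only while the
                # tf.function is being traced, not on every training step.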
                if FLAGS.loss_type == 'cross_entropy':
                    logging.info('Using cross entropy loss')
                    negative_log_likelihood = tf.nn.sigmoid_cross_entropy_with_logits(
                        labels, loss_logits)
                elif FLAGS.loss_type == 'focal_cross_entropy':
                    logging.info('Using focal cross entropy loss')
                    negative_log_likelihood = tfa_losses.sigmoid_focal_crossentropy(
                        labels,
                        loss_logits,
                        alpha=FLAGS.focal_loss_alpha,
                        gamma=FLAGS.focal_loss_gamma,
                        from_logits=True)
                elif FLAGS.loss_type == 'mse':
                    logging.info('Using mean squared error loss')
                    loss_probs = tf.nn.sigmoid(loss_logits)
                    negative_log_likelihood = tf.keras.losses.mean_squared_error(
                        labels, loss_probs)
                elif FLAGS.loss_type == 'mae':
                    logging.info('Using mean absolute error loss')
                    loss_probs = tf.nn.sigmoid(loss_logits)
                    negative_log_likelihood = tf.keras.losses.mean_absolute_error(
                        labels, loss_probs)
                else:
                    raise ValueError(
                        'Unknown `loss_type`: {}'.format(FLAGS.loss_type))

                negative_log_likelihood = tf.reduce_mean(
                    negative_log_likelihood)

                l2_loss = sum(model.losses)
                loss = negative_log_likelihood + l2_loss
                # Scale the loss, since the strategy reduce-sums gradients
                # across replicas.
                scaled_loss = loss / strategy.num_replicas_in_sync

            grads = tape.gradient(scaled_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            probs = tf.nn.sigmoid(logits)
            # Cast labels to discrete for ECE computation.
            ece_labels = tf.cast(labels > FLAGS.ece_label_threshold,
                                 tf.float32)
            one_hot_labels = tf.one_hot(tf.cast(ece_labels, tf.int32),
                                        depth=num_classes)
            ece_probs = tf.concat([1. - probs, probs], axis=1)
            auc_probs = tf.squeeze(probs, axis=1)
            pred_labels = tf.math.argmax(ece_probs, axis=-1)
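            # The scalar sigmoid output is expanded to a two-class categorical
            # distribution (e.g. p=0.8 -> [0.2, 0.8]) so the multi-class
            # ECE and F1 metrics can consume it.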

            sample_weight = generate_sample_weight(
                labels, class_weight['train/{}'.format(dataset_name)],
                FLAGS.ece_label_threshold)
            metrics['train/negative_log_likelihood'].update_state(
                negative_log_likelihood)
            metrics['train/accuracy'].update_state(labels, pred_labels)
            metrics['train/accuracy_weighted'].update_state(
                ece_labels, pred_labels, sample_weight=sample_weight)
            metrics['train/auroc'].update_state(labels, auc_probs)
            metrics['train/loss'].update_state(loss)
            metrics['train/ece'].update_state(ece_labels, ece_probs)
            metrics['train/precision'].update_state(ece_labels, pred_labels)
            metrics['train/recall'].update_state(ece_labels, pred_labels)
            metrics['train/f1'].update_state(one_hot_labels, ece_probs)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def test_step(iterator, dataset_name):
        """Evaluation StepFn to log metrics."""
        def step_fn(inputs):
            """Per-Replica StepFn."""
            features, labels, _ = utils.create_feature_and_label(inputs)

            eval_start_time = time.time()
            logits = model(features, training=False)
            eval_time = (time.time() -
                         eval_start_time) / FLAGS.per_core_batch_size
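            # eval_time is seconds per example on this replica; it feeds the
            # test/eval_time* Mean metrics below.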

            if FLAGS.use_bfloat16:
                logits = tf.cast(logits, tf.float32)
            probs = tf.nn.sigmoid(logits)
            # Cast labels to discrete for ECE computation.
            ece_labels = tf.cast(labels > FLAGS.ece_label_threshold,
                                 tf.float32)
            one_hot_labels = tf.one_hot(tf.cast(ece_labels, tf.int32),
                                        depth=num_classes)
            ece_probs = tf.concat([1. - probs, probs], axis=1)
            pred_labels = tf.math.argmax(ece_probs, axis=-1)
            auc_probs = tf.squeeze(probs, axis=1)

            loss_logits = tf.squeeze(logits, axis=1)
            negative_log_likelihood = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(labels, loss_logits))

            sample_weight = generate_sample_weight(
                labels, class_weight['test/{}'.format(dataset_name)],
                FLAGS.ece_label_threshold)
            if dataset_name == 'ind':
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/auroc'].update_state(labels, auc_probs)
                metrics['test/aupr'].update_state(labels, auc_probs)
                metrics['test/brier'].update_state(labels, auc_probs)
                metrics['test/brier_weighted'].update_state(
                    tf.expand_dims(labels, -1),
                    probs,
                    sample_weight=sample_weight)
                metrics['test/ece'].update_state(ece_labels, ece_probs)
                metrics['test/acc'].update_state(ece_labels, pred_labels)
                metrics['test/acc_weighted'].update_state(
                    ece_labels, pred_labels, sample_weight=sample_weight)
                metrics['test/eval_time'].update_state(eval_time)
                metrics['test/precision'].update_state(ece_labels, pred_labels)
                metrics['test/recall'].update_state(ece_labels, pred_labels)
                metrics['test/f1'].update_state(one_hot_labels, ece_probs)
                for fraction in FLAGS.fractions:
                    metrics['test_collab_acc/collab_acc_{}'.format(
                        fraction)].update_state(ece_labels, ece_probs)
            else:
                metrics['test/nll_{}'.format(dataset_name)].update_state(
                    negative_log_likelihood)
                metrics['test/auroc_{}'.format(dataset_name)].update_state(
                    labels, auc_probs)
                metrics['test/aupr_{}'.format(dataset_name)].update_state(
                    labels, auc_probs)
                metrics['test/brier_{}'.format(dataset_name)].update_state(
                    labels, auc_probs)
                metrics['test/brier_weighted_{}'.format(
                    dataset_name)].update_state(tf.expand_dims(labels, -1),
                                                probs,
                                                sample_weight=sample_weight)
                metrics['test/ece_{}'.format(dataset_name)].update_state(
                    ece_labels, ece_probs)
                metrics['test/acc_{}'.format(dataset_name)].update_state(
                    ece_labels, pred_labels)
                metrics['test/acc_weighted_{}'.format(
                    dataset_name)].update_state(ece_labels,
                                                pred_labels,
                                                sample_weight=sample_weight)
                metrics['test/eval_time_{}'.format(dataset_name)].update_state(
                    eval_time)
                metrics['test/precision_{}'.format(dataset_name)].update_state(
                    ece_labels, pred_labels)
                metrics['test/recall_{}'.format(dataset_name)].update_state(
                    ece_labels, pred_labels)
                metrics['test/f1_{}'.format(dataset_name)].update_state(
                    one_hot_labels, ece_probs)
                for fraction in FLAGS.fractions:
                    metrics['test_collab_acc/collab_acc_{}_{}'.format(
                        fraction,
                        dataset_name)].update_state(ece_labels, ece_probs)

        strategy.run(step_fn, args=(next(iterator), ))

    @tf.function
    def final_eval_step(iterator):
        """Final Evaluation StepFn to save prediction to directory."""
        def step_fn(inputs):
            bert_features, labels, additional_labels = utils.create_feature_and_label(
                inputs)
            logits = model(bert_features, training=False)
            features = inputs['input_ids']
            return features, logits, labels, additional_labels

        (per_replica_texts, per_replica_logits, per_replica_labels,
         per_replica_additional_labels) = (strategy.run(
             step_fn, args=(next(iterator), )))
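        # With multiple replicas, strategy.run returns PerReplica objects whose
        # `.values` tuple holds one tensor per device; they are concatenated
        # below. With a single replica, plain tensors come back unchanged.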

        if strategy.num_replicas_in_sync > 1:
            texts_list = tf.concat(per_replica_texts.values, axis=0)
            logits_list = tf.concat(per_replica_logits.values, axis=0)
            labels_list = tf.concat(per_replica_labels.values, axis=0)
            additional_labels_dict = {}
            for additional_label in utils.IDENTITY_LABELS:
                if additional_label in per_replica_additional_labels:
                    additional_labels_dict[additional_label] = tf.concat(
                        per_replica_additional_labels[additional_label],
                        axis=0)
        else:
            texts_list = per_replica_texts
            logits_list = per_replica_logits
            labels_list = per_replica_labels
            additional_labels_dict = {}
            for additional_label in utils.IDENTITY_LABELS:
                if additional_label in per_replica_additional_labels:
                    additional_labels_dict[
                        additional_label] = per_replica_additional_labels[
                            additional_label]

        return texts_list, logits_list, labels_list, additional_labels_dict

    if FLAGS.prediction_mode:
        # Prediction and exit.
        for dataset_name, test_dataset in test_datasets.items():
            test_iterator = iter(test_dataset)  # pytype: disable=wrong-arg-types
            message = 'Final eval on dataset {}'.format(dataset_name)
            logging.info(message)

            texts_all = []
            logits_all = []
            labels_all = []
            additional_labels_all_dict = {}
            if 'identity' in dataset_name:
                for identity_label_name in utils.IDENTITY_LABELS:
                    additional_labels_all_dict[identity_label_name] = []

            try:
                with tf.experimental.async_scope():
                    for step in range(steps_per_eval[dataset_name]):
                        if step % 20 == 0:
                            message = 'Starting to run eval step {}/{} of dataset: {}'.format(
                                step, steps_per_eval[dataset_name],
                                dataset_name)
                            logging.info(message)

                        (text_step, logits_step, labels_step,
                         additional_labels_dict_step
                         ) = final_eval_step(test_iterator)

                        texts_all.append(text_step)
                        logits_all.append(logits_step)
                        labels_all.append(labels_step)
                        if 'identity' in dataset_name:
                            for identity_label_name in utils.IDENTITY_LABELS:
                                additional_labels_all_dict[
                                    identity_label_name].append(
                                        additional_labels_dict_step[
                                            identity_label_name])

            except (StopIteration, tf.errors.OutOfRangeError):
                tf.experimental.async_clear_error()
                logging.info('Done with eval on %s', dataset_name)

            texts_all = tf.concat(texts_all, axis=0)
            logits_all = tf.concat(logits_all, axis=0)
            labels_all = tf.concat(labels_all, axis=0)
            additional_labels_all = []
            if additional_labels_all_dict:
                for identity_label_name in utils.IDENTITY_LABELS:
                    additional_labels_all.append(
                        tf.concat(
                            additional_labels_all_dict[identity_label_name],
                            axis=0))
            additional_labels_all = tf.convert_to_tensor(additional_labels_all)

            utils.save_prediction(texts_all.numpy(),
                                  path=os.path.join(
                                      FLAGS.output_dir,
                                      'texts_{}'.format(dataset_name)))
            utils.save_prediction(labels_all.numpy(),
                                  path=os.path.join(
                                      FLAGS.output_dir,
                                      'labels_{}'.format(dataset_name)))
            utils.save_prediction(logits_all.numpy(),
                                  path=os.path.join(
                                      FLAGS.output_dir,
                                      'logits_{}'.format(dataset_name)))
            if 'identity' in dataset_name:
                utils.save_prediction(
                    additional_labels_all.numpy(),
                    path=os.path.join(
                        FLAGS.output_dir,
                        'additional_labels_{}'.format(dataset_name)))
            logging.info('Done with testing on %s', dataset_name)

    else:
        # Execute train / eval loop.
        start_time = time.time()
        train_iterators = {}
        for dataset_name, train_dataset in train_datasets.items():
            train_iterators[dataset_name] = iter(train_dataset)
        for epoch in range(initial_epoch, FLAGS.train_epochs):
            logging.info('Starting to run epoch: %s', epoch)
            current_step = epoch * total_steps_per_epoch
            for dataset_name, train_iterator in train_iterators.items():
                for step in range(dataset_steps_per_epoch[dataset_name]):
                    train_step(train_iterator, dataset_name)

                    current_step += 1
                    max_steps = total_steps_per_epoch * FLAGS.train_epochs
                    time_elapsed = time.time() - start_time
                    steps_per_sec = float(current_step) / time_elapsed
                    eta_seconds = (max_steps - current_step) / steps_per_sec
                    message = (
                        '{:.1%} completion: epoch {:d}/{:d}. {:.1f} steps/s. '
                        'ETA: {:.0f} min. Time elapsed: {:.0f} min'.format(
                            current_step / max_steps, epoch + 1,
                            FLAGS.train_epochs, steps_per_sec,
                            eta_seconds / 60, time_elapsed / 60))
                    if step % 20 == 0:
                        logging.info(message)

            if epoch % FLAGS.evaluation_interval == 0:
                for dataset_name, test_dataset in test_datasets.items():
                    test_iterator = iter(test_dataset)  # pytype: disable=wrong-arg-types
                    logging.info('Testing on dataset %s', dataset_name)

                    try:
                        with tf.experimental.async_scope():
                            for step in range(steps_per_eval[dataset_name]):
                                if step % 20 == 0:
                                    logging.info(
                                        'Starting to run eval step %s/%s of epoch: %s',
                                        step, steps_per_eval[dataset_name],
                                        epoch)
                                test_step(test_iterator, dataset_name)
                    except (StopIteration, tf.errors.OutOfRangeError):
                        tf.experimental.async_clear_error()
                        logging.info('Done with testing on %s', dataset_name)

                logging.info('Train Loss: %.4f, AUROC: %.4f',
                             metrics['train/loss'].result(),
                             metrics['train/auroc'].result())
                logging.info('Test NLL: %.4f, AUROC: %.4f',
                             metrics['test/negative_log_likelihood'].result(),
                             metrics['test/auroc'].result())

                # Record metric results for this epoch.
                total_results = {}
                for name, metric in metrics.items():
                    total_results[name] = metric.result()

                with summary_writer.as_default():
                    for name, result in total_results.items():
                        tf.summary.scalar(name, result, step=epoch + 1)

            for name, metric in metrics.items():
                metric.reset_states()

            checkpoint_interval = min(FLAGS.checkpoint_interval,
                                      FLAGS.train_epochs)
            if checkpoint_interval > 0 and (epoch +
                                            1) % checkpoint_interval == 0:
                checkpoint_name = checkpoint.save(
                    os.path.join(FLAGS.output_dir, 'checkpoint'))
                logging.info('Saved checkpoint to %s', checkpoint_name)

        # Save model in SavedModel format on exit.
        final_save_name = os.path.join(FLAGS.output_dir, 'model')
        model.save(final_save_name)
        logging.info('Saved model to %s', final_save_name)
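
For reference, a conceptual NumPy sketch of what the OracleCollaborativeAccuracy
metrics above measure: the least-confident fraction of examples is deferred to
a perfect oracle (counted as correct), and the model is scored on the rest.
This is an unbinned illustration of the idea, not the `um` implementation, and
all names here are illustrative.

import numpy as np

def oracle_collaborative_accuracy_sketch(labels, probs, fraction):
    """Defer the least-confident `fraction` of binary predictions to an oracle."""
    labels = np.asarray(labels, dtype=np.float64)
    probs = np.asarray(probs, dtype=np.float64)
    confidence = np.maximum(probs, 1.0 - probs)  # confidence of the binary prediction
    preds = (probs > 0.5).astype(np.float64)
    num_deferred = int(fraction * len(labels))
    order = np.argsort(confidence)  # least confident first
    kept = order[num_deferred:]     # the model handles the rest
    # The oracle answers every deferred example correctly.
    num_correct = num_deferred + np.sum(preds[kept] == labels[kept])
    return num_correct / len(labels)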