Example #1
  def testEnsembleCrossEntropy(self):
    """Checks that ensemble cross entropy lower-bounds Gibbs cross entropy."""
    # For multi-class classification.
    batch_size = 2
    num_classes = 3
    ensemble_size = 5
    labels = tf.random.uniform(
        [batch_size], minval=0, maxval=num_classes, dtype=tf.int32)
    logits = tf.random.normal([ensemble_size, batch_size, num_classes])
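    # By Jensen's inequality, the cross entropy of the ensemble's mean
    # probability is at most the mean of the per-member cross entropies.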
    ensemble_error = um.ensemble_cross_entropy(labels, logits)
    gibbs_error = um.gibbs_cross_entropy(labels, logits)
    self.assertEqual(ensemble_error.shape, ())
    self.assertEqual(gibbs_error.shape, ())
    self.assertLessEqual(ensemble_error, gibbs_error)

    # For binary classification.
    num_classes = 1
    labels = tf.random.uniform(
        [batch_size], minval=0, maxval=num_classes, dtype=tf.float32)
    logits = tf.random.normal([ensemble_size, batch_size, num_classes])
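    # Squeeze the trailing class dimension so the binary logits have shape
    # [ensemble_size, batch_size], matching the float labels.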
    loss_logits = tf.squeeze(logits, axis=-1)
    ensemble_error = um.ensemble_cross_entropy(labels, loss_logits, binary=True)
    gibbs_error = um.gibbs_cross_entropy(labels, loss_logits, binary=True)
    self.assertEqual(ensemble_error.shape, ())
    self.assertEqual(gibbs_error.shape, ())
    self.assertLessEqual(ensemble_error, gibbs_error)
Example #2
def main(argv):
    del argv  # unused arg
    if not FLAGS.use_gpu:
        raise ValueError('Only GPU is currently supported.')
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    tf.random.set_seed(FLAGS.seed)
    tf.io.gfile.makedirs(FLAGS.output_dir)

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    dataset = ub.datasets.get(
        FLAGS.dataset, split=tfds.Split.TEST).load(batch_size=batch_size)
    test_datasets = {'clean': dataset}
    extra_kwargs = {}
    if FLAGS.dataset == 'cifar100':
        extra_kwargs['data_dir'] = FLAGS.cifar100_c_path
    corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
    for corruption_type in corruption_types:
        for severity in range(1, 6):
            dataset = ub.datasets.get(
                f'{FLAGS.dataset}_corrupted',
                corruption_type=corruption_type,
                severity=severity,
                split=tfds.Split.TEST,
                **extra_kwargs).load(batch_size=batch_size)
            test_datasets[f'{corruption_type}_{severity}'] = dataset

    model = ub.models.wide_resnet(input_shape=ds_info.features['image'].shape,
                                  depth=28,
                                  width_multiplier=10,
                                  num_classes=num_classes,
                                  l2=0.,
                                  version=2)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())

    # Search for checkpoints from their index file; then remove the index suffix.
    ensemble_filenames = parse_checkpoint_dir(FLAGS.checkpoint_dir)
    ensemble_size = len(ensemble_filenames)
    logging.info('Ensemble size: %s', ensemble_size)
    logging.info('Ensemble number of weights: %s',
                 ensemble_size * model.count_params())
    logging.info('Ensemble filenames: %s', str(ensemble_filenames))
    checkpoint = tf.train.Checkpoint(model=model)

    # Write model predictions to files.
    num_datasets = len(test_datasets)
    for m, ensemble_filename in enumerate(ensemble_filenames):
        checkpoint.restore(ensemble_filename)
        for n, (name, test_dataset) in enumerate(test_datasets.items()):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
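            # Reuse cached logits if this member/dataset pair was already
            # computed in a previous run.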
            if not tf.io.gfile.exists(filename):
                logits = []
                test_iterator = iter(test_dataset)
                for _ in range(steps_per_eval):
                    features = next(test_iterator)['features']  # pytype: disable=unsupported-operands
                    logits.append(model(features, training=False))

                logits = tf.concat(logits, axis=0)
                with tf.io.gfile.GFile(filename, 'w') as f:
                    np.save(f, logits.numpy())
            percent = (m * num_datasets +
                       (n + 1)) / (ensemble_size * num_datasets)
            message = (
                '{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                'Dataset {:d}/{:d}'.format(percent, m + 1, ensemble_size,
                                           n + 1, num_datasets))
            logging.info(message)

    metrics = {
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': rm.metrics.ExpectedCalibrationError(
            num_bins=FLAGS.num_bins),
    }
    corrupt_metrics = {}
    for name in test_datasets:
        corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
        corrupt_metrics['test/accuracy_{}'.format(name)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())
        corrupt_metrics['test/ece_{}'.format(name)] = (
            rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
    for i in range(ensemble_size):
        metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
        metrics['test/accuracy_member_{}'.format(i)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())
    test_diversity = {
        'test/disagreement': tf.keras.metrics.Mean(),
        'test/average_kl': tf.keras.metrics.Mean(),
        'test/cosine_similarity': tf.keras.metrics.Mean(),
    }
    metrics.update(test_diversity)

    # Evaluate model predictions.
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
        logits_dataset = []
        for m in range(ensemble_size):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            with tf.io.gfile.GFile(filename, 'rb') as f:
                logits_dataset.append(np.load(f))

        logits_dataset = tf.convert_to_tensor(logits_dataset)
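        # Shape: [ensemble_size, num_examples, num_classes].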
        test_iterator = iter(test_dataset)
        for step in range(steps_per_eval):
            labels = next(test_iterator)['labels']  # pytype: disable=unsupported-operands
            logits = logits_dataset[:, (step * batch_size):((step + 1) *
                                                            batch_size)]
            labels = tf.cast(labels, tf.int32)
            negative_log_likelihood = um.ensemble_cross_entropy(labels, logits)
            per_probs = tf.nn.softmax(logits)
            probs = tf.reduce_mean(per_probs, axis=0)
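            # per_probs holds each member's softmax probabilities; probs is the
            # ensemble mixture obtained by averaging over members.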
            if name == 'clean':
                gibbs_ce = um.gibbs_cross_entropy(labels, logits)
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].add_batch(probs, label=labels)

                for i in range(ensemble_size):
                    member_probs = per_probs[i]
                    member_loss = tf.keras.losses.sparse_categorical_crossentropy(
                        labels, member_probs)
                    metrics['test/nll_member_{}'.format(i)].update_state(
                        member_loss)
                    metrics['test/accuracy_member_{}'.format(i)].update_state(
                        labels, member_probs)
                diversity = rm.metrics.AveragePairwiseDiversity()
                diversity.add_batch(per_probs, num_models=ensemble_size)
                diversity_results = diversity.result()
                for k, v in diversity_results.items():
                    test_diversity['test/' + k].update_state(v)
            else:
                corrupt_metrics['test/nll_{}'.format(name)].update_state(
                    negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
                    labels, probs)
                corrupt_metrics['test/ece_{}'.format(name)].add_batch(
                    probs, label=labels)

        message = (
            '{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
                (n + 1) / num_datasets, n + 1, num_datasets))
        logging.info(message)

    corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                      corruption_types)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    # Metrics from Robustness Metrics (like ECE) will return a dict with a
    # single key/value, instead of a scalar.
    total_results = {
        k: (list(v.values())[0] if isinstance(v, dict) else v)
        for k, v in total_results.items()
    }
    logging.info('Metrics: %s', total_results)
Example #3
def main(argv):
  del argv  # unused arg
  if not FLAGS.use_gpu:
    raise ValueError('Only GPU is currently supported.')
  if FLAGS.num_cores > 1:
    raise ValueError('Only a single accelerator is currently supported.')
  tf.random.set_seed(FLAGS.seed)
  tf.io.gfile.makedirs(FLAGS.output_dir)

  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_eval = IMAGENET_VALIDATION_IMAGES // batch_size

  builder = utils.ImageNetInput(data_dir=FLAGS.data_dir,
                                use_bfloat16=False)
  clean_test_dataset = builder.as_dataset(split=tfds.Split.TEST,
                                          batch_size=batch_size)
  test_datasets = {'clean': clean_test_dataset}
  corruption_types, max_intensity = utils.load_corrupted_test_info()
  for name in corruption_types:
    for intensity in range(1, max_intensity + 1):
      dataset_name = '{0}_{1}'.format(name, intensity)
      test_datasets[dataset_name] = utils.load_corrupted_test_dataset(
          corruption_name=name,
          corruption_intensity=intensity,
          batch_size=batch_size,
          drop_remainder=True,
          use_bfloat16=False)

  model = ub.models.resnet50_deterministic(input_shape=(224, 224, 3),
                                           num_classes=NUM_CLASSES)

  logging.info('Model input shape: %s', model.input_shape)
  logging.info('Model output shape: %s', model.output_shape)
  logging.info('Model number of weights: %s', model.count_params())
  # Search for checkpoints from their index file; then remove the index suffix.
  ensemble_filenames = tf.io.gfile.glob(os.path.join(FLAGS.checkpoint_dir,
                                                     '**/*.index'))
  ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
  ensemble_size = len(ensemble_filenames)
  logging.info('Ensemble size: %s', ensemble_size)
  logging.info('Ensemble number of weights: %s',
               ensemble_size * model.count_params())
  logging.info('Ensemble filenames: %s', str(ensemble_filenames))
  checkpoint = tf.train.Checkpoint(model=model)

  # Write model predictions to files.
  num_datasets = len(test_datasets)
  for m, ensemble_filename in enumerate(ensemble_filenames):
    checkpoint.restore(ensemble_filename)
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      if not tf.io.gfile.exists(filename):
        logits = []
        test_iterator = iter(test_dataset)
        for _ in range(steps_per_eval):
          features, _ = next(test_iterator)  # pytype: disable=attribute-error
          logits.append(model(features, training=False))

        logits = tf.concat(logits, axis=0)
        with tf.io.gfile.GFile(filename, 'w') as f:
          np.save(f, logits.numpy())
      percent = (m * num_datasets + (n + 1)) / (ensemble_size * num_datasets)
      message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                 'Dataset {:d}/{:d}'.format(percent,
                                            m + 1,
                                            ensemble_size,
                                            n + 1,
                                            num_datasets))
      logging.info(message)

  metrics = {
      'test/negative_log_likelihood': tf.keras.metrics.Mean(),
      'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
      'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
      'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
  }
  corrupt_metrics = {}
  for name in test_datasets:
    corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
    corrupt_metrics['test/accuracy_{}'.format(name)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())
    corrupt_metrics['test/ece_{}'.format(name)] = (
        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

  # Evaluate model predictions.
  for n, (name, test_dataset) in enumerate(test_datasets.items()):
    logits_dataset = []
    for m in range(ensemble_size):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      with tf.io.gfile.GFile(filename, 'rb') as f:
        logits_dataset.append(np.load(f))

    logits_dataset = tf.convert_to_tensor(logits_dataset)
    test_iterator = iter(test_dataset)
    for step in range(steps_per_eval):
      _, labels = next(test_iterator)  # pytype: disable=attribute-error
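      # Slice out the cached logits (all members) for the current label batch.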
      logits = logits_dataset[:, (step*batch_size):((step+1)*batch_size)]
      labels = tf.cast(tf.reshape(labels, [-1]), tf.int32)
      negative_log_likelihood = um.ensemble_cross_entropy(labels, logits)
      per_probs = tf.nn.softmax(logits)
      probs = tf.reduce_mean(per_probs, axis=0)
      if name == 'clean':
        gibbs_ce = um.gibbs_cross_entropy(labels, logits)
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)
      else:
        corrupt_metrics['test/nll_{}'.format(name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(name)].update_state(
            labels, probs)

    message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
        (n + 1) / num_datasets, n + 1, num_datasets))
    logging.info(message)

  corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                    corruption_types,
                                                    max_intensity,
                                                    FLAGS.alexnet_errors_path)
  total_results = {name: metric.result() for name, metric in metrics.items()}
  total_results.update(corrupt_results)
  logging.info('Metrics: %s', total_results)
Example #4
def main(argv):
  del argv  # unused arg
  if not FLAGS.use_gpu:
    raise ValueError('Only GPU is currently supported.')
  if FLAGS.num_cores > 1:
    raise ValueError('Only a single accelerator is currently supported.')
  tf.random.set_seed(FLAGS.seed)
  tf.io.gfile.makedirs(FLAGS.output_dir)

  ds_info = tfds.builder(FLAGS.dataset).info
  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_eval = ds_info.splits['test'].num_examples // batch_size
  num_classes = ds_info.features['label'].num_classes

  dataset_input_fn = utils.load_input_fn(
      split=tfds.Split.TEST,
      name=FLAGS.dataset,
      batch_size=FLAGS.per_core_batch_size,
      use_bfloat16=FLAGS.use_bfloat16)
  test_datasets = {'clean': dataset_input_fn()}
  corruption_types, max_intensity = utils.load_corrupted_test_info(
      FLAGS.dataset)
  for name in corruption_types:
    for intensity in range(1, max_intensity + 1):
      dataset_name = '{0}_{1}'.format(name, intensity)
      if FLAGS.dataset == 'cifar10':
        load_c_dataset = utils.load_cifar10_c_input_fn
      else:
        load_c_dataset = functools.partial(
            utils.load_cifar100_c_input_fn, path=FLAGS.cifar100_c_path)
      corrupted_input_fn = load_c_dataset(
          corruption_name=name,
          corruption_intensity=intensity,
          batch_size=FLAGS.per_core_batch_size,
          use_bfloat16=FLAGS.use_bfloat16)
      test_datasets[dataset_name] = corrupted_input_fn()

  model = ub.models.wide_resnet(
      input_shape=ds_info.features['image'].shape,
      depth=28,
      width_multiplier=10,
      num_classes=num_classes,
      l2=0.,
      version=2)
  logging.info('Model input shape: %s', model.input_shape)
  logging.info('Model output shape: %s', model.output_shape)
  logging.info('Model number of weights: %s', model.count_params())

  # Search for checkpoints from their index file; then remove the index suffix.
  ensemble_filenames = tf.io.gfile.glob(os.path.join(FLAGS.checkpoint_dir,
                                                     '**/*.index'))
  ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
  ensemble_size = len(ensemble_filenames)
  logging.info('Ensemble size: %s', ensemble_size)
  logging.info('Ensemble number of weights: %s',
               ensemble_size * model.count_params())
  logging.info('Ensemble filenames: %s', str(ensemble_filenames))
  checkpoint = tf.train.Checkpoint(model=model)

  # Write model predictions to files.
  num_datasets = len(test_datasets)
  for m, ensemble_filename in enumerate(ensemble_filenames):
    checkpoint.restore(ensemble_filename)
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      if not tf.io.gfile.exists(filename):
        logits = []
        test_iterator = iter(test_dataset)
        for _ in range(steps_per_eval):
          features, _ = next(test_iterator)  # pytype: disable=attribute-error
          logits.append(model(features, training=False))

        logits = tf.concat(logits, axis=0)
        with tf.io.gfile.GFile(filename, 'w') as f:
          np.save(f, logits.numpy())
      percent = (m * num_datasets + (n + 1)) / (ensemble_size * num_datasets)
      message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                 'Dataset {:d}/{:d}'.format(percent,
                                            m + 1,
                                            ensemble_size,
                                            n + 1,
                                            num_datasets))
      logging.info(message)

  metrics = {
      'test/negative_log_likelihood': tf.keras.metrics.Mean(),
      'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
      'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
      'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
  }
  corrupt_metrics = {}
  for name in test_datasets:
    corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
    corrupt_metrics['test/accuracy_{}'.format(name)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())
    corrupt_metrics['test/ece_{}'.format(name)] = (
        um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
  for i in range(ensemble_size):
    metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
    metrics['test/accuracy_member_{}'.format(i)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())
  test_diversity = {
      'test/disagreement': tf.keras.metrics.Mean(),
      'test/average_kl': tf.keras.metrics.Mean(),
      'test/cosine_similarity': tf.keras.metrics.Mean(),
  }
  metrics.update(test_diversity)

  # Evaluate model predictions.
  for n, (name, test_dataset) in enumerate(test_datasets.items()):
    logits_dataset = []
    for m in range(ensemble_size):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
      filename = os.path.join(FLAGS.output_dir, filename)
      with tf.io.gfile.GFile(filename, 'rb') as f:
        logits_dataset.append(np.load(f))

    logits_dataset = tf.convert_to_tensor(logits_dataset)
    test_iterator = iter(test_dataset)
    for step in range(steps_per_eval):
      _, labels = next(test_iterator)  # pytype: disable=attribute-error
      logits = logits_dataset[:, (step*batch_size):((step+1)*batch_size)]
      labels = tf.cast(labels, tf.int32)
      negative_log_likelihood = um.ensemble_cross_entropy(labels, logits)
      per_probs = tf.nn.softmax(logits)
      probs = tf.reduce_mean(per_probs, axis=0)
      if name == 'clean':
        gibbs_ce = um.gibbs_cross_entropy(labels, logits)
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].update_state(labels, probs)

        for i in range(ensemble_size):
          member_probs = per_probs[i]
          member_loss = tf.keras.losses.sparse_categorical_crossentropy(
              labels, member_probs)
          metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
          metrics['test/accuracy_member_{}'.format(i)].update_state(
              labels, member_probs)
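        # Average pairwise disagreement, KL divergence, and cosine similarity
        # across ensemble members.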
        diversity_results = um.average_pairwise_diversity(
            per_probs, ensemble_size)
        for k, v in diversity_results.items():
          test_diversity['test/' + k].update_state(v)
      else:
        corrupt_metrics['test/nll_{}'.format(name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(name)].update_state(
            labels, probs)

    message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
        (n + 1) / num_datasets, n + 1, num_datasets))
    logging.info(message)

  corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                    corruption_types,
                                                    max_intensity)
  total_results = {name: metric.result() for name, metric in metrics.items()}
  total_results.update(corrupt_results)
  logging.info('Metrics: %s', total_results)
Example #5
def main(argv):
    del argv  # unused arg
    if not FLAGS.use_gpu:
        raise ValueError('Only GPU is currently supported.')
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    tf.random.set_seed(FLAGS.seed)
    tf.io.gfile.makedirs(FLAGS.output_dir)

    ind_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        split='test', data_dir=FLAGS.data_dir, data_mode='ind')
    ood_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        split='test', data_dir=FLAGS.data_dir, data_mode='ood')
    all_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        split='test', data_dir=FLAGS.data_dir, data_mode='all')

    dataset_builders = {
        'clean': ind_dataset_builder,
        'ood': ood_dataset_builder,
        'all': all_dataset_builder
    }

    ds_info = ind_dataset_builder.tfds_info
    feature_size = ds_info.metadata['feature_size']
    # num_classes is the number of valid intents plus the out-of-scope intent.
    num_classes = ds_info.features['intent_label'].num_classes + 1
    # vocab_size is the total number of valid tokens plus the out-of-vocabulary token.
    vocab_size = ind_dataset_builder.tokenizer.num_words + 1

    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores

    test_datasets = {}
    steps_per_eval = {}
    for dataset_name, dataset_builder in dataset_builders.items():
        test_datasets[dataset_name] = dataset_builder.load(
            batch_size=batch_size)
        steps_per_eval[dataset_name] = (
            dataset_builder.num_examples // batch_size)

    bert_config_dir, _ = sngp.resolve_bert_ckpt_and_config_dir(
        FLAGS.bert_dir, FLAGS.bert_config_dir, FLAGS.bert_ckpt_dir)
    bert_config = bert_utils.create_config(bert_config_dir)

    gp_layer_kwargs = dict(num_inducing=FLAGS.gp_hidden_dim,
                           gp_kernel_scale=FLAGS.gp_scale,
                           gp_output_bias=FLAGS.gp_bias,
                           normalize_input=FLAGS.gp_input_normalization,
                           gp_cov_momentum=FLAGS.gp_cov_discount_factor,
                           gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty)
    spec_norm_kwargs = dict(iteration=FLAGS.spec_norm_iteration,
                            norm_multiplier=FLAGS.spec_norm_bound)

    model, bert_encoder = ub.models.SngpBertBuilder(
        num_classes=num_classes,
        bert_config=bert_config,
        gp_layer_kwargs=gp_layer_kwargs,
        spec_norm_kwargs=spec_norm_kwargs,
        use_gp_layer=FLAGS.use_gp_layer,
        use_spec_norm_att=FLAGS.use_spec_norm_att,
        use_spec_norm_ffn=FLAGS.use_spec_norm_ffn,
        use_layer_norm_att=FLAGS.use_layer_norm_att,
        use_layer_norm_ffn=FLAGS.use_layer_norm_ffn,
        use_spec_norm_plr=FLAGS.use_spec_norm_plr)

    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())

    # Search for checkpoints from their index file; then remove the index suffix.
    ensemble_filenames = tf.io.gfile.glob(
        os.path.join(FLAGS.checkpoint_dir, '**/*.index'))
    ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
    ensemble_size = len(ensemble_filenames)
    logging.info('Ensemble size: %s', ensemble_size)
    logging.info('Ensemble number of weights: %s',
                 ensemble_size * model.count_params())
    logging.info('Ensemble filenames: %s', str(ensemble_filenames))
    checkpoint = tf.train.Checkpoint(model=model)

    # Write model predictions to files.
    num_datasets = len(test_datasets)
    for m, ensemble_filename in enumerate(ensemble_filenames):
        checkpoint.restore(ensemble_filename)
        for n, (name, test_dataset) in enumerate(test_datasets.items()):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            if not tf.io.gfile.exists(filename):
                logits_list = []
                test_iterator = iter(test_dataset)
                for _ in range(steps_per_eval[name]):
                    inputs = next(test_iterator)
                    features, _ = bert_utils.create_feature_and_label(
                        inputs, feature_size)

                    logits, covmat = model(features, training=False)
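                    # Apply the mean-field approximation so the logits account
                    # for the GP posterior covariance before taking a softmax.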
                    logits = ed.layers.utils.mean_field_logits(
                        logits,
                        covmat,
                        mean_field_factor=FLAGS.gp_mean_field_factor)

                    logits_list.append(logits)

                logits_list = tf.concat(logits_list, axis=0)
                with tf.io.gfile.GFile(filename, 'w') as f:
                    np.save(f, logits_list.numpy())

            percent = (m * num_datasets +
                       (n + 1)) / (ensemble_size * num_datasets)
            message = (
                '{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                'Dataset {:d}/{:d}'.format(percent, m + 1, ensemble_size,
                                           n + 1, num_datasets))
            logging.info(message)

    metrics = {
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
    }

    for dataset_name in test_datasets:
        if dataset_name != 'clean':
            metrics.update({
                'test/nll_{}'.format(dataset_name):
                tf.keras.metrics.Mean(),
                'test/accuracy_{}'.format(dataset_name):
                tf.keras.metrics.SparseCategoricalAccuracy(),
                'test/ece_{}'.format(dataset_name):
                um.ExpectedCalibrationError(num_bins=FLAGS.num_bins)
            })

    # Finally, define OOD metrics for the combined IND and OOD dataset.
    metrics.update({
        'test/auroc_all': tf.keras.metrics.AUC(curve='ROC'),
        'test/auprc_all': tf.keras.metrics.AUC(curve='PR')
    })

    # Evaluate model predictions.
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
        logits_dataset = []
        for m in range(ensemble_size):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            with tf.io.gfile.GFile(filename, 'rb') as f:
                logits_dataset.append(np.load(f))

        logits_dataset = tf.convert_to_tensor(logits_dataset)
        test_iterator = iter(test_dataset)
        for step in range(steps_per_eval[name]):
            inputs = next(test_iterator)
            _, labels = bert_utils.create_feature_and_label(
                inputs, feature_size)
            logits = logits_dataset[:, (step * batch_size):((step + 1) *
                                                            batch_size)]
            labels = tf.cast(labels, tf.int32)
            negative_log_likelihood = um.ensemble_cross_entropy(labels, logits)
            per_probs = tf.nn.softmax(logits)
            probs = tf.reduce_mean(per_probs, axis=0)
            if name == 'clean':
                gibbs_ce = um.gibbs_cross_entropy(labels, logits)
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            else:
                metrics['test/nll_{}'.format(name)].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy_{}'.format(name)].update_state(
                    labels, probs)
                metrics['test/ece_{}'.format(name)].update_state(labels, probs)

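            # CLINC OOS uses label 150 for the out-of-scope class; score OOD
            # examples by one minus the maximum predicted probability.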
            if name == 'all':
                ood_labels = tf.cast(labels == 150, labels.dtype)
                ood_probs = 1. - tf.reduce_max(probs, axis=-1)
                metrics['test/auroc_{}'.format(name)].update_state(
                    ood_labels, ood_probs)
                metrics['test/auprc_{}'.format(name)].update_state(
                    ood_labels, ood_probs)

        message = (
            '{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
                (n + 1) / num_datasets, n + 1, num_datasets))
        logging.info(message)

    total_results = {name: metric.result() for name, metric in metrics.items()}
    logging.info('Metrics: %s', total_results)
Example #6
def main(argv):
  del argv  # unused arg
  if not FLAGS.use_gpu:
    raise ValueError('Only GPU is currently supported.')
  if FLAGS.num_cores > 1:
    raise ValueError('Only a single accelerator is currently supported.')
  tf.random.set_seed(FLAGS.seed)
  tf.io.gfile.makedirs(FLAGS.output_dir)

  ds_info = tfds.builder(FLAGS.dataset).info
  batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
  steps_per_eval = ds_info.splits['test'].num_examples // batch_size
  num_classes = ds_info.features['label'].num_classes

  dataset = ub.datasets.get(
      FLAGS.dataset,
      split=tfds.Split.TEST).load(batch_size=batch_size)
  validation_percent = 1. - FLAGS.train_proportion
  val_dataset = ub.datasets.get(
      dataset_name=FLAGS.dataset,
      split=tfds.Split.VALIDATION,
      validation_percent=validation_percent,
      drop_remainder=False).load(batch_size=batch_size)
  steps_per_val_eval = int(ds_info.splits['train'].num_examples *
                           validation_percent) // batch_size

  test_datasets = {'clean': dataset}
  extra_kwargs = {}
  if FLAGS.dataset == 'cifar100':
    extra_kwargs['data_dir'] = FLAGS.cifar100_c_path
  corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
  for corruption_type in corruption_types:
    for severity in range(1, 6):
      dataset = ub.datasets.get(
          f'{FLAGS.dataset}_corrupted',
          corruption_type=corruption_type,
          severity=severity,
          split=tfds.Split.TEST,
          **extra_kwargs).load(batch_size=batch_size)
      test_datasets[f'{corruption_type}_{severity}'] = dataset

  model = ub.models.wide_resnet(
      input_shape=ds_info.features['image'].shape,
      depth=28,
      width_multiplier=10,
      num_classes=num_classes,
      l2=0.,
      version=2)
  logging.info('Model input shape: %s', model.input_shape)
  logging.info('Model output shape: %s', model.output_shape)
  logging.info('Model number of weights: %s', model.count_params())

  # Search for checkpoints
  ensemble_filenames = parse_checkpoint_dir(FLAGS.checkpoint_dir)

  model_pool_size = len(ensemble_filenames)
  logging.info('Model pool size: %s', model_pool_size)
  logging.info('Ensemble size: %s', FLAGS.ensemble_size)
  logging.info('Ensemble number of weights: %s',
               FLAGS.ensemble_size * model.count_params())
  logging.info('Ensemble filenames: %s', str(ensemble_filenames))
  checkpoint = tf.train.Checkpoint(model=model)

  # Compute the logits on the validation set
  val_logits, val_labels = [], []
  for m, ensemble_filename in enumerate(ensemble_filenames):
    # Enforce memory clean-up
    tf.keras.backend.clear_session()
    checkpoint.restore(ensemble_filename)
    val_iterator = iter(val_dataset)
    val_logits_m = []
    for _ in range(steps_per_val_eval):
      inputs = next(val_iterator)
      features = inputs['features']
      labels = inputs['labels']
      val_logits_m.append(model(features, training=False))
      if m == 0:
        val_labels.append(labels)

    val_logits.append(tf.concat(val_logits_m, axis=0))
    if m == 0:
      val_labels = tf.concat(val_labels, axis=0)

    percent = (m + 1.) / model_pool_size
    message = ('{:.1%} completion for prediction on validation set: '
               'model {:d}/{:d}.'.format(percent, m + 1, model_pool_size))
    logging.info(message)

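  # Greedily grow the ensemble to FLAGS.ensemble_size members from the model
  # pool, optimizing the chosen validation objective; the same member may be
  # selected more than once (see the duplicate handling below).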
  selected_members, val_acc, val_nll = greedy_selection(val_logits, val_labels,
                                                        FLAGS.ensemble_size,
                                                        FLAGS.greedy_objective)
  unique_selected_members = list(set(selected_members))
  message = ('Members selected by greedy procedure: {} (with {} unique '
             'member(s))\n\t{}').format(
                 selected_members, len(unique_selected_members),
                 [ensemble_filenames[i] for i in selected_members])
  logging.info(message)
  val_metrics = {
      'val/accuracy': tf.keras.metrics.Mean(),
      'val/negative_log_likelihood': tf.keras.metrics.Mean()
  }
  val_metrics['val/accuracy'].update_state(val_acc)
  val_metrics['val/negative_log_likelihood'].update_state(val_nll)

  # Write model predictions to files.
  num_datasets = len(test_datasets)
  for m, member_id in enumerate(unique_selected_members):
    ensemble_filename = ensemble_filenames[member_id]
    checkpoint.restore(ensemble_filename)
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=member_id)
      filename = os.path.join(FLAGS.output_dir, filename)
      if not tf.io.gfile.exists(filename):
        logits = []
        test_iterator = iter(test_dataset)
        for _ in range(steps_per_eval):
          features = next(test_iterator)['features']  # pytype: disable=unsupported-operands
          logits.append(model(features, training=False))

        logits = tf.concat(logits, axis=0)
        with tf.io.gfile.GFile(filename, 'w') as f:
          np.save(f, logits.numpy())

      numerator = m * num_datasets + (n + 1)
      denominator = len(unique_selected_members) * num_datasets
      percent = numerator / denominator
      message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                 'Dataset {:d}/{:d}'.format(percent,
                                            m + 1,
                                            len(unique_selected_members),
                                            n + 1,
                                            num_datasets))
      logging.info(message)

  metrics = {
      'test/negative_log_likelihood': tf.keras.metrics.Mean(),
      'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
      'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
      'test/ece': rm.metrics.ExpectedCalibrationError(
          num_bins=FLAGS.num_bins),
  }
  metrics.update(val_metrics)
  corrupt_metrics = {}
  for name in test_datasets:
    corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
    corrupt_metrics['test/accuracy_{}'.format(name)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())
    corrupt_metrics['test/ece_{}'.format(name)] = (
        rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))
  for i in range(len(unique_selected_members)):
    metrics['test/nll_member_{}'.format(i)] = tf.keras.metrics.Mean()
    metrics['test/accuracy_member_{}'.format(i)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())
  test_diversity = {
      'test/disagreement': tf.keras.metrics.Mean(),
      'test/average_kl': tf.keras.metrics.Mean(),
      'test/cosine_similarity': tf.keras.metrics.Mean(),
  }
  metrics.update(test_diversity)

  # Evaluate model predictions.
  for n, (name, test_dataset) in enumerate(test_datasets.items()):
    logits_dataset = []
    for member_id in selected_members:
      filename = '{dataset}_{member}.npy'.format(dataset=name, member=member_id)
      filename = os.path.join(FLAGS.output_dir, filename)
      with tf.io.gfile.GFile(filename, 'rb') as f:
        logits_dataset.append(np.load(f))

    logits_dataset = tf.convert_to_tensor(logits_dataset)
    test_iterator = iter(test_dataset)
    for step in range(steps_per_eval):
      labels = next(test_iterator)['labels']  # pytype: disable=unsupported-operands
      logits = logits_dataset[:, (step*batch_size):((step+1)*batch_size)]
      labels = tf.cast(labels, tf.int32)
      negative_log_likelihood = um.ensemble_cross_entropy(labels, logits)
      per_probs = tf.nn.softmax(logits)
      probs = tf.reduce_mean(per_probs, axis=0)
      if name == 'clean':
        gibbs_ce = um.gibbs_cross_entropy(labels, logits)
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].add_batch(probs, label=labels)

        # Care is needed when handling duplicated members:
        # e.g.,
        #     selected_members = [2, 7, 3, 3]
        #     unique_selected_members = [2, 3, 7]
        #     selected_members.index(3) --> 2
        for member_id in unique_selected_members:
          i = selected_members.index(member_id)
          member_probs = per_probs[i]
          member_loss = tf.keras.losses.sparse_categorical_crossentropy(
              labels, member_probs)
          metrics['test/nll_member_{}'.format(i)].update_state(member_loss)
          metrics['test/accuracy_member_{}'.format(i)].update_state(
              labels, member_probs)
        diversity_results = um.average_pairwise_diversity(
            per_probs, len(per_probs))
        for k, v in diversity_results.items():
          test_diversity['test/' + k].update_state(v)
      else:
        corrupt_metrics['test/nll_{}'.format(name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(name)].add_batch(
            probs, label=labels)

    message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
        (n + 1) / num_datasets, n + 1, num_datasets))
    logging.info(message)

  corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                    corruption_types)
  total_results = {name: metric.result() for name, metric in metrics.items()}
  total_results.update(corrupt_results)
  # Metrics from Robustness Metrics (like ECE) will return a dict with a
  # single key/value, instead of a scalar.
  total_results = {
      k: (list(v.values())[0] if isinstance(v, dict) else v)
      for k, v in total_results.items()
  }

  logging.info('Metrics: %s', total_results)
Example #7
def main(argv):
    del argv  # unused arg
    if not FLAGS.use_gpu:
        raise ValueError('Only GPU is currently supported.')
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    tf.random.set_seed(FLAGS.seed)
    tf.io.gfile.makedirs(FLAGS.output_dir)

    ds_info = tfds.builder(FLAGS.dataset).info
    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores
    steps_per_eval = ds_info.splits['test'].num_examples // batch_size
    num_classes = ds_info.features['label'].num_classes

    dataset = ub.datasets.get(
        FLAGS.dataset, split=tfds.Split.TEST).load(batch_size=batch_size)
    test_datasets = {'clean': dataset}
    extra_kwargs = {}
    if FLAGS.dataset == 'cifar100':
        extra_kwargs['data_dir'] = FLAGS.cifar100_c_path
    corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
    for corruption_type in corruption_types:
        for severity in range(1, 6):
            dataset = ub.datasets.get(
                f'{FLAGS.dataset}_corrupted',
                corruption_type=corruption_type,
                severity=severity,
                split=tfds.Split.TEST,
                **extra_kwargs).load(batch_size=batch_size)
            test_datasets[f'{corruption_type}_{severity}'] = dataset

    model = ub.models.wide_resnet_sngp(
        input_shape=ds_info.features['image'].shape,
        batch_size=FLAGS.per_core_batch_size,
        depth=28,
        width_multiplier=10,
        num_classes=num_classes,
        l2=0.,
        use_mc_dropout=FLAGS.use_mc_dropout,
        use_filterwise_dropout=FLAGS.use_filterwise_dropout,
        dropout_rate=FLAGS.dropout_rate,
        use_gp_layer=FLAGS.use_gp_layer,
        gp_input_dim=FLAGS.gp_input_dim,
        gp_hidden_dim=FLAGS.gp_hidden_dim,
        gp_scale=FLAGS.gp_scale,
        gp_bias=FLAGS.gp_bias,
        gp_input_normalization=FLAGS.gp_input_normalization,
        gp_random_feature_type=FLAGS.gp_random_feature_type,
        gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
        gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
        use_spec_norm=FLAGS.use_spec_norm,
        spec_norm_iteration=FLAGS.spec_norm_iteration,
        spec_norm_bound=FLAGS.spec_norm_bound)
    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())

    # Search for checkpoints from their index file; then remove the index suffix.
    ensemble_filenames = tf.io.gfile.glob(
        os.path.join(FLAGS.checkpoint_dir, '**/*.index'))
    ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
    ensemble_size = len(ensemble_filenames)
    logging.info('Ensemble size: %s', ensemble_size)
    logging.info('Ensemble number of weights: %s',
                 ensemble_size * model.count_params())
    logging.info('Ensemble filenames: %s', str(ensemble_filenames))
    checkpoint = tf.train.Checkpoint(model=model)

    # Write model predictions to files.
    num_datasets = len(test_datasets)
    for m, ensemble_filename in enumerate(ensemble_filenames):
        checkpoint.restore(ensemble_filename)
        for n, (name, test_dataset) in enumerate(test_datasets.items()):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            if not tf.io.gfile.exists(filename):
                logits = []
                test_iterator = iter(test_dataset)
                for _ in range(steps_per_eval):
                    features = next(test_iterator)['features']  # pytype: disable=unsupported-operands
                    logits_member = model(features, training=False)
                    if isinstance(logits_member, tuple):
                        # If model returns a tuple of (logits, covmat), extract both
                        logits_member, covmat_member = logits_member
                    else:
                        covmat_member = tf.eye(FLAGS.per_core_batch_size)
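                    # Convert the GP logits to mean-field-adjusted logits that
                    # reflect the predictive covariance.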
                    logits_member = ed.layers.utils.mean_field_logits(
                        logits_member, covmat_member,
                        FLAGS.gp_mean_field_factor_ensemble)
                    logits.append(logits_member)

                logits = tf.concat(logits, axis=0)
                with tf.io.gfile.GFile(filename, 'w') as f:
                    np.save(f, logits.numpy())
            percent = (m * num_datasets +
                       (n + 1)) / (ensemble_size * num_datasets)
            message = (
                '{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                'Dataset {:d}/{:d}'.format(percent, m + 1, ensemble_size,
                                           n + 1, num_datasets))
            logging.info(message)

    metrics = {
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
    }
    corrupt_metrics = {}
    for name in test_datasets:
        corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
        corrupt_metrics['test/accuracy_{}'.format(name)] = (
            tf.keras.metrics.SparseCategoricalAccuracy())
        corrupt_metrics['test/ece_{}'.format(name)] = (
            um.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

    # Evaluate model predictions.
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
        logits_dataset = []
        for m in range(ensemble_size):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            with tf.io.gfile.GFile(filename, 'rb') as f:
                logits_dataset.append(np.load(f))

        logits_dataset = tf.convert_to_tensor(logits_dataset)
        test_iterator = iter(test_dataset)
        for step in range(steps_per_eval):
            labels = next(test_iterator)['labels']  # pytype: disable=unsupported-operands
            logits = logits_dataset[:, (step * batch_size):((step + 1) *
                                                            batch_size)]
            labels = tf.cast(labels, tf.int32)
            negative_log_likelihood = um.ensemble_cross_entropy(labels, logits)
            per_probs = tf.nn.softmax(logits)
            probs = tf.reduce_mean(per_probs, axis=0)
            if name == 'clean':
                gibbs_ce = um.gibbs_cross_entropy(labels, logits)
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            else:
                corrupt_metrics['test/nll_{}'.format(name)].update_state(
                    negative_log_likelihood)
                corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
                    labels, probs)
                corrupt_metrics['test/ece_{}'.format(name)].update_state(
                    labels, probs)

        message = (
            '{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
                (n + 1) / num_datasets, n + 1, num_datasets))
        logging.info(message)

    corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                      corruption_types)
    total_results = {name: metric.result() for name, metric in metrics.items()}
    total_results.update(corrupt_results)
    logging.info('Metrics: %s', total_results)
Example #8
def main(argv):
    del argv  # unused arg
    if not FLAGS.use_gpu:
        raise ValueError('Only GPU is currently supported.')
    if FLAGS.num_cores > 1:
        raise ValueError('Only a single accelerator is currently supported.')
    tf.random.set_seed(FLAGS.seed)
    tf.io.gfile.makedirs(FLAGS.output_dir)

    ind_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        batch_size=FLAGS.per_core_batch_size,
        eval_batch_size=FLAGS.per_core_batch_size,
        data_dir=FLAGS.data_dir,
        data_mode='ind')
    ood_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        batch_size=FLAGS.per_core_batch_size,
        eval_batch_size=FLAGS.per_core_batch_size,
        data_dir=FLAGS.data_dir,
        data_mode='ood')
    all_dataset_builder = ub.datasets.ClincIntentDetectionDataset(
        batch_size=FLAGS.per_core_batch_size,
        eval_batch_size=FLAGS.per_core_batch_size,
        data_dir=FLAGS.data_dir,
        data_mode='all')

    dataset_builders = {
        'clean': ind_dataset_builder,
        'ood': ood_dataset_builder,
        'all': all_dataset_builder
    }

    ds_info = ind_dataset_builder.info
    feature_size = ds_info['feature_size']
    # num_classes is the number of valid intents plus the out-of-scope intent.
    num_classes = ds_info['num_classes'] + 1
    # vocab_size is the total number of valid tokens plus the out-of-vocabulary token.
    vocab_size = ind_dataset_builder.tokenizer.num_words + 1

    batch_size = FLAGS.per_core_batch_size * FLAGS.num_cores

    test_datasets = {}
    steps_per_eval = {}
    for dataset_name, dataset_builder in dataset_builders.items():
        test_datasets[dataset_name] = dataset_builder.build(
            split=ub.datasets.base.Split.TEST)
        steps_per_eval[dataset_name] = (
            dataset_builder.info['num_test_examples'] // batch_size)

    if FLAGS.model_family.lower() == 'textcnn':
        model = cnn_model.textcnn(filter_sizes=FLAGS.filter_sizes,
                                  num_filters=FLAGS.num_filters,
                                  num_classes=num_classes,
                                  feature_size=feature_size,
                                  vocab_size=vocab_size,
                                  embed_size=FLAGS.embedding_size,
                                  dropout_rate=FLAGS.dropout_rate,
                                  l2=FLAGS.l2)
    elif FLAGS.model_family.lower() == 'bert':
        bert_config_dir, _ = deterministic.resolve_bert_ckpt_and_config_dir(
            FLAGS.bert_dir, FLAGS.bert_config_dir, FLAGS.bert_ckpt_dir)
        bert_config = bert_utils.create_config(bert_config_dir)
        model, _ = ub.models.BertBuilder(num_classes=num_classes,
                                         max_seq_length=feature_size,
                                         bert_config=bert_config)
    else:
        raise ValueError(
            'model_family ({}) can only be TextCNN or BERT.'.format(
                FLAGS.model_family))

    logging.info('Model input shape: %s', model.input_shape)
    logging.info('Model output shape: %s', model.output_shape)
    logging.info('Model number of weights: %s', model.count_params())

    # Search for checkpoints from their index file; then remove the index suffix.
    ensemble_filenames = tf.io.gfile.glob(
        os.path.join(FLAGS.checkpoint_dir, '**/*.index'))
    ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
    ensemble_size = len(ensemble_filenames)
    logging.info('Ensemble size: %s', ensemble_size)
    logging.info('Ensemble number of weights: %s',
                 ensemble_size * model.count_params())
    logging.info('Ensemble filenames: %s', str(ensemble_filenames))
    checkpoint = tf.train.Checkpoint(model=model)

    # Write model predictions to files.
    num_datasets = len(test_datasets)
    for m, ensemble_filename in enumerate(ensemble_filenames):
        checkpoint.restore(ensemble_filename)
        for n, (name, test_dataset) in enumerate(test_datasets.items()):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            if not tf.io.gfile.exists(filename):
                logits = []
                test_iterator = iter(test_dataset)
                for _ in range(steps_per_eval[name]):
                    inputs = next(test_iterator)
                    features, _ = deterministic.create_feature_and_label(
                        inputs, feature_size, model_family=FLAGS.model_family)
                    logits.append(model(features, training=False))

                logits = tf.concat(logits, axis=0)
                with tf.io.gfile.GFile(filename, 'w') as f:
                    np.save(f, logits.numpy())
            percent = (m * num_datasets +
                       (n + 1)) / (ensemble_size * num_datasets)
            message = (
                '{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                'Dataset {:d}/{:d}'.format(percent, m + 1, ensemble_size,
                                           n + 1, num_datasets))
            logging.info(message)

    metrics = {
        'test/negative_log_likelihood': tf.keras.metrics.Mean(),
        'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
        'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
        'test/ece': um.ExpectedCalibrationError(num_bins=FLAGS.num_bins),
    }

    for dataset_name in test_datasets:
        if dataset_name != 'clean':
            metrics.update({
                'test/nll_{}'.format(dataset_name):
                tf.keras.metrics.Mean(),
                'test/accuracy_{}'.format(dataset_name):
                tf.keras.metrics.SparseCategoricalAccuracy(),
                'test/ece_{}'.format(dataset_name):
                um.ExpectedCalibrationError(num_bins=FLAGS.num_bins)
            })

    # Finally, define OOD metrics for the combined IND and OOD dataset.
    metrics.update({
        'test/auroc_all': tf.keras.metrics.AUC(curve='ROC'),
        'test/auprc_all': tf.keras.metrics.AUC(curve='PR')
    })

    # Evaluate model predictions.
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
        logits_dataset = []
        for m in range(ensemble_size):
            filename = '{dataset}_{member}.npy'.format(dataset=name, member=m)
            filename = os.path.join(FLAGS.output_dir, filename)
            with tf.io.gfile.GFile(filename, 'rb') as f:
                logits_dataset.append(np.load(f))

        logits_dataset = tf.convert_to_tensor(logits_dataset)
        test_iterator = iter(test_dataset)
        for step in range(steps_per_eval[name]):
            inputs = next(test_iterator)
            _, labels = deterministic.create_feature_and_label(
                inputs, feature_size, model_family=FLAGS.model_family)
            logits = logits_dataset[:, (step * batch_size):((step + 1) *
                                                            batch_size)]
            labels = tf.cast(labels, tf.int32)
            negative_log_likelihood = um.ensemble_cross_entropy(labels, logits)
            per_probs = tf.nn.softmax(logits)
            probs = tf.reduce_mean(per_probs, axis=0)
            if name == 'clean':
                gibbs_ce = um.gibbs_cross_entropy(labels, logits)
                metrics['test/negative_log_likelihood'].update_state(
                    negative_log_likelihood)
                metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
                metrics['test/accuracy'].update_state(labels, probs)
                metrics['test/ece'].update_state(labels, probs)
            else:
                metrics['test/nll_{}'.format(name)].update_state(
                    negative_log_likelihood)
                metrics['test/accuracy_{}'.format(name)].update_state(
                    labels, probs)
                metrics['test/ece_{}'.format(name)].update_state(labels, probs)

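            # Label 150 is the out-of-scope intent; treat it as the positive
            # OOD class and score with one minus the max predicted probability.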
            if name == 'all':
                ood_labels = tf.cast(labels == 150, labels.dtype)
                ood_probs = 1. - tf.reduce_max(probs, axis=-1)
                metrics['test/auroc_{}'.format(name)].update_state(
                    ood_labels, ood_probs)
                metrics['test/auprc_{}'.format(name)].update_state(
                    ood_labels, ood_probs)

        message = (
            '{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
                (n + 1) / num_datasets, n + 1, num_datasets))
        logging.info(message)

    total_results = {name: metric.result() for name, metric in metrics.items()}
    logging.info('Metrics: %s', total_results)